/*
 *  linux/arch/arm/mm/mmap.c
 */
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/shm.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/io.h>
#include <linux/personality.h>
#include <linux/random.h>
#include <asm/cachetype.h>

#define COLOUR_ALIGN(addr,pgoff)		\
	((((addr)+SHMLBA-1)&~(SHMLBA-1)) +	\
	 (((pgoff)<<PAGE_SHIFT) & (SHMLBA-1)))
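
/*
 * Worked example of COLOUR_ALIGN() (not from the original source;
 * it assumes SHMLBA == 4 * PAGE_SIZE, i.e. 0x4000 with 4KiB pages,
 * as on ARM):
 *
 *	COLOUR_ALIGN(0x12345, 3)
 *	  = ((0x12345 + 0x3fff) & ~0x3fff) + ((3 << 12) & 0x3fff)
 *	  = 0x14000 + 0x3000
 *	  = 0x17000
 *
 * The result is an address at or above 0x12345 whose offset within
 * an SHMLBA-sized region (0x3000) matches that of page 3 of the
 * object, so every mapping of that page lands on one cache colour.
 */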

/* gap between mmap and stack */
#define MIN_GAP (128*1024*1024UL)
#define MAX_GAP ((TASK_SIZE)/6*5)

static int mmap_is_legacy(void)
{
	if (current->personality & ADDR_COMPAT_LAYOUT)
		return 1;

	if (rlimit(RLIMIT_STACK) == RLIM_INFINITY)
		return 1;

	return sysctl_legacy_va_layout;
}
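
/*
 * Note (not in the original source): an unlimited stack rlimit forces
 * the legacy layout above because the top-down base computed below is
 * derived from the stack limit; with RLIM_INFINITY there is no
 * sensible gap to reserve under the stack.
 */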

static unsigned long mmap_base(unsigned long rnd)
{
	unsigned long gap = rlimit(RLIMIT_STACK);

	if (gap < MIN_GAP)
		gap = MIN_GAP;
	else if (gap > MAX_GAP)
		gap = MAX_GAP;

	return PAGE_ALIGN(TASK_SIZE - gap - rnd);
}
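
/*
 * Worked example of mmap_base() (not from the original source;
 * it assumes the common 3GiB user split, TASK_SIZE == 0xc0000000,
 * and the default 8MiB stack rlimit):
 *
 *	8MiB < MIN_GAP, so gap = 128MiB and
 *	mmap_base = 0xc0000000 - 0x08000000 - rnd = 0xb8000000 - rnd
 *
 * MAX_GAP (5/6 of TASK_SIZE, 2.5GiB here) caps a huge stack rlimit so
 * that some address space is always left for top-down mmaps.
 */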

/*
 * We need to ensure that shared mappings are correctly aligned to
 * avoid aliasing issues with VIPT caches: a specific page of an
 * object must always be mapped at a multiple of SHMLBA bytes.
 *
 * We provide this function unconditionally for all cases; in the
 * VIVT case, however, the alignment rules are optimised out.
 */
unsigned long
arch_get_unmapped_area(struct file *filp, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	int do_align = 0;
	int aliasing = cache_is_vipt_aliasing();
	struct vm_unmapped_area_info info;

	/*
	 * We only need to do colour alignment if either the I or D
	 * caches alias.
	 */
	if (aliasing)
		do_align = filp || (flags & MAP_SHARED);

	/*
	 * We enforce the MAP_FIXED case: honour the requested address,
	 * but refuse a shared mapping whose fixed address has the
	 * wrong cache colour.
	 */
	if (flags & MAP_FIXED) {
		if (aliasing && flags & MAP_SHARED &&
		    (addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1))
			return -EINVAL;
		return addr;
	}
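
	/*
	 * Illustration of the colour check above (not from the
	 * original source; it assumes SHMLBA == 0x4000): a
	 * MAP_FIXED|MAP_SHARED request for addr = 0x10000 with
	 * pgoff = 1 computes (0x10000 - (1 << PAGE_SHIFT)) & 0x3fff
	 * = 0x3000, which is non-zero: the fixed address cannot share
	 * a cache colour with other mappings of page 1, so the request
	 * is rejected with -EINVAL rather than silently moved.
	 */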

	if (len > TASK_SIZE)
		return -ENOMEM;

	if (addr) {
		if (do_align)
			addr = COLOUR_ALIGN(addr, pgoff);
		else
			addr = PAGE_ALIGN(addr);

		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vm_start_gap(vma)))
			return addr;
	}

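	/*
	 * Explanatory note (not in the original source): align_mask
	 * keeps only the colour bits above the page offset; with
	 * SHMLBA == 0x4000 and 4KiB pages it is PAGE_MASK & 0x3fff ==
	 * 0x3000. vm_unmapped_area() then returns an address whose
	 * colour bits match those of align_offset (pgoff << PAGE_SHIFT),
	 * the same guarantee COLOUR_ALIGN() provides for address hints.
	 */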
	info.flags = 0;
	info.length = len;
	info.low_limit = mm->mmap_base;
	info.high_limit = TASK_SIZE;
	info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
	info.align_offset = pgoff << PAGE_SHIFT;
	return vm_unmapped_area(&info);
}

unsigned long
arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
			const unsigned long len, const unsigned long pgoff,
			const unsigned long flags)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	unsigned long addr = addr0;
	int do_align = 0;
	int aliasing = cache_is_vipt_aliasing();
	struct vm_unmapped_area_info info;

	/*
	 * We only need to do colour alignment if either the I or D
	 * caches alias.
	 */
	if (aliasing)
		do_align = filp || (flags & MAP_SHARED);

	/* requested length too big for entire address space */
	if (len > TASK_SIZE)
		return -ENOMEM;

	if (flags & MAP_FIXED) {
		if (aliasing && flags & MAP_SHARED &&
		    (addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1))
			return -EINVAL;
		return addr;
	}

	/* requesting a specific address */
	if (addr) {
		if (do_align)
			addr = COLOUR_ALIGN(addr, pgoff);
		else
			addr = PAGE_ALIGN(addr);
		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vm_start_gap(vma)))
			return addr;
	}

	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
	info.length = len;
	info.low_limit = FIRST_USER_ADDRESS;
	info.high_limit = mm->mmap_base;
	info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
	info.align_offset = pgoff << PAGE_SHIFT;
	addr = vm_unmapped_area(&info);

	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations. A successful vm_unmapped_area() result is
	 * always page aligned, so a value with low bits set (such as
	 * -ENOMEM) must be an error code.
	 */
	if (addr & ~PAGE_MASK) {
		VM_BUG_ON(addr != -ENOMEM);
		info.flags = 0;
		info.low_limit = mm->mmap_base;
		info.high_limit = TASK_SIZE;
		addr = vm_unmapped_area(&info);
	}

	return addr;
}

unsigned long arch_mmap_rnd(void)
{
	unsigned long rnd;

	rnd = get_random_long() & ((1UL << mmap_rnd_bits) - 1);

	return rnd << PAGE_SHIFT;
}
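
/*
 * Sizing note (not in the original source): assuming the arm default
 * of mmap_rnd_bits == 8 and 4KiB pages, rnd covers 0..255 pages, i.e.
 * up to 1MiB of randomisation of the mmap base. Raising the
 * vm.mmap_rnd_bits sysctl widens that range.
 */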

void arch_pick_mmap_layout(struct mm_struct *mm)
{
	unsigned long random_factor = 0UL;

	if (current->flags & PF_RANDOMIZE)
		random_factor = arch_mmap_rnd();

	if (mmap_is_legacy()) {
		mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
		mm->get_unmapped_area = arch_get_unmapped_area;
	} else {
		mm->mmap_base = mmap_base(random_factor);
		mm->get_unmapped_area = arch_get_unmapped_area_topdown;
	}
}
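
/*
 * Illustrative sketch of the two layouts (not in the original source;
 * addresses assume TASK_SIZE == 0xc0000000):
 *
 *	legacy (bottom-up)		default (top-down)
 *	0xc0000000  stack		0xc0000000  stack
 *	    ...				    - gap -
 *	TASK_UNMAPPED_BASE + rnd	mmap_base(rnd)
 *	    mmaps grow upwards		    mmaps grow downwards
 */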

/*
 * You really shouldn't be using read() or write() on /dev/mem. This
 * might go away in the future.
 */
int valid_phys_addr_range(phys_addr_t addr, size_t size)
{
	if (addr < PHYS_OFFSET)
		return 0;
	if (addr + size > __pa(high_memory - 1) + 1)
		return 0;

	return 1;
}

/*
 * Do not allow /dev/mem mappings beyond the supported physical range.
 */
int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
{
	return (pfn + (size >> PAGE_SHIFT)) <= (1 + (PHYS_MASK >> PAGE_SHIFT));
}

#ifdef CONFIG_STRICT_DEVMEM

#include <linux/ioport.h>

/*
 * devmem_is_allowed() checks whether /dev/mem access to a certain
 * address is valid. The argument is a physical page number.
 * We mimic x86 here by disallowing access to system RAM as well as
 * device-exclusive MMIO regions. This effectively disables read()/write()
 * on /dev/mem.
 */
int devmem_is_allowed(unsigned long pfn)
{
	if (iomem_is_exclusive(pfn << PAGE_SHIFT))
		return 0;
	if (!page_is_ram(pfn))
		return 1;
	return 0;
}

#endif