blob: a0f8a0ca0788adccb4459ff0eec6e8dc8d095f29 [file] [log] [blame]
Greg Kroah-Hartmanb2441312017-11-01 15:07:57 +01001// SPDX-License-Identifier: GPL-2.0
Linus Torvalds1da177e2005-04-16 15:20:36 -07002/*
3 * linux/arch/arm/mm/mmap.c
4 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07005#include <linux/fs.h>
6#include <linux/mm.h>
7#include <linux/mman.h>
8#include <linux/shm.h>
Ingo Molnar3f07c012017-02-08 18:51:30 +01009#include <linux/sched/signal.h>
Ingo Molnar01042602017-02-08 18:51:31 +010010#include <linux/sched/mm.h>
Russell King09d9bae2008-09-05 14:08:44 +010011#include <linux/io.h>
Nicolas Pitredf5419a2011-04-13 04:57:17 +010012#include <linux/personality.h>
Nicolas Pitrecc92c282010-06-14 21:16:19 -040013#include <linux/random.h>
Rob Herring41dfaa92011-11-22 04:01:06 +010014#include <asm/cachetype.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070015
/*
 * Round "addr" up to the next SHMLBA boundary, then add the sub-SHMLBA
 * "colour" offset implied by the file page offset "pgoff", so that a
 * given page of a shared object always lands at the same cache colour.
 * NOTE: both arguments are evaluated more than once — pass expressions
 * without side effects.
 */
#define COLOUR_ALIGN(addr,pgoff)		\
	((((addr)+SHMLBA-1)&~(SHMLBA-1)) +	\
	 (((pgoff)<<PAGE_SHIFT) & (SHMLBA-1)))
19
20/*
21 * We need to ensure that shared mappings are correctly aligned to
22 * avoid aliasing issues with VIPT caches. We need to ensure that
23 * a specific page of an object is always mapped at a multiple of
24 * SHMLBA bytes.
25 *
26 * We unconditionally provide this function for all cases, however
27 * in the VIVT case, we optimise out the alignment rules.
28 */
/*
 * Bottom-up unmapped-area search for ARM.
 *
 * Returns a page-aligned (or SHMLBA colour-aligned, when the caches
 * alias) address of at least @len bytes, the validated @addr for
 * MAP_FIXED requests, or a negative errno (-EINVAL / -ENOMEM).
 */
unsigned long
arch_get_unmapped_area(struct file *filp, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	int do_align = 0;
	int aliasing = cache_is_vipt_aliasing();
	struct vm_unmapped_area_info info;

	/*
	 * We only need to do colour alignment if either the I or D
	 * caches alias.  File-backed and MAP_SHARED mappings can be
	 * mapped more than once, so they must share a colour.
	 */
	if (aliasing)
		do_align = filp || (flags & MAP_SHARED);

	/*
	 * We enforce the MAP_FIXED case: the caller's address is used
	 * as-is, but a shared mapping on an aliasing cache must still
	 * sit at the colour dictated by pgoff, else refuse it.
	 */
	if (flags & MAP_FIXED) {
		if (aliasing && flags & MAP_SHARED &&
		    (addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1))
			return -EINVAL;
		return addr;
	}

	/* Request larger than the whole user address space. */
	if (len > TASK_SIZE)
		return -ENOMEM;

	/*
	 * A hint was given: align it appropriately and use it if the
	 * range is free (vm_start_gap() keeps us clear of a following
	 * stack guard gap).
	 */
	if (addr) {
		if (do_align)
			addr = COLOUR_ALIGN(addr, pgoff);
		else
			addr = PAGE_ALIGN(addr);

		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vm_start_gap(vma)))
			return addr;
	}

	/*
	 * No usable hint: search [mmap_base, TASK_SIZE) bottom-up.
	 * align_mask/align_offset make vm_unmapped_area() return an
	 * address congruent to pgoff modulo SHMLBA when colouring is
	 * required.
	 */
	info.flags = 0;
	info.length = len;
	info.low_limit = mm->mmap_base;
	info.high_limit = TASK_SIZE;
	info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
	info.align_offset = pgoff << PAGE_SHIFT;
	return vm_unmapped_area(&info);
}
79
/*
 * Top-down unmapped-area search for ARM (legacy-compatible fallback to
 * bottom-up on failure).  Same contract and colour-alignment rules as
 * arch_get_unmapped_area() above.
 */
unsigned long
arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
			const unsigned long len, const unsigned long pgoff,
			const unsigned long flags)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	unsigned long addr = addr0;
	int do_align = 0;
	int aliasing = cache_is_vipt_aliasing();
	struct vm_unmapped_area_info info;

	/*
	 * We only need to do colour alignment if either the I or D
	 * caches alias.
	 */
	if (aliasing)
		do_align = filp || (flags & MAP_SHARED);

	/* requested length too big for entire address space */
	if (len > TASK_SIZE)
		return -ENOMEM;

	/*
	 * MAP_FIXED: honour the address, but a shared mapping on an
	 * aliasing cache must be at its pgoff-dictated colour.
	 */
	if (flags & MAP_FIXED) {
		if (aliasing && flags & MAP_SHARED &&
		    (addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1))
			return -EINVAL;
		return addr;
	}

	/* requesting a specific address: use the hint if its range is free */
	if (addr) {
		if (do_align)
			addr = COLOUR_ALIGN(addr, pgoff);
		else
			addr = PAGE_ALIGN(addr);
		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vm_start_gap(vma)))
			return addr;
	}

	/* Search downwards from mmap_base towards FIRST_USER_ADDRESS. */
	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
	info.length = len;
	info.low_limit = FIRST_USER_ADDRESS;
	info.high_limit = mm->mmap_base;
	info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
	info.align_offset = pgoff << PAGE_SHIFT;
	addr = vm_unmapped_area(&info);

	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.  (A non-page-aligned return is a negative
	 * errno; only -ENOMEM is expected here.)
	 */
	if (addr & ~PAGE_MASK) {
		VM_BUG_ON(addr != -ENOMEM);
		info.flags = 0;
		info.low_limit = mm->mmap_base;
		info.high_limit = TASK_SIZE;
		addr = vm_unmapped_area(&info);
	}

	return addr;
}
146
Lennert Buytenhek51635ad2006-09-16 10:50:22 +0100147/*
148 * You really shouldn't be using read() or write() on /dev/mem. This
149 * might go away in the future.
150 */
Cyril Chemparathy7e6735c2012-09-12 14:05:58 -0400151int valid_phys_addr_range(phys_addr_t addr, size_t size)
Lennert Buytenhek51635ad2006-09-16 10:50:22 +0100152{
Alexandre Rusev9ae3ae02008-02-26 18:42:10 +0100153 if (addr < PHYS_OFFSET)
154 return 0;
Greg Ungerer6806bfe2009-10-02 00:45:28 +0100155 if (addr + size > __pa(high_memory - 1) + 1)
Lennert Buytenhek51635ad2006-09-16 10:50:22 +0100156 return 0;
157
158 return 1;
159}
160
161/*
Sergey Dyasly3159f372013-09-24 16:38:00 +0100162 * Do not allow /dev/mem mappings beyond the supported physical range.
Lennert Buytenhek51635ad2006-09-16 10:50:22 +0100163 */
164int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
165{
Sergey Dyasly3159f372013-09-24 16:38:00 +0100166 return (pfn + (size >> PAGE_SHIFT)) <= (1 + (PHYS_MASK >> PAGE_SHIFT));
Lennert Buytenhek51635ad2006-09-16 10:50:22 +0100167}