/*
 * arch/sh/mm/mmap.c
 *
 * Copyright (C) 2008 - 2009 Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/mman.h>
#include <linux/module.h>
#include <asm/page.h>
#include <asm/processor.h>

unsigned long shm_align_mask = PAGE_SIZE - 1;	/* Sane caches */
EXPORT_SYMBOL(shm_align_mask);
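
/*
 * Note: PAGE_SIZE - 1 is only the default for non-aliasing ("sane")
 * caches. On parts whose data cache ways span multiple pages, the boot
 * code is expected to widen this mask to (way size - 1) so that
 * COLOUR_ALIGN() below constrains the colour bits. Assuming a 16KB way
 * with 4KB pages, for example, shm_align_mask would become 0x3fff
 * rather than 0xfff.
 */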

#ifdef CONFIG_MMU
/*
 * To avoid cache aliases, we map shared pages with the same colour.
 */
static inline unsigned long COLOUR_ALIGN(unsigned long addr,
					 unsigned long pgoff)
{
	unsigned long base = (addr + shm_align_mask) & ~shm_align_mask;
	unsigned long off = (pgoff << PAGE_SHIFT) & shm_align_mask;

	return base + off;
}
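
/*
 * Worked example, assuming shm_align_mask = 0x3fff (a 16KB cache way)
 * and PAGE_SHIFT = 12:
 *
 *	addr  = 0x40002000: base = (addr + 0x3fff) & ~0x3fff = 0x40004000
 *	pgoff = 5:          off  = (5 << 12) & 0x3fff        = 0x1000
 *
 * The result, 0x40005000, shares its colour (its offset within a cache
 * way) with file offset 0x5000, so every such mapping of that page
 * lands on the same cache lines and cannot alias.
 */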

unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
	unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	int do_colour_align;
	struct vm_unmapped_area_info info;

	if (flags & MAP_FIXED) {
		/* We do not accept a shared mapping if it would violate
		 * cache aliasing constraints.
		 */
		if ((flags & MAP_SHARED) &&
		    ((addr - (pgoff << PAGE_SHIFT)) & shm_align_mask))
			return -EINVAL;
		return addr;
	}
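
	/*
	 * For instance (again assuming shm_align_mask = 0x3fff), a
	 * MAP_FIXED | MAP_SHARED request for addr = 0x40002000 with
	 * pgoff = 0 is rejected above, since (0x40002000 - 0) & 0x3fff
	 * = 0x2000: page 0 of the file has colour 0, so mapping it at
	 * colour 0x2000 could alias any other mapping of that page.
	 */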

	if (unlikely(len > TASK_SIZE))
		return -ENOMEM;

	do_colour_align = 0;
	if (filp || (flags & MAP_SHARED))
		do_colour_align = 1;

	if (addr) {
		if (do_colour_align)
			addr = COLOUR_ALIGN(addr, pgoff);
		else
			addr = PAGE_ALIGN(addr);

		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vm_start_gap(vma)))
			return addr;
	}

	info.flags = 0;
	info.length = len;
	info.low_limit = TASK_UNMAPPED_BASE;
	info.high_limit = TASK_SIZE;
	info.align_mask = do_colour_align ? (PAGE_MASK & shm_align_mask) : 0;
	info.align_offset = pgoff << PAGE_SHIFT;
	return vm_unmapped_area(&info);
}
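
/*
 * For both the bottom-up search above and the top-down variant below,
 * vm_unmapped_area() returns an address whose bits under align_mask
 * match the corresponding bits of align_offset. Masking with PAGE_MASK
 * drops the sub-page bits of shm_align_mask, so only the colour bits
 * above the page offset are constrained; assuming a 16KB way and 4KB
 * pages, that leaves align_mask = 0x3000, i.e. four possible colours.
 */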

unsigned long
arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
			       const unsigned long len, const unsigned long pgoff,
			       const unsigned long flags)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	unsigned long addr = addr0;
	int do_colour_align;
	struct vm_unmapped_area_info info;

	if (flags & MAP_FIXED) {
		/* We do not accept a shared mapping if it would violate
		 * cache aliasing constraints.
		 */
		if ((flags & MAP_SHARED) &&
		    ((addr - (pgoff << PAGE_SHIFT)) & shm_align_mask))
			return -EINVAL;
		return addr;
	}

	if (unlikely(len > TASK_SIZE))
		return -ENOMEM;

	do_colour_align = 0;
	if (filp || (flags & MAP_SHARED))
		do_colour_align = 1;

	/* requesting a specific address */
	if (addr) {
		if (do_colour_align)
			addr = COLOUR_ALIGN(addr, pgoff);
		else
			addr = PAGE_ALIGN(addr);

		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vm_start_gap(vma)))
			return addr;
	}

	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
	info.length = len;
	info.low_limit = PAGE_SIZE;
	info.high_limit = mm->mmap_base;
	info.align_mask = do_colour_align ? (PAGE_MASK & shm_align_mask) : 0;
	info.align_offset = pgoff << PAGE_SHIFT;
	addr = vm_unmapped_area(&info);

	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
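	/*
	 * vm_unmapped_area() signals failure with a negative errno,
	 * which is never page aligned, so the "addr & ~PAGE_MASK" test
	 * below doubles as the error check.
	 */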
	if (addr & ~PAGE_MASK) {
		VM_BUG_ON(addr != -ENOMEM);
		info.flags = 0;
		info.low_limit = TASK_UNMAPPED_BASE;
		info.high_limit = TASK_SIZE;
		addr = vm_unmapped_area(&info);
	}

	return addr;
}
#endif /* CONFIG_MMU */

/*
 * You really shouldn't be using read() or write() on /dev/mem. This
 * might go away in the future.
 */
int valid_phys_addr_range(phys_addr_t addr, size_t count)
{
	if (addr < __MEMORY_START)
		return 0;
	if (addr + count > __pa(high_memory))
		return 0;

	return 1;
}
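
/*
 * For example, assuming __MEMORY_START = 0x08000000 and 64MB of RAM
 * (so __pa(high_memory) = 0x0c000000), the checks above accept a
 * 0x1000-byte access at 0x0bfff000 but refuse one at 0x0bffff00,
 * which would run past the end of memory.
 */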

int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
{
	return 1;
}