/*
 * Copyright (C) 2009 Wind River Systems Inc
 *   Implemented by fredrik.markstrom@gmail.com and ivarholmqvist@gmail.com
 *
 * based on arch/mips/mm/fault.c which is:
 *
 * Copyright (C) 1995-2000 Ralf Baechle
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/extable.h>
#include <linux/uaccess.h>

#include <asm/mmu_context.h>
#include <asm/traps.h>

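/*
 * Exception cause codes handled here; they are compared against the
 * 'cause' argument after it has been shifted down in do_page_fault().
 */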
#define EXC_SUPERV_INSN_ACCESS 9  /* Supervisor only instruction address */
#define EXC_SUPERV_DATA_ACCESS 11 /* Supervisor only data address */
#define EXC_X_PROTECTION_FAULT 13 /* TLB permission violation (x) */
#define EXC_R_PROTECTION_FAULT 14 /* TLB permission violation (r) */
#define EXC_W_PROTECTION_FAULT 15 /* TLB permission violation (w) */

/*
 * This routine handles page faults. It determines the address,
 * and the problem, and then passes it off to one of the appropriate
 * routines.
 */
asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long cause,
				unsigned long address)
{
	struct vm_area_struct *vma = NULL;
	struct task_struct *tsk = current;
	struct mm_struct *mm = tsk->mm;
	int code = SEGV_MAPERR;
	vm_fault_t fault;
	unsigned int flags = FAULT_FLAG_DEFAULT;

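	/*
	 * The low-level entry code passes the raw exception cause; the
	 * meaningful field sits above the two low bits, so shift it down
	 * to line up with the EXC_* codes above.
	 */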
	cause >>= 2;

	/*
	 * Restart the instruction: ea holds the address of the instruction
	 * after the faulting one, so back it up by one 4-byte instruction.
	 */
	regs->ea -= 4;

	/*
	 * We fault-in kernel-space virtual memory on-demand. The
	 * 'reference' page table is init_mm.pgd.
	 *
	 * NOTE! We MUST NOT take any locks for this case. We may
	 * be in an interrupt or a critical region, and should
	 * only copy the information from the master page table,
	 * nothing more.
	 */
	if (unlikely(address >= VMALLOC_START && address <= VMALLOC_END)) {
		if (user_mode(regs))
			goto bad_area_nosemaphore;
		else
			goto vmalloc_fault;
	}

	if (unlikely(address >= TASK_SIZE))
		goto bad_area_nosemaphore;

	/*
	 * If we're in an interrupt or have no user
	 * context, we must not take the fault..
	 */
	if (faulthandler_disabled() || !mm)
		goto bad_area_nosemaphore;

	if (user_mode(regs))
		flags |= FAULT_FLAG_USER;

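	/*
	 * Try the mmap lock without sleeping first. If that fails and this
	 * is a kernel-mode access with no exception fixup entry, give up
	 * right away instead of blocking on the lock.
	 */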
	if (!mmap_read_trylock(mm)) {
		if (!user_mode(regs) && !search_exception_tables(regs->ea))
			goto bad_area_nosemaphore;
retry:
		mmap_read_lock(mm);
	}

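	/*
	 * An address below the vma returned by find_vma() is only valid if
	 * that vma is a downward-growing stack that can be expanded to
	 * cover it.
	 */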
	vma = find_vma(mm, address);
	if (!vma)
		goto bad_area;
	if (vma->vm_start <= address)
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
	if (expand_stack(vma, address))
		goto bad_area;
/*
 * Ok, we have a good vm_area for this memory access, so
 * we can handle it..
 */
good_area:
	code = SEGV_ACCERR;

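	/*
	 * Check the access implied by the exception cause against the
	 * permissions of the vma we found.
	 */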
	switch (cause) {
	case EXC_SUPERV_INSN_ACCESS:
		goto bad_area;
	case EXC_SUPERV_DATA_ACCESS:
		goto bad_area;
	case EXC_X_PROTECTION_FAULT:
		if (!(vma->vm_flags & VM_EXEC))
			goto bad_area;
		break;
	case EXC_R_PROTECTION_FAULT:
		if (!(vma->vm_flags & VM_READ))
			goto bad_area;
		break;
	case EXC_W_PROTECTION_FAULT:
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
		flags |= FAULT_FLAG_WRITE;
		break;
	}

	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	fault = handle_mm_fault(vma, address, flags);

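	/*
	 * If the fault was interrupted by a pending signal, the mmap lock
	 * has already been dropped on the VM_FAULT_RETRY path; just return
	 * and let the signal be handled.
	 */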
	if (fault_signal_pending(fault, regs))
		return;

	if (unlikely(fault & VM_FAULT_ERROR)) {
		if (fault & VM_FAULT_OOM)
			goto out_of_memory;
		else if (fault & VM_FAULT_SIGSEGV)
			goto bad_area;
		else if (fault & VM_FAULT_SIGBUS)
			goto do_sigbus;
		BUG();
	}

	/*
	 * Major/minor page fault accounting is only done on the
	 * initial attempt. If we go through a retry, it is extremely
	 * likely that the page will be found in page cache at that point.
	 */
	if (flags & FAULT_FLAG_ALLOW_RETRY) {
		if (fault & VM_FAULT_MAJOR)
			current->maj_flt++;
		else
			current->min_flt++;
		if (fault & VM_FAULT_RETRY) {
			flags |= FAULT_FLAG_TRIED;

			/*
			 * No need to mmap_read_unlock(mm) as we would
			 * have already released it in __lock_page_or_retry
			 * in mm/filemap.c.
			 */

			goto retry;
		}
	}

	mmap_read_unlock(mm);
	return;

/*
 * Something tried to access memory that isn't in our memory map..
 * Fix it, but check if it's kernel or user first..
 */
bad_area:
	mmap_read_unlock(mm);

bad_area_nosemaphore:
	/* User mode accesses just cause a SIGSEGV */
	if (user_mode(regs)) {
		if (unhandled_signal(current, SIGSEGV) && printk_ratelimit()) {
			pr_info("%s: unhandled page fault (%d) at 0x%08lx, cause %ld\n",
				current->comm, SIGSEGV, address, cause);
			show_regs(regs);
		}
		_exception(SIGSEGV, regs, code, address);
		return;
	}

no_context:
	/* Are we prepared to handle this kernel fault? */
	if (fixup_exception(regs))
		return;

	/*
	 * Oops. The kernel tried to access some bad page. We'll have to
	 * terminate things with extreme prejudice.
	 */
	bust_spinlocks(1);

	pr_alert("Unable to handle kernel %s at virtual address %08lx\n",
		address < PAGE_SIZE ? "NULL pointer dereference" :
		"paging request", address);
	pr_alert("ea = %08lx, ra = %08lx, cause = %ld\n", regs->ea, regs->ra,
		cause);
	panic("Oops");
	return;

/*
 * We ran out of memory, or some other thing happened to us that made
 * us unable to handle the page fault gracefully.
 */
out_of_memory:
	mmap_read_unlock(mm);
	if (!user_mode(regs))
		goto no_context;
	pagefault_out_of_memory();
	return;

do_sigbus:
	mmap_read_unlock(mm);

	/* Kernel mode? Handle exceptions or die */
	if (!user_mode(regs))
		goto no_context;

	_exception(SIGBUS, regs, BUS_ADRERR, address);
	return;

vmalloc_fault:
	{
		/*
		 * Synchronize this task's top level page-table
		 * with the 'reference' page table.
		 *
		 * Do _not_ use "tsk" here. We might be inside
		 * an interrupt in the middle of a task switch..
		 */
		int offset = pgd_index(address);
		pgd_t *pgd, *pgd_k;
		p4d_t *p4d, *p4d_k;
		pud_t *pud, *pud_k;
		pmd_t *pmd, *pmd_k;
		pte_t *pte_k;

		pgd = pgd_current + offset;
		pgd_k = init_mm.pgd + offset;

		if (!pgd_present(*pgd_k))
			goto no_context;
		set_pgd(pgd, *pgd_k);

		p4d = p4d_offset(pgd, address);
		p4d_k = p4d_offset(pgd_k, address);
		if (!p4d_present(*p4d_k))
			goto no_context;
		pud = pud_offset(p4d, address);
		pud_k = pud_offset(p4d_k, address);
		if (!pud_present(*pud_k))
			goto no_context;
		pmd = pmd_offset(pud, address);
		pmd_k = pmd_offset(pud_k, address);
		if (!pmd_present(*pmd_k))
			goto no_context;
		set_pmd(pmd, *pmd_k);

		pte_k = pte_offset_kernel(pmd_k, address);
		if (!pte_present(*pte_k))
			goto no_context;

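		/*
		 * Drop any stale TLB entry for this address so the freshly
		 * synced kernel mapping is used on the next access.
		 */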
		flush_tlb_kernel_page(address);
		return;
	}
}