// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2005-2017 Andes Technology Corporation

#include <linux/extable.h>
#include <linux/module.h>
#include <linux/signal.h>
#include <linux/ptrace.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/hardirq.h>
#include <linux/uaccess.h>
#include <linux/perf_event.h>

#include <asm/tlbflush.h>

extern void __noreturn die(const char *str, struct pt_regs *regs, long err);

/*
 * This is useful to dump out the page tables associated with
 * 'addr' in mm 'mm'.
 */
void show_pte(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;

	if (!mm)
		mm = &init_mm;

	pr_alert("pgd = %p\n", mm->pgd);
	pgd = pgd_offset(mm, addr);
	pr_alert("[%08lx] *pgd=%08lx", addr, pgd_val(*pgd));

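	/* Walk the lower levels once; bail out at the first missing or bad entry. */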
	do {
		p4d_t *p4d;
		pud_t *pud;
		pmd_t *pmd;

		if (pgd_none(*pgd))
			break;

		if (pgd_bad(*pgd)) {
			pr_alert("(bad)");
			break;
		}

		p4d = p4d_offset(pgd, addr);
		pud = pud_offset(p4d, addr);
		pmd = pmd_offset(pud, addr);
#if PTRS_PER_PMD != 1
		pr_alert(", *pmd=%08lx", pmd_val(*pmd));
#endif

		if (pmd_none(*pmd))
			break;

		if (pmd_bad(*pmd)) {
			pr_alert("(bad)");
			break;
		}

		if (IS_ENABLED(CONFIG_HIGHMEM)) {
			pte_t *pte;

			/* We must not map this if we have highmem enabled */
			pte = pte_offset_map(pmd, addr);
			pr_alert(", *pte=%08lx", pte_val(*pte));
			pte_unmap(pte);
		}
	} while (0);

	pr_alert("\n");
}

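/*
 * Handle a page fault:
 *
 *   entry      - which low-level entry point raised the fault
 *                (ENTRY_PTE_NOT_PRESENT, ENTRY_TLB_MISC or
 *                 ENTRY_GENERAL_EXCPETION)
 *   addr       - faulting virtual address
 *   error_code - exception cause bits; only the ITYPE_mskINST bit and the
 *                ITYPE_mskETYPE field are used here
 *   regs       - saved register state of the faulting context
 */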
void do_page_fault(unsigned long entry, unsigned long addr,
		   unsigned int error_code, struct pt_regs *regs)
{
	struct task_struct *tsk;
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	int si_code;
	vm_fault_t fault;
	unsigned int mask = VM_ACCESS_FLAGS;
	unsigned int flags = FAULT_FLAG_DEFAULT;

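	/* Keep only the instruction-fetch bit and the exception-type field. */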
	error_code = error_code & (ITYPE_mskINST | ITYPE_mskETYPE);
	tsk = current;
	mm = tsk->mm;
	si_code = SEGV_MAPERR;
	/*
	 * We fault-in kernel-space virtual memory on-demand. The
	 * 'reference' page table is init_mm.pgd.
	 *
	 * NOTE! We MUST NOT take any locks for this case. We may
	 * be in an interrupt or a critical region, and should
	 * only copy the information from the master page table,
	 * nothing more.
	 */
	if (addr >= TASK_SIZE) {
		if (user_mode(regs))
			goto bad_area_nosemaphore;

		if (addr >= TASK_SIZE && addr < VMALLOC_END
		    && (entry == ENTRY_PTE_NOT_PRESENT))
			goto vmalloc_fault;
		else
			goto no_context;
	}

	/* Send a signal to the task for handling the unaligned access. */
	if (entry == ENTRY_GENERAL_EXCPETION
	    && error_code == ETYPE_ALIGNMENT_CHECK) {
		if (user_mode(regs))
			goto bad_area_nosemaphore;
		else
			goto no_context;
	}

	/*
	 * If we're in an interrupt or have no user
	 * context, we must not take the fault..
	 */
	if (unlikely(faulthandler_disabled() || !mm))
		goto no_context;

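	/* Account this fault to perf before trying to take mmap_lock. */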
	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr);

	/*
	 * As per x86, we may deadlock here. However, since the kernel only
	 * validly references user space from well defined areas of the code,
	 * we can bug out early if this is from code which shouldn't.
	 */
	if (unlikely(!mmap_read_trylock(mm))) {
		if (!user_mode(regs) &&
		    !search_exception_tables(instruction_pointer(regs)))
			goto no_context;
retry:
		mmap_read_lock(mm);
	} else {
		/*
		 * The above mmap_read_trylock() might have succeeded in
		 * which case, we'll have missed the might_sleep() from
		 * mmap_read_lock().
		 */
		might_sleep();
		if (IS_ENABLED(CONFIG_DEBUG_VM)) {
			if (!user_mode(regs) &&
			    !search_exception_tables(instruction_pointer(regs)))
				goto no_context;
		}
	}

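	/*
	 * Find the VMA covering the faulting address. An address just below
	 * the VMA is only acceptable when the stack may grow down over it
	 * (VM_GROWSDOWN).
	 */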
	vma = find_vma(mm, addr);

	if (unlikely(!vma))
		goto bad_area;

	if (vma->vm_start <= addr)
		goto good_area;

	if (unlikely(!(vma->vm_flags & VM_GROWSDOWN)))
		goto bad_area;

	if (unlikely(expand_stack(vma, addr)))
		goto bad_area;

	/*
	 * Ok, we have a good vm_area for this memory access, so
	 * we can handle it..
	 */

good_area:
	si_code = SEGV_ACCERR;

	/* first do some preliminary protection checks */
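	/*
	 * A not-present fault is classified by whether it came from an
	 * instruction fetch (ITYPE_mskINST); a TLB-misc fault carries the
	 * exact protection violation in its ETYPE field.
	 */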
	if (entry == ENTRY_PTE_NOT_PRESENT) {
		if (error_code & ITYPE_mskINST)
			mask = VM_EXEC;
		else {
			mask = VM_READ | VM_WRITE;
		}
	} else if (entry == ENTRY_TLB_MISC) {
		switch (error_code & ITYPE_mskETYPE) {
		case RD_PROT:
			mask = VM_READ;
			break;
		case WRT_PROT:
			mask = VM_WRITE;
			flags |= FAULT_FLAG_WRITE;
			break;
		case NOEXEC:
			mask = VM_EXEC;
			break;
		case PAGE_MODIFY:
			mask = VM_WRITE;
			flags |= FAULT_FLAG_WRITE;
			break;
		case ACC_BIT:
			BUG();
		default:
			break;
		}

	}
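	/* Reject the access if the VMA does not grant the required rights. */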
	if (!(vma->vm_flags & mask))
		goto bad_area;

	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */

	fault = handle_mm_fault(vma, addr, flags, regs);

	/*
	 * If we need to retry but a fatal signal is pending, handle the
	 * signal first. We do not need to release the mmap_lock because it
	 * would already be released in __lock_page_or_retry in mm/filemap.c.
	 */
	if (fault_signal_pending(fault, regs)) {
		if (!user_mode(regs))
			goto no_context;
		return;
	}

	if (unlikely(fault & VM_FAULT_ERROR)) {
		if (fault & VM_FAULT_OOM)
			goto out_of_memory;
		else if (fault & VM_FAULT_SIGBUS)
			goto do_sigbus;
		else
			goto bad_area;
	}

	if (fault & VM_FAULT_RETRY) {
		flags |= FAULT_FLAG_TRIED;

		/* No need to mmap_read_unlock(mm) as we would
		 * have already released it in __lock_page_or_retry
		 * in mm/filemap.c.
		 */
		goto retry;
	}

	mmap_read_unlock(mm);
	return;

	/*
	 * Something tried to access memory that isn't in our memory map..
	 * Fix it, but check if it's kernel or user first..
	 */
bad_area:
	mmap_read_unlock(mm);

bad_area_nosemaphore:

	/* User mode accesses just cause a SIGSEGV */

	if (user_mode(regs)) {
		tsk->thread.address = addr;
		tsk->thread.error_code = error_code;
		tsk->thread.trap_no = entry;
		force_sig_fault(SIGSEGV, si_code, (void __user *)addr);
		return;
	}

no_context:

	/* Are we prepared to handle this kernel fault?
	 *
	 * (The kernel has valid exception-points in the source
	 * when it accesses user-memory. When it fails in one
	 * of those points, we find it in a table and do a jump
	 * to some fixup code that loads an appropriate error
	 * code)
	 */

	{
		const struct exception_table_entry *entry;

		if ((entry =
		     search_exception_tables(instruction_pointer(regs))) !=
		    NULL) {
			/* Adjust the instruction pointer in the stackframe */
			instruction_pointer(regs) = entry->fixup;
			return;
		}
	}

	/*
	 * Oops. The kernel tried to access some bad page. We'll have to
	 * terminate things with extreme prejudice.
	 */

	bust_spinlocks(1);
	pr_alert("Unable to handle kernel %s at virtual address %08lx\n",
		 (addr < PAGE_SIZE) ? "NULL pointer dereference" :
		 "paging request", addr);

	show_pte(mm, addr);
	die("Oops", regs, error_code);

	/*
	 * We ran out of memory, or some other thing happened to us that made
	 * us unable to handle the page fault gracefully.
	 */

out_of_memory:
	mmap_read_unlock(mm);
	if (!user_mode(regs))
		goto no_context;
	pagefault_out_of_memory();
	return;

do_sigbus:
	mmap_read_unlock(mm);

	/* Kernel mode? Handle exceptions or die */
	if (!user_mode(regs))
		goto no_context;

	/*
	 * Send a sigbus
	 */
	tsk->thread.address = addr;
	tsk->thread.error_code = error_code;
	tsk->thread.trap_no = entry;
	force_sig_fault(SIGBUS, BUS_ADRERR, (void __user *)addr);

	return;

vmalloc_fault:
	{
		/*
		 * Synchronize this task's top level page-table
		 * with the 'reference' page table.
		 *
		 * Use the page table the hardware is actually walking
		 * (read from the L1_PPTB register below) rather than
		 * tsk->active_mm->pgd, since the latter might be
		 * unavailable if this code is executed in an interrupt
		 * that fires at an unfortunate time (like inside
		 * schedule() between switch_mm and switch_to...).
		 */

		unsigned int index = pgd_index(addr);
		pgd_t *pgd, *pgd_k;
		p4d_t *p4d, *p4d_k;
		pud_t *pud, *pud_k;
		pmd_t *pmd, *pmd_k;
		pte_t *pte_k;

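		/*
		 * pgd points into the page table the MMU is walking right
		 * now (from the L1_PPTB register); pgd_k is the matching
		 * reference entry in init_mm.
		 */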
		pgd = (pgd_t *) __va(__nds32__mfsr(NDS32_SR_L1_PPTB)) + index;
		pgd_k = init_mm.pgd + index;

		if (!pgd_present(*pgd_k))
			goto no_context;

		p4d = p4d_offset(pgd, addr);
		p4d_k = p4d_offset(pgd_k, addr);
		if (!p4d_present(*p4d_k))
			goto no_context;

		pud = pud_offset(p4d, addr);
		pud_k = pud_offset(p4d_k, addr);
		if (!pud_present(*pud_k))
			goto no_context;

		pmd = pmd_offset(pud, addr);
		pmd_k = pmd_offset(pud_k, addr);
		if (!pmd_present(*pmd_k))
			goto no_context;

		if (!pmd_present(*pmd))
			set_pmd(pmd, *pmd_k);
		else
			BUG_ON(pmd_page(*pmd) != pmd_page(*pmd_k));

		/*
		 * Since the vmalloc area is global, we don't
		 * need to copy individual PTE's, it is enough to
		 * copy the pgd pointer into the pte page of the
		 * root task. If that is there, we'll find our pte if
		 * it exists.
		 */

		/* Make sure the actual PTE exists as well to
		 * catch kernel vmalloc-area accesses to non-mapped
		 * addresses. If we don't do this, this will just
		 * silently loop forever.
		 */

		pte_k = pte_offset_kernel(pmd_k, addr);
		if (!pte_present(*pte_k))
			goto no_context;

		return;
	}
}