// SPDX-License-Identifier: GPL-2.0-only
/*
 * Memory fault handling for Hexagon
 *
 * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
 */

/*
 * Page fault handling for the Hexagon Virtual Machine.
 * Can also be called by a native port emulating the HVM
 * exceptions.
 */

#include <asm/traps.h>
#include <linux/uaccess.h>
#include <linux/mm.h>
#include <linux/sched/signal.h>
#include <linux/signal.h>
#include <linux/extable.h>
#include <linux/hardirq.h>
#include <linux/perf_event.h>

/*
 * Decode of hardware exception sends us to one of several
 * entry points.  At each, we generate canonical arguments
 * for handling by the abstract memory management code.
 */
#define FLT_IFETCH	-1
#define FLT_LOAD	0
#define FLT_STORE	1


/*
 * Canonical page fault handler
 */
void do_page_fault(unsigned long address, long cause, struct pt_regs *regs)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	int si_signo;
	int si_code = SEGV_MAPERR;
	vm_fault_t fault;
	const struct exception_table_entry *fixup;
	unsigned int flags = FAULT_FLAG_DEFAULT;

	/*
	 * If we're in an interrupt or have no user context,
	 * then we must not take the fault.
	 */
	if (unlikely(in_interrupt() || !mm))
		goto no_context;

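	/*
	 * Everything below may sleep (taking mmap_lock, allocating
	 * pages), so make sure interrupts are enabled first.
	 */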
	local_irq_enable();

	if (user_mode(regs))
		flags |= FAULT_FLAG_USER;

	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
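	/*
	 * On VM_FAULT_RETRY the mmap lock has already been dropped
	 * for us by handle_mm_fault(); we come back here to take it
	 * again and retry the fault with FAULT_FLAG_TRIED set.
	 */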
retry:
	mmap_read_lock(mm);
	vma = find_vma(mm, address);
	if (!vma)
		goto bad_area;

	if (vma->vm_start <= address)
		goto good_area;

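	/*
	 * The address is below the closest VMA; it can still be
	 * valid if that VMA is a stack which grows down far enough
	 * to cover it.
	 */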
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;

	if (expand_stack(vma, address))
		goto bad_area;

good_area:
	/* Address space is OK. Now check access rights. */
	si_code = SEGV_ACCERR;

	switch (cause) {
	case FLT_IFETCH:
		if (!(vma->vm_flags & VM_EXEC))
			goto bad_area;
		break;
	case FLT_LOAD:
		if (!(vma->vm_flags & VM_READ))
			goto bad_area;
		break;
	case FLT_STORE:
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
		flags |= FAULT_FLAG_WRITE;
		break;
	}

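	/*
	 * Access checks passed; have the core mm resolve the fault.
	 * Passing regs lets handle_mm_fault() do the major/minor
	 * fault perf accounting on our behalf.
	 */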
	fault = handle_mm_fault(vma, address, flags, regs);

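	/*
	 * If the fault handler bailed out because a signal arrived,
	 * just return: the fault will be retaken (or the task killed)
	 * once the signal has been dealt with.
	 */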
	if (fault_signal_pending(fault, regs))
		return;

	/* The most common case -- we are done. */
	if (likely(!(fault & VM_FAULT_ERROR))) {
		if (fault & VM_FAULT_RETRY) {
			flags |= FAULT_FLAG_TRIED;
			goto retry;
		}

		mmap_read_unlock(mm);
		return;
	}

	mmap_read_unlock(mm);

	/* Handle copyin/out exception cases */
	if (!user_mode(regs))
		goto no_context;

	if (fault & VM_FAULT_OOM) {
		pagefault_out_of_memory();
		return;
	}

	/* User-mode address is in the memory map, but we are
	 * unable to fix up the page fault.
	 */
	if (fault & VM_FAULT_SIGBUS) {
		si_signo = SIGBUS;
		si_code = BUS_ADRERR;
	}
	/* Address is not in the memory map */
	else {
		si_signo = SIGSEGV;
		si_code = SEGV_ACCERR;
	}
	force_sig_fault(si_signo, si_code, (void __user *)address);
	return;

bad_area:
	mmap_read_unlock(mm);

	if (user_mode(regs)) {
		force_sig_fault(SIGSEGV, si_code, (void __user *)address);
		return;
	}
	/* Kernel-mode fault falls through */

no_context:
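	/*
	 * A kernel-mode fault may come from a uaccess routine
	 * (copy to/from user) that has a fixup recorded in the
	 * exception table; if so, resume at the fixup address
	 * rather than dying.
	 */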
	fixup = search_exception_tables(pt_elr(regs));
	if (fixup) {
		pt_set_elr(regs, fixup->fixup);
		return;
	}

	/* Things are looking very, very bad now */
	bust_spinlocks(1);
	printk(KERN_EMERG "Unable to handle kernel paging request at "
		"virtual address 0x%08lx, regs %p\n", address, regs);
	die("Bad Kernel VA", regs, SIGKILL);
}


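/*
 * Entry points from the hardware exception decode: each translates
 * its fault into the canonical (address, cause) arguments expected
 * by do_page_fault().
 */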
void read_protection_fault(struct pt_regs *regs)
{
	unsigned long badvadr = pt_badva(regs);

	do_page_fault(badvadr, FLT_LOAD, regs);
}

void write_protection_fault(struct pt_regs *regs)
{
	unsigned long badvadr = pt_badva(regs);

	do_page_fault(badvadr, FLT_STORE, regs);
}

void execute_protection_fault(struct pt_regs *regs)
{
	unsigned long badvadr = pt_badva(regs);

	do_page_fault(badvadr, FLT_IFETCH, regs);
}