// SPDX-License-Identifier: GPL-2.0-only
/*
 * Memory fault handling for Hexagon
 *
 * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
 */

/*
 * Page fault handling for the Hexagon Virtual Machine.
 * Can also be called by a native port emulating the HVM
 * exceptions.
 */

#include <asm/traps.h>
#include <linux/uaccess.h>
#include <linux/mm.h>
#include <linux/sched/signal.h>
#include <linux/signal.h>
#include <linux/extable.h>
#include <linux/hardirq.h>
#include <linux/perf_event.h>

/*
 * Decode of hardware exception sends us to one of several
 * entry points. At each, we generate canonical arguments
 * for handling by the abstract memory management code.
 */
#define FLT_IFETCH     -1
#define FLT_LOAD        0
#define FLT_STORE       1


/*
 * Canonical page fault handler
 */
void do_page_fault(unsigned long address, long cause, struct pt_regs *regs)
{
        struct vm_area_struct *vma;
        struct mm_struct *mm = current->mm;
        int si_signo;
        int si_code = SEGV_MAPERR;
        vm_fault_t fault;
        const struct exception_table_entry *fixup;
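        /*
         * FAULT_FLAG_DEFAULT lets the generic fault handler retry the
         * fault (dropping the mmap lock if it must block) and makes the
         * wait killable by fatal signals.
         */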
        unsigned int flags = FAULT_FLAG_DEFAULT;

        /*
         * If we're in an interrupt or have no user context,
         * then must not take the fault.
         */
        if (unlikely(in_interrupt() || !mm))
                goto no_context;

        local_irq_enable();

        if (user_mode(regs))
                flags |= FAULT_FLAG_USER;

        perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
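        /*
         * The fault may have to be taken more than once if
         * handle_mm_fault() drops the mmap lock to wait (e.g. for page
         * I/O); the VM_FAULT_RETRY path below jumps back here with
         * FAULT_FLAG_TRIED set.
         */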
retry:
        mmap_read_lock(mm);
        vma = find_vma(mm, address);
        if (!vma)
                goto bad_area;

        if (vma->vm_start <= address)
                goto good_area;

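        /*
         * The address falls below the nearest vma: it can only be valid
         * if that vma is a stack region allowed to grow down far enough
         * to cover it.
         */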
        if (!(vma->vm_flags & VM_GROWSDOWN))
                goto bad_area;

        if (expand_stack(vma, address))
                goto bad_area;

good_area:
        /* Address space is OK. Now check access rights. */
        si_code = SEGV_ACCERR;

        switch (cause) {
        case FLT_IFETCH:
                if (!(vma->vm_flags & VM_EXEC))
                        goto bad_area;
                break;
        case FLT_LOAD:
                if (!(vma->vm_flags & VM_READ))
                        goto bad_area;
                break;
        case FLT_STORE:
                if (!(vma->vm_flags & VM_WRITE))
                        goto bad_area;
                flags |= FAULT_FLAG_WRITE;
                break;
        }

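        /*
         * Hand the fault to the generic MM code. Passing regs lets
         * handle_mm_fault() do the per-fault perf accounting
         * (major/minor fault events) on our behalf.
         */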
        fault = handle_mm_fault(vma, address, flags, regs);

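        /*
         * If the fault was interrupted by a pending signal,
         * handle_mm_fault() has already dropped the mmap lock; just
         * return so the signal can be delivered (or the fault retaken
         * once the task resumes).
         */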
        if (fault_signal_pending(fault, regs))
                return;

        /* The most common case -- we are done. */
        if (likely(!(fault & VM_FAULT_ERROR))) {
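                /*
                 * handle_mm_fault() released the mmap lock before
                 * returning VM_FAULT_RETRY; note that we have tried
                 * once and take the fault again.
                 */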
                if (fault & VM_FAULT_RETRY) {
                        flags |= FAULT_FLAG_TRIED;
                        goto retry;
                }

                mmap_read_unlock(mm);
                return;
        }

        mmap_read_unlock(mm);

        /* Handle copyin/out exception cases */
        if (!user_mode(regs))
                goto no_context;

        if (fault & VM_FAULT_OOM) {
                pagefault_out_of_memory();
                return;
        }

        /* User-mode address is in the memory map, but we are
         * unable to fix up the page fault.
         */
        if (fault & VM_FAULT_SIGBUS) {
                si_signo = SIGBUS;
                si_code = BUS_ADRERR;
        }
        /* Address is not in the memory map */
        else {
                si_signo = SIGSEGV;
                si_code = SEGV_ACCERR;
        }
        force_sig_fault(si_signo, si_code, (void __user *)address);
        return;

bad_area:
        mmap_read_unlock(mm);

        if (user_mode(regs)) {
                force_sig_fault(SIGSEGV, si_code, (void __user *)address);
                return;
        }
        /* Kernel-mode fault falls through */

no_context:
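        /*
         * Kernel-mode fault with no user context to blame: if the
         * faulting instruction (pt_elr(regs)) has an exception table
         * fixup registered, as the uaccess copy helpers do, resume at
         * the fixup address instead of dying.
         */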
146 fixup = search_exception_tables(pt_elr(regs));
147 if (fixup) {
148 pt_set_elr(regs, fixup->fixup);
149 return;
150 }
151
152 /* Things are looking very, very bad now */
153 bust_spinlocks(1);
154 printk(KERN_EMERG "Unable to handle kernel paging request at "
155 "virtual address 0x%08lx, regs %p\n", address, regs);
156 die("Bad Kernel VA", regs, SIGKILL);
157}
158
159
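/*
 * Entry points from the exception vectors. pt_badva() reads the
 * faulting virtual address captured for the exception; the cause code
 * tells the canonical handler which kind of access was attempted.
 */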
160void read_protection_fault(struct pt_regs *regs)
161{
162 unsigned long badvadr = pt_badva(regs);
163
164 do_page_fault(badvadr, FLT_LOAD, regs);
165}
166
167void write_protection_fault(struct pt_regs *regs)
168{
169 unsigned long badvadr = pt_badva(regs);
170
171 do_page_fault(badvadr, FLT_STORE, regs);
172}
173
174void execute_protection_fault(struct pt_regs *regs)
175{
176 unsigned long badvadr = pt_badva(regs);
177
178 do_page_fault(badvadr, FLT_IFETCH, regs);
179}