// SPDX-License-Identifier: GPL-2.0
/*
 *  S390 version
 *    Copyright IBM Corp. 1999
 *    Author(s): Hartmut Penner (hp@de.ibm.com)
 *               Ulrich Weigand (uweigand@de.ibm.com)
 *
 *  Derived from "arch/i386/mm/fault.c"
 *    Copyright (C) 1995  Linus Torvalds
 */

#include <linux/kernel_stat.h>
#include <linux/perf_event.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/compat.h>
#include <linux/smp.h>
#include <linux/kdebug.h>
#include <linux/init.h>
#include <linux/console.h>
#include <linux/extable.h>
#include <linux/hardirq.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>
#include <linux/hugetlb.h>
#include <asm/asm-offsets.h>
#include <asm/diag.h>
#include <asm/pgtable.h>
#include <asm/gmap.h>
#include <asm/irq.h>
#include <asm/mmu_context.h>
#include <asm/facility.h>
#include "../kernel/entry.h"

#define __FAIL_ADDR_MASK -4096L
#define __SUBCODE_MASK 0x0600
#define __PF_RES_FIELD 0x8000000000000000ULL

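/*
 * Private vm_fault_t codes used to route the error paths in
 * do_fault_error(); their values are chosen outside the range used by
 * the generic VM_FAULT_* flags of the core mm so the two cannot
 * collide.
 */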
#define VM_FAULT_BADCONTEXT	0x010000
#define VM_FAULT_BADMAP		0x020000
#define VM_FAULT_BADACCESS	0x040000
#define VM_FAULT_SIGNAL		0x080000
#define VM_FAULT_PFAULT		0x100000

enum fault_type {
	KERNEL_FAULT,
	USER_FAULT,
	VDSO_FAULT,
	GMAP_FAULT,
};

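/*
 * With facility 75 installed the translation-exception identification
 * (TEID) carries a valid store/fetch indication; the 0xc00 mask selects
 * those indication bits. do_exception() compares
 * (trans_exc_code & store_indication) against 0x400 to decide whether
 * to set FAULT_FLAG_WRITE for the access.
 */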
static unsigned long store_indication __read_mostly;

static int __init fault_init(void)
{
	if (test_facility(75))
		store_indication = 0xc00;
	return 0;
}
early_initcall(fault_init);

/*
 * Find out which address space caused the exception.
 */
static enum fault_type get_fault_type(struct pt_regs *regs)
{
	unsigned long trans_exc_code;

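	/*
	 * The lowest two bits of the TEID (int_parm_long) encode the
	 * address space the fault happened in: 0 = primary, 1 = access
	 * register, 2 = secondary, 3 = home (see also dump_fault_info()).
	 */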
	trans_exc_code = regs->int_parm_long & 3;
	if (likely(trans_exc_code == 0)) {
		/* primary space exception */
		if (IS_ENABLED(CONFIG_PGSTE) &&
		    test_pt_regs_flag(regs, PIF_GUEST_FAULT))
			return GMAP_FAULT;
		if (current->thread.mm_segment == USER_DS)
			return USER_FAULT;
		return KERNEL_FAULT;
	}
	if (trans_exc_code == 2) {
		/* secondary space exception */
		if (current->thread.mm_segment & 1) {
			if (current->thread.mm_segment == USER_DS_SACF)
				return USER_FAULT;
			return KERNEL_FAULT;
		}
		return VDSO_FAULT;
	}
	if (trans_exc_code == 1) {
		/* access register mode, not used in the kernel */
		return USER_FAULT;
	}
	/* home space exception -> access via kernel ASCE */
	return KERNEL_FAULT;
}

static int bad_address(void *p)
{
	unsigned long dummy;

	return probe_kernel_address((unsigned long *)p, dummy);
}

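/*
 * Walk the page table the given ASCE points to, starting at its top
 * level, and print one entry per level for the given address; the walk
 * stops early at an invalid or large (huge-mapping) entry.
 */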
static void dump_pagetable(unsigned long asce, unsigned long address)
{
	unsigned long *table = __va(asce & _ASCE_ORIGIN);

	pr_alert("AS:%016lx ", asce);
	switch (asce & _ASCE_TYPE_MASK) {
	case _ASCE_TYPE_REGION1:
		table += (address & _REGION1_INDEX) >> _REGION1_SHIFT;
		if (bad_address(table))
			goto bad;
		pr_cont("R1:%016lx ", *table);
		if (*table & _REGION_ENTRY_INVALID)
			goto out;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		/* fallthrough */
	case _ASCE_TYPE_REGION2:
		table += (address & _REGION2_INDEX) >> _REGION2_SHIFT;
		if (bad_address(table))
			goto bad;
		pr_cont("R2:%016lx ", *table);
		if (*table & _REGION_ENTRY_INVALID)
			goto out;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		/* fallthrough */
	case _ASCE_TYPE_REGION3:
		table += (address & _REGION3_INDEX) >> _REGION3_SHIFT;
		if (bad_address(table))
			goto bad;
		pr_cont("R3:%016lx ", *table);
		if (*table & (_REGION_ENTRY_INVALID | _REGION3_ENTRY_LARGE))
			goto out;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		/* fallthrough */
	case _ASCE_TYPE_SEGMENT:
		table += (address & _SEGMENT_INDEX) >> _SEGMENT_SHIFT;
		if (bad_address(table))
			goto bad;
		pr_cont("S:%016lx ", *table);
		if (*table & (_SEGMENT_ENTRY_INVALID | _SEGMENT_ENTRY_LARGE))
			goto out;
		table = (unsigned long *)(*table & _SEGMENT_ENTRY_ORIGIN);
	}
	table += (address & _PAGE_INDEX) >> _PAGE_SHIFT;
	if (bad_address(table))
		goto bad;
	pr_cont("P:%016lx ", *table);
out:
	pr_cont("\n");
	return;
bad:
	pr_cont("BAD\n");
}

static void dump_fault_info(struct pt_regs *regs)
{
	unsigned long asce;

	pr_alert("Failing address: %016lx TEID: %016lx\n",
		 regs->int_parm_long & __FAIL_ADDR_MASK, regs->int_parm_long);
	pr_alert("Fault in ");
	switch (regs->int_parm_long & 3) {
	case 3:
		pr_cont("home space ");
		break;
	case 2:
		pr_cont("secondary space ");
		break;
	case 1:
		pr_cont("access register ");
		break;
	case 0:
		pr_cont("primary space ");
		break;
	}
	pr_cont("mode while using ");
	switch (get_fault_type(regs)) {
	case USER_FAULT:
		asce = S390_lowcore.user_asce;
		pr_cont("user ");
		break;
	case VDSO_FAULT:
		asce = S390_lowcore.vdso_asce;
		pr_cont("vdso ");
		break;
	case GMAP_FAULT:
		asce = ((struct gmap *) S390_lowcore.gmap)->asce;
		pr_cont("gmap ");
		break;
	case KERNEL_FAULT:
		asce = S390_lowcore.kernel_asce;
		pr_cont("kernel ");
		break;
	default:
		unreachable();
	}
	pr_cont("ASCE.\n");
	dump_pagetable(asce, regs->int_parm_long & __FAIL_ADDR_MASK);
}

int show_unhandled_signals = 1;

void report_user_fault(struct pt_regs *regs, long signr, int is_mm_fault)
{
	if ((task_pid_nr(current) > 1) && !show_unhandled_signals)
		return;
	if (!unhandled_signal(current, signr))
		return;
	if (!printk_ratelimit())
		return;
	printk(KERN_ALERT "User process fault: interruption code %04x ilc:%d ",
	       regs->int_code & 0xffff, regs->int_code >> 17);
	print_vma_addr(KERN_CONT "in ", regs->psw.addr);
	printk(KERN_CONT "\n");
	if (is_mm_fault)
		dump_fault_info(regs);
	show_regs(regs);
}

/*
 * Send SIGSEGV to task. This is an external routine
 * to keep the stack usage of do_page_fault small.
 */
static noinline void do_sigsegv(struct pt_regs *regs, int si_code)
{
	report_user_fault(regs, SIGSEGV, 1);
	force_sig_fault(SIGSEGV, si_code,
			(void __user *)(regs->int_parm_long & __FAIL_ADDR_MASK));
}

const struct exception_table_entry *s390_search_extables(unsigned long addr)
{
	const struct exception_table_entry *fixup;

	fixup = search_extable(__start_dma_ex_table,
			       __stop_dma_ex_table - __start_dma_ex_table,
			       addr);
	if (!fixup)
		fixup = search_exception_tables(addr);
	return fixup;
}

static noinline void do_no_context(struct pt_regs *regs)
{
	const struct exception_table_entry *fixup;

	/* Are we prepared to handle this kernel fault? */
	fixup = s390_search_extables(regs->psw.addr);
	if (fixup) {
		regs->psw.addr = extable_fixup(fixup);
		return;
	}

	/*
	 * Oops. The kernel tried to access some bad page. We'll have to
	 * terminate things with extreme prejudice.
	 */
	if (get_fault_type(regs) == KERNEL_FAULT)
		printk(KERN_ALERT "Unable to handle kernel pointer dereference"
		       " in virtual kernel address space\n");
	else
		printk(KERN_ALERT "Unable to handle kernel paging request"
		       " in virtual user address space\n");
	dump_fault_info(regs);
	die(regs, "Oops");
	do_exit(SIGKILL);
}

static noinline void do_low_address(struct pt_regs *regs)
{
	/* Low-address protection hit in kernel mode means
	   NULL pointer write access in kernel mode. */
	if (regs->psw.mask & PSW_MASK_PSTATE) {
		/* Low-address protection hit in user mode 'cannot happen'. */
		die(regs, "Low-address protection");
		do_exit(SIGKILL);
	}

	do_no_context(regs);
}

static noinline void do_sigbus(struct pt_regs *regs)
{
	/*
	 * Send a sigbus, regardless of whether we were in kernel
	 * or user mode.
	 */
	force_sig_fault(SIGBUS, BUS_ADRERR,
			(void __user *)(regs->int_parm_long & __FAIL_ADDR_MASK));
}

static noinline int signal_return(struct pt_regs *regs)
{
	u16 instruction;
	int rc;

	rc = __get_user(instruction, (u16 __user *) regs->psw.addr);
	if (rc)
		return rc;
	if (instruction == 0x0a77) {
		set_pt_regs_flag(regs, PIF_SYSCALL);
		regs->int_code = 0x00040077;
		return 0;
	} else if (instruction == 0x0aad) {
		set_pt_regs_flag(regs, PIF_SYSCALL);
		regs->int_code = 0x000400ad;
		return 0;
	}
	return -EACCES;
}

static noinline void do_fault_error(struct pt_regs *regs, int access,
				    vm_fault_t fault)
{
	int si_code;

	switch (fault) {
	case VM_FAULT_BADACCESS:
		if (access == VM_EXEC && signal_return(regs) == 0)
			break;
		/* fallthrough */
	case VM_FAULT_BADMAP:
		/* Bad memory access. Check if it is kernel or user space. */
		if (user_mode(regs)) {
			/* User mode accesses just cause a SIGSEGV */
			si_code = (fault == VM_FAULT_BADMAP) ?
				SEGV_MAPERR : SEGV_ACCERR;
			do_sigsegv(regs, si_code);
			break;
		}
		/* fallthrough */
	case VM_FAULT_BADCONTEXT:
		/* fallthrough */
	case VM_FAULT_PFAULT:
		do_no_context(regs);
		break;
	case VM_FAULT_SIGNAL:
		if (!user_mode(regs))
			do_no_context(regs);
		break;
	default: /* fault & VM_FAULT_ERROR */
		if (fault & VM_FAULT_OOM) {
			if (!user_mode(regs))
				do_no_context(regs);
			else
				pagefault_out_of_memory();
		} else if (fault & VM_FAULT_SIGSEGV) {
			/* Kernel mode? Handle exceptions or die */
			if (!user_mode(regs))
				do_no_context(regs);
			else
				do_sigsegv(regs, SEGV_MAPERR);
		} else if (fault & VM_FAULT_SIGBUS) {
			/* Kernel mode? Handle exceptions or die */
			if (!user_mode(regs))
				do_no_context(regs);
			else
				do_sigbus(regs);
		} else
			BUG();
		break;
	}
}

/*
 * This routine handles page faults.  It determines the address,
 * and the problem, and then passes it off to one of the appropriate
 * routines.
 *
 * interruption code (int_code):
 *   04       Protection           ->  Write-Protection  (suppression)
 *   10       Segment translation  ->  Not present       (nullification)
 *   11       Page translation     ->  Not present       (nullification)
 *   3b       Region third trans.  ->  Not present       (nullification)
 */
static inline vm_fault_t do_exception(struct pt_regs *regs, int access)
{
	struct gmap *gmap;
	struct task_struct *tsk;
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	enum fault_type type;
	unsigned long trans_exc_code;
	unsigned long address;
	unsigned int flags;
	vm_fault_t fault;

	tsk = current;
	/*
	 * The instruction that caused the program check has
	 * been nullified. Don't signal single step via SIGTRAP.
	 */
	clear_pt_regs_flag(regs, PIF_PER_TRAP);

	if (kprobe_page_fault(regs, 14))
		return 0;

	mm = tsk->mm;
	trans_exc_code = regs->int_parm_long;

	/*
	 * Verify that the fault happened in user space, that
	 * we are not in an interrupt and that there is a
	 * user context.
	 */
	fault = VM_FAULT_BADCONTEXT;
	type = get_fault_type(regs);
	switch (type) {
	case KERNEL_FAULT:
		goto out;
	case VDSO_FAULT:
		fault = VM_FAULT_BADMAP;
		goto out;
	case USER_FAULT:
	case GMAP_FAULT:
		if (faulthandler_disabled() || !mm)
			goto out;
		break;
	}

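	/*
	 * The upper bits of the TEID hold the failing address;
	 * __FAIL_ADDR_MASK (-4096L) clears the low twelve status bits
	 * to yield the page-aligned fault address.
	 */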
	address = trans_exc_code & __FAIL_ADDR_MASK;
	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
	flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
	if (user_mode(regs))
		flags |= FAULT_FLAG_USER;
	if (access == VM_WRITE || (trans_exc_code & store_indication) == 0x400)
		flags |= FAULT_FLAG_WRITE;
	down_read(&mm->mmap_sem);

	gmap = NULL;
	if (IS_ENABLED(CONFIG_PGSTE) && type == GMAP_FAULT) {
		gmap = (struct gmap *) S390_lowcore.gmap;
		current->thread.gmap_addr = address;
		current->thread.gmap_write_flag = !!(flags & FAULT_FLAG_WRITE);
		current->thread.gmap_int_code = regs->int_code & 0xffff;
		address = __gmap_translate(gmap, address);
		if (address == -EFAULT) {
			fault = VM_FAULT_BADMAP;
			goto out_up;
		}
		if (gmap->pfault_enabled)
			flags |= FAULT_FLAG_RETRY_NOWAIT;
	}

retry:
	fault = VM_FAULT_BADMAP;
	vma = find_vma(mm, address);
	if (!vma)
		goto out_up;

	if (unlikely(vma->vm_start > address)) {
		if (!(vma->vm_flags & VM_GROWSDOWN))
			goto out_up;
		if (expand_stack(vma, address))
			goto out_up;
	}

	/*
	 * Ok, we have a good vm_area for this memory access, so
	 * we can handle it..
	 */
	fault = VM_FAULT_BADACCESS;
	if (unlikely(!(vma->vm_flags & access)))
		goto out_up;

	if (is_vm_hugetlb_page(vma))
		address &= HPAGE_MASK;
	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	fault = handle_mm_fault(vma, address, flags);
	if (fault_signal_pending(fault, regs)) {
		fault = VM_FAULT_SIGNAL;
		if (flags & FAULT_FLAG_RETRY_NOWAIT)
			goto out_up;
		goto out;
	}
	if (unlikely(fault & VM_FAULT_ERROR))
		goto out_up;

	/*
	 * Major/minor page fault accounting is only done on the
	 * initial attempt. If we go through a retry, it is extremely
	 * likely that the page will be found in page cache at that point.
	 */
	if (flags & FAULT_FLAG_ALLOW_RETRY) {
		if (fault & VM_FAULT_MAJOR) {
			tsk->maj_flt++;
			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
				      regs, address);
		} else {
			tsk->min_flt++;
			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
				      regs, address);
		}
		if (fault & VM_FAULT_RETRY) {
			if (IS_ENABLED(CONFIG_PGSTE) && gmap &&
			    (flags & FAULT_FLAG_RETRY_NOWAIT)) {
				/* FAULT_FLAG_RETRY_NOWAIT has been set,
				 * mmap_sem has not been released */
				current->thread.gmap_pfault = 1;
				fault = VM_FAULT_PFAULT;
				goto out_up;
			}
			/* Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk
			 * of starvation. */
			flags &= ~(FAULT_FLAG_ALLOW_RETRY |
				   FAULT_FLAG_RETRY_NOWAIT);
			flags |= FAULT_FLAG_TRIED;
			down_read(&mm->mmap_sem);
			goto retry;
		}
	}
	if (IS_ENABLED(CONFIG_PGSTE) && gmap) {
		address = __gmap_link(gmap, current->thread.gmap_addr,
				      address);
		if (address == -EFAULT) {
			fault = VM_FAULT_BADMAP;
			goto out_up;
		}
		if (address == -ENOMEM) {
			fault = VM_FAULT_OOM;
			goto out_up;
		}
	}
	fault = 0;
out_up:
	up_read(&mm->mmap_sem);
out:
	return fault;
}

void do_protection_exception(struct pt_regs *regs)
{
	unsigned long trans_exc_code;
	int access;
	vm_fault_t fault;

	trans_exc_code = regs->int_parm_long;
	/*
	 * Protection exceptions are suppressing, decrement psw address.
	 * The exception to this rule are aborted transactions, for these
	 * the PSW already points to the correct location.
	 */
	if (!(regs->int_code & 0x200))
		regs->psw.addr = __rewind_psw(regs->psw, regs->int_code >> 16);
	/*
	 * Check for low-address protection. This needs to be treated
	 * as a special case because the translation exception code
	 * field is not guaranteed to contain valid data in this case.
	 */
	if (unlikely(!(trans_exc_code & 4))) {
		do_low_address(regs);
		return;
	}
	if (unlikely(MACHINE_HAS_NX && (trans_exc_code & 0x80))) {
		regs->int_parm_long = (trans_exc_code & ~PAGE_MASK) |
				      (regs->psw.addr & PAGE_MASK);
		access = VM_EXEC;
		fault = VM_FAULT_BADACCESS;
	} else {
		access = VM_WRITE;
		fault = do_exception(regs, access);
	}
	if (unlikely(fault))
		do_fault_error(regs, access, fault);
}
NOKPROBE_SYMBOL(do_protection_exception);

void do_dat_exception(struct pt_regs *regs)
{
	int access;
	vm_fault_t fault;

	access = VM_READ | VM_EXEC | VM_WRITE;
	fault = do_exception(regs, access);
	if (unlikely(fault))
		do_fault_error(regs, access, fault);
}
NOKPROBE_SYMBOL(do_dat_exception);

#ifdef CONFIG_PFAULT
/*
 * 'pfault' pseudo page fault routines.
 */
static int pfault_disable;

static int __init nopfault(char *str)
{
	pfault_disable = 1;
	return 1;
}

__setup("nopfault", nopfault);

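/*
 * Parameter block for the pfault handshake via DIAGNOSE 0x258.
 * reffcode selects the function: 0 establishes pfault handshaking
 * (pfault_init), 1 cancels it (pfault_fini). refgaddr names the
 * guest location whose contents come back as the interrupt token;
 * using __LC_LPP means the token carries the pid of the faulting
 * task, which pfault_interrupt() extracts with LPP_PID_MASK.
 */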
struct pfault_refbk {
	u16 refdiagc;
	u16 reffcode;
	u16 refdwlen;
	u16 refversn;
	u64 refgaddr;
	u64 refselmk;
	u64 refcmpmk;
	u64 reserved;
} __attribute__ ((packed, aligned(8)));

static struct pfault_refbk pfault_init_refbk = {
	.refdiagc = 0x258,
	.reffcode = 0,
	.refdwlen = 5,
	.refversn = 2,
	.refgaddr = __LC_LPP,
	.refselmk = 1ULL << 48,
	.refcmpmk = 1ULL << 48,
	.reserved = __PF_RES_FIELD
};

int pfault_init(void)
{
	int rc;

	if (pfault_disable)
		return -1;
	diag_stat_inc(DIAG_STAT_X258);
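	/*
	 * If the hypervisor does not provide DIAGNOSE 0x258, the
	 * instruction traps; the EX_TABLE entry then resumes at label 1,
	 * which sets rc to 8 to report the failure.
	 */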
	asm volatile(
		"	diag	%1,%0,0x258\n"
		"0:	j	2f\n"
		"1:	la	%0,8\n"
		"2:\n"
		EX_TABLE(0b,1b)
		: "=d" (rc)
		: "a" (&pfault_init_refbk), "m" (pfault_init_refbk) : "cc");
	return rc;
}

static struct pfault_refbk pfault_fini_refbk = {
	.refdiagc = 0x258,
	.reffcode = 1,
	.refdwlen = 5,
	.refversn = 2,
};

void pfault_fini(void)
{
	if (pfault_disable)
		return;
	diag_stat_inc(DIAG_STAT_X258);
	asm volatile(
		"	diag	%0,0,0x258\n"
		"0:	nopr	%%r7\n"
		EX_TABLE(0b,0b)
		: : "a" (&pfault_fini_refbk), "m" (pfault_fini_refbk) : "cc");
}

static DEFINE_SPINLOCK(pfault_lock);
static LIST_HEAD(pfault_list);

#define PF_COMPLETE	0x0080

/*
 * The mechanism of our pfault code: if Linux is running as a guest, a user
 * space process is running, and that process accesses a page that the host
 * has paged out, we get a pfault interrupt.
 *
 * This allows us, within the guest, to schedule a different process. Without
 * this mechanism the host would have to suspend the whole virtual cpu until
 * the page has been paged in.
 *
 * So when we get such an interrupt we set the state of the current task
 * to uninterruptible and also set the need_resched flag. Both happen within
 * interrupt context(!). If we later on want to return to user space we
 * recognize the need_resched flag and then call schedule().  It's not very
 * obvious how this works...
 *
 * Of course we have a lot of additional fun with the completion interrupt (->
 * host signals that a page of a process has been paged in and the process can
 * continue to run). This interrupt can arrive on any cpu and, since we have
 * virtual cpus, actually appear before the interrupt that signals that a page
 * is missing.
 */
static void pfault_interrupt(struct ext_code ext_code,
			     unsigned int param32, unsigned long param64)
{
	struct task_struct *tsk;
	__u16 subcode;
	pid_t pid;

	/*
	 * Get the external interruption subcode & pfault initial/completion
	 * signal bit. VM stores this in the 'cpu address' field associated
	 * with the external interrupt.
	 */
	subcode = ext_code.subcode;
	if ((subcode & 0xff00) != __SUBCODE_MASK)
		return;
	inc_irq_stat(IRQEXT_PFL);
	/* Get the token (= pid of the affected task). */
	pid = param64 & LPP_PID_MASK;
	rcu_read_lock();
	tsk = find_task_by_pid_ns(pid, &init_pid_ns);
	if (tsk)
		get_task_struct(tsk);
	rcu_read_unlock();
	if (!tsk)
		return;
	spin_lock(&pfault_lock);
	if (subcode & PF_COMPLETE) {
		/* signal bit is set -> a page has been swapped in by VM */
		if (tsk->thread.pfault_wait == 1) {
			/* Initial interrupt was faster than the completion
			 * interrupt. pfault_wait is valid. Set pfault_wait
			 * back to zero and wake up the process. This can
			 * safely be done because the task is still sleeping
			 * and can't produce new pfaults. */
			tsk->thread.pfault_wait = 0;
			list_del(&tsk->thread.list);
			wake_up_process(tsk);
			put_task_struct(tsk);
		} else {
			/* Completion interrupt was faster than initial
			 * interrupt. Set pfault_wait to -1 so the initial
			 * interrupt doesn't put the task to sleep.
			 * If the task is not running, ignore the completion
			 * interrupt since it must be a leftover of a PFAULT
			 * CANCEL operation which didn't remove all pending
			 * completion interrupts. */
			if (tsk->state == TASK_RUNNING)
				tsk->thread.pfault_wait = -1;
		}
	} else {
		/* signal bit not set -> a real page is missing. */
		if (WARN_ON_ONCE(tsk != current))
			goto out;
		if (tsk->thread.pfault_wait == 1) {
			/* Already on the list with a reference: put to sleep */
			goto block;
		} else if (tsk->thread.pfault_wait == -1) {
			/* Completion interrupt was faster than the initial
			 * interrupt (pfault_wait == -1). Set pfault_wait
			 * back to zero and exit. */
			tsk->thread.pfault_wait = 0;
		} else {
			/* Initial interrupt arrived before completion
			 * interrupt. Let the task sleep.
			 * An extra task reference is needed since a different
			 * cpu may set the task state to TASK_RUNNING again
			 * before the scheduler is reached. */
			get_task_struct(tsk);
			tsk->thread.pfault_wait = 1;
			list_add(&tsk->thread.list, &pfault_list);
block:
			/* Since this must be a userspace fault, there
			 * is no kernel task state to trample. Rely on the
			 * return to userspace schedule() to block. */
			__set_current_state(TASK_UNINTERRUPTIBLE);
			set_tsk_need_resched(tsk);
			set_preempt_need_resched();
		}
	}
out:
	spin_unlock(&pfault_lock);
	put_task_struct(tsk);
}

static int pfault_cpu_dead(unsigned int cpu)
{
	struct thread_struct *thread, *next;
	struct task_struct *tsk;

	spin_lock_irq(&pfault_lock);
	list_for_each_entry_safe(thread, next, &pfault_list, list) {
		thread->pfault_wait = 0;
		list_del(&thread->list);
		tsk = container_of(thread, struct task_struct, thread);
		wake_up_process(tsk);
		put_task_struct(tsk);
	}
	spin_unlock_irq(&pfault_lock);
	return 0;
}

static int __init pfault_irq_init(void)
{
	int rc;

	rc = register_external_irq(EXT_IRQ_CP_SERVICE, pfault_interrupt);
	if (rc)
		goto out_extint;
	rc = pfault_init() == 0 ? 0 : -EOPNOTSUPP;
	if (rc)
		goto out_pfault;
	irq_subclass_register(IRQ_SUBCLASS_SERVICE_SIGNAL);
	cpuhp_setup_state_nocalls(CPUHP_S390_PFAULT_DEAD, "s390/pfault:dead",
				  NULL, pfault_cpu_dead);
	return 0;

out_pfault:
	unregister_external_irq(EXT_IRQ_CP_SERVICE, pfault_interrupt);
out_extint:
	pfault_disable = 1;
	return rc;
}
early_initcall(pfault_irq_init);

#endif /* CONFIG_PFAULT */