// SPDX-License-Identifier: GPL-2.0
/*
 *  S390 version
 *    Copyright IBM Corp. 1999
 *    Author(s): Hartmut Penner (hp@de.ibm.com)
 *               Ulrich Weigand (uweigand@de.ibm.com)
 *
 *  Derived from "arch/i386/mm/fault.c"
 *    Copyright (C) 1995  Linus Torvalds
 */

#include <linux/kernel_stat.h>
#include <linux/perf_event.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/compat.h>
#include <linux/smp.h>
#include <linux/kdebug.h>
#include <linux/init.h>
#include <linux/console.h>
#include <linux/extable.h>
#include <linux/hardirq.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>
#include <linux/hugetlb.h>
#include <asm/asm-offsets.h>
#include <asm/diag.h>
#include <asm/pgtable.h>
#include <asm/gmap.h>
#include <asm/irq.h>
#include <asm/mmu_context.h>
#include <asm/facility.h>
#include "../kernel/entry.h"

#define __FAIL_ADDR_MASK -4096L
#define __SUBCODE_MASK 0x0600
#define __PF_RES_FIELD 0x8000000000000000ULL

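/*
 * Architecture-private fault codes returned by do_exception() in addition
 * to the generic VM_FAULT_* flags; do_fault_error() translates them into
 * the appropriate signal or kernel exception handling.
 */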
#define VM_FAULT_BADCONTEXT	0x010000
#define VM_FAULT_BADMAP		0x020000
#define VM_FAULT_BADACCESS	0x040000
#define VM_FAULT_SIGNAL		0x080000
#define VM_FAULT_PFAULT		0x100000

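/*
 * The address space in which the faulting access happened, as classified
 * by get_fault_type() below.
 */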
enum fault_type {
	KERNEL_FAULT,
	USER_FAULT,
	VDSO_FAULT,
	GMAP_FAULT,
};

static unsigned long store_indication __read_mostly;

static int __init fault_init(void)
{
	if (test_facility(75))
		store_indication = 0xc00;
	return 0;
}
early_initcall(fault_init);

static inline int notify_page_fault(struct pt_regs *regs)
{
	int ret = 0;

	/* kprobe_running() needs smp_processor_id() */
	if (kprobes_built_in() && !user_mode(regs)) {
		preempt_disable();
		if (kprobe_running() && kprobe_fault_handler(regs, 14))
			ret = 1;
		preempt_enable();
	}
	return ret;
}

/*
 * Find out which address space caused the exception.
 * Access register mode is impossible, ignore space == 3.
 */
static enum fault_type get_fault_type(struct pt_regs *regs)
{
	unsigned long trans_exc_code;

	trans_exc_code = regs->int_parm_long & 3;
	if (likely(trans_exc_code == 0)) {
		/* primary space exception */
		if (IS_ENABLED(CONFIG_PGSTE) &&
		    test_pt_regs_flag(regs, PIF_GUEST_FAULT))
			return GMAP_FAULT;
		if (current->thread.mm_segment == USER_DS)
			return USER_FAULT;
		return KERNEL_FAULT;
	}
	if (trans_exc_code == 2) {
		/* secondary space exception */
		if (current->thread.mm_segment & 1) {
			if (current->thread.mm_segment == USER_DS_SACF)
				return USER_FAULT;
			return KERNEL_FAULT;
		}
		return VDSO_FAULT;
	}
	/* home space exception -> access via kernel ASCE */
	return KERNEL_FAULT;
}

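/* Return non-zero if a page-table entry cannot be read safely. */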
static int bad_address(void *p)
{
	unsigned long dummy;

	return probe_kernel_address((unsigned long *)p, dummy);
}

static void dump_pagetable(unsigned long asce, unsigned long address)
{
	unsigned long *table = __va(asce & _ASCE_ORIGIN);

	pr_alert("AS:%016lx ", asce);
	switch (asce & _ASCE_TYPE_MASK) {
	case _ASCE_TYPE_REGION1:
		table += (address & _REGION1_INDEX) >> _REGION1_SHIFT;
		if (bad_address(table))
			goto bad;
		pr_cont("R1:%016lx ", *table);
		if (*table & _REGION_ENTRY_INVALID)
			goto out;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		/* fallthrough */
	case _ASCE_TYPE_REGION2:
		table += (address & _REGION2_INDEX) >> _REGION2_SHIFT;
		if (bad_address(table))
			goto bad;
		pr_cont("R2:%016lx ", *table);
		if (*table & _REGION_ENTRY_INVALID)
			goto out;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		/* fallthrough */
	case _ASCE_TYPE_REGION3:
		table += (address & _REGION3_INDEX) >> _REGION3_SHIFT;
		if (bad_address(table))
			goto bad;
		pr_cont("R3:%016lx ", *table);
		if (*table & (_REGION_ENTRY_INVALID | _REGION3_ENTRY_LARGE))
			goto out;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		/* fallthrough */
	case _ASCE_TYPE_SEGMENT:
		table += (address & _SEGMENT_INDEX) >> _SEGMENT_SHIFT;
		if (bad_address(table))
			goto bad;
		pr_cont("S:%016lx ", *table);
		if (*table & (_SEGMENT_ENTRY_INVALID | _SEGMENT_ENTRY_LARGE))
			goto out;
		table = (unsigned long *)(*table & _SEGMENT_ENTRY_ORIGIN);
	}
	table += (address & _PAGE_INDEX) >> _PAGE_SHIFT;
	if (bad_address(table))
		goto bad;
	pr_cont("P:%016lx ", *table);
out:
	pr_cont("\n");
	return;
bad:
	pr_cont("BAD\n");
}

static void dump_fault_info(struct pt_regs *regs)
{
	unsigned long asce;

	pr_alert("Failing address: %016lx TEID: %016lx\n",
		 regs->int_parm_long & __FAIL_ADDR_MASK, regs->int_parm_long);
	pr_alert("Fault in ");
	switch (regs->int_parm_long & 3) {
	case 3:
		pr_cont("home space ");
		break;
	case 2:
		pr_cont("secondary space ");
		break;
	case 1:
		pr_cont("access register ");
		break;
	case 0:
		pr_cont("primary space ");
		break;
	}
	pr_cont("mode while using ");
	switch (get_fault_type(regs)) {
	case USER_FAULT:
		asce = S390_lowcore.user_asce;
		pr_cont("user ");
		break;
	case VDSO_FAULT:
		asce = S390_lowcore.vdso_asce;
		pr_cont("vdso ");
		break;
	case GMAP_FAULT:
		asce = ((struct gmap *) S390_lowcore.gmap)->asce;
		pr_cont("gmap ");
		break;
	case KERNEL_FAULT:
		asce = S390_lowcore.kernel_asce;
		pr_cont("kernel ");
		break;
	default:
		unreachable();
	}
	pr_cont("ASCE.\n");
	dump_pagetable(asce, regs->int_parm_long & __FAIL_ADDR_MASK);
}

int show_unhandled_signals = 1;

void report_user_fault(struct pt_regs *regs, long signr, int is_mm_fault)
{
	if ((task_pid_nr(current) > 1) && !show_unhandled_signals)
		return;
	if (!unhandled_signal(current, signr))
		return;
	if (!printk_ratelimit())
		return;
	printk(KERN_ALERT "User process fault: interruption code %04x ilc:%d ",
	       regs->int_code & 0xffff, regs->int_code >> 17);
	print_vma_addr(KERN_CONT "in ", regs->psw.addr);
	printk(KERN_CONT "\n");
	if (is_mm_fault)
		dump_fault_info(regs);
	show_regs(regs);
}

/*
 * Send SIGSEGV to task. This is an external routine
 * to keep the stack usage of do_page_fault small.
 */
static noinline void do_sigsegv(struct pt_regs *regs, int si_code)
{
	report_user_fault(regs, SIGSEGV, 1);
	force_sig_fault(SIGSEGV, si_code,
			(void __user *)(regs->int_parm_long & __FAIL_ADDR_MASK),
			current);
}

const struct exception_table_entry *s390_search_extables(unsigned long addr)
{
	const struct exception_table_entry *fixup;

	fixup = search_extable(__start_dma_ex_table,
			       __stop_dma_ex_table - __start_dma_ex_table,
			       addr);
	if (!fixup)
		fixup = search_exception_tables(addr);
	return fixup;
}

static noinline void do_no_context(struct pt_regs *regs)
{
	const struct exception_table_entry *fixup;

	/* Are we prepared to handle this kernel fault? */
	fixup = s390_search_extables(regs->psw.addr);
	if (fixup) {
		regs->psw.addr = extable_fixup(fixup);
		return;
	}

	/*
	 * Oops. The kernel tried to access some bad page. We'll have to
	 * terminate things with extreme prejudice.
	 */
	if (get_fault_type(regs) == KERNEL_FAULT)
		printk(KERN_ALERT "Unable to handle kernel pointer dereference"
		       " in virtual kernel address space\n");
	else
		printk(KERN_ALERT "Unable to handle kernel paging request"
		       " in virtual user address space\n");
	dump_fault_info(regs);
	die(regs, "Oops");
	do_exit(SIGKILL);
}

static noinline void do_low_address(struct pt_regs *regs)
{
	/* Low-address protection hit in kernel mode means
	   NULL pointer write access in kernel mode. */
	if (regs->psw.mask & PSW_MASK_PSTATE) {
		/* Low-address protection hit in user mode 'cannot happen'. */
		die (regs, "Low-address protection");
		do_exit(SIGKILL);
	}

	do_no_context(regs);
}

static noinline void do_sigbus(struct pt_regs *regs)
{
	/*
	 * Send a sigbus, regardless of whether we were in kernel
	 * or user mode.
	 */
	force_sig_fault(SIGBUS, BUS_ADRERR,
			(void __user *)(regs->int_parm_long & __FAIL_ADDR_MASK),
			current);
}

static noinline int signal_return(struct pt_regs *regs)
{
	u16 instruction;
	int rc;

	rc = __get_user(instruction, (u16 __user *) regs->psw.addr);
	if (rc)
		return rc;
	if (instruction == 0x0a77) {
		set_pt_regs_flag(regs, PIF_SYSCALL);
		regs->int_code = 0x00040077;
		return 0;
	} else if (instruction == 0x0aad) {
		set_pt_regs_flag(regs, PIF_SYSCALL);
		regs->int_code = 0x000400ad;
		return 0;
	}
	return -EACCES;
}

static noinline void do_fault_error(struct pt_regs *regs, int access,
				    vm_fault_t fault)
{
	int si_code;

	switch (fault) {
	case VM_FAULT_BADACCESS:
		if (access == VM_EXEC && signal_return(regs) == 0)
			break;
	case VM_FAULT_BADMAP:
		/* Bad memory access. Check if it is kernel or user space. */
		if (user_mode(regs)) {
			/* User mode accesses just cause a SIGSEGV */
			si_code = (fault == VM_FAULT_BADMAP) ?
				SEGV_MAPERR : SEGV_ACCERR;
			do_sigsegv(regs, si_code);
			break;
		}
	case VM_FAULT_BADCONTEXT:
	case VM_FAULT_PFAULT:
		do_no_context(regs);
		break;
	case VM_FAULT_SIGNAL:
		if (!user_mode(regs))
			do_no_context(regs);
		break;
	default: /* fault & VM_FAULT_ERROR */
		if (fault & VM_FAULT_OOM) {
			if (!user_mode(regs))
				do_no_context(regs);
			else
				pagefault_out_of_memory();
		} else if (fault & VM_FAULT_SIGSEGV) {
			/* Kernel mode? Handle exceptions or die */
			if (!user_mode(regs))
				do_no_context(regs);
			else
				do_sigsegv(regs, SEGV_MAPERR);
		} else if (fault & VM_FAULT_SIGBUS) {
			/* Kernel mode? Handle exceptions or die */
			if (!user_mode(regs))
				do_no_context(regs);
			else
				do_sigbus(regs);
		} else
			BUG();
		break;
	}
}

/*
 * This routine handles page faults.  It determines the address,
 * and the problem, and then passes it off to one of the appropriate
 * routines.
 *
 * interruption code (int_code):
 *   04       Protection           ->  Write-Protection  (suppression)
 *   10       Segment translation  ->  Not present       (nullification)
 *   11       Page translation     ->  Not present       (nullification)
 *   3b       Region third trans.  ->  Not present       (nullification)
 */
static inline vm_fault_t do_exception(struct pt_regs *regs, int access)
{
	struct gmap *gmap;
	struct task_struct *tsk;
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	enum fault_type type;
	unsigned long trans_exc_code;
	unsigned long address;
	unsigned int flags;
	vm_fault_t fault;

	tsk = current;
	/*
	 * The instruction that caused the program check has
	 * been nullified. Don't signal single step via SIGTRAP.
	 */
	clear_pt_regs_flag(regs, PIF_PER_TRAP);

	if (notify_page_fault(regs))
		return 0;

	mm = tsk->mm;
	trans_exc_code = regs->int_parm_long;

	/*
	 * Verify that the fault happened in user space, that
	 * we are not in an interrupt and that there is a
	 * user context.
	 */
	fault = VM_FAULT_BADCONTEXT;
	type = get_fault_type(regs);
	switch (type) {
	case KERNEL_FAULT:
		goto out;
	case VDSO_FAULT:
		fault = VM_FAULT_BADMAP;
		goto out;
	case USER_FAULT:
	case GMAP_FAULT:
		if (faulthandler_disabled() || !mm)
			goto out;
		break;
	}

	address = trans_exc_code & __FAIL_ADDR_MASK;
	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
	flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
	if (user_mode(regs))
		flags |= FAULT_FLAG_USER;
	if (access == VM_WRITE || (trans_exc_code & store_indication) == 0x400)
		flags |= FAULT_FLAG_WRITE;
	down_read(&mm->mmap_sem);

	gmap = NULL;
	if (IS_ENABLED(CONFIG_PGSTE) && type == GMAP_FAULT) {
		gmap = (struct gmap *) S390_lowcore.gmap;
		current->thread.gmap_addr = address;
		current->thread.gmap_write_flag = !!(flags & FAULT_FLAG_WRITE);
		current->thread.gmap_int_code = regs->int_code & 0xffff;
		address = __gmap_translate(gmap, address);
		if (address == -EFAULT) {
			fault = VM_FAULT_BADMAP;
			goto out_up;
		}
		if (gmap->pfault_enabled)
			flags |= FAULT_FLAG_RETRY_NOWAIT;
	}

retry:
	fault = VM_FAULT_BADMAP;
	vma = find_vma(mm, address);
	if (!vma)
		goto out_up;

	if (unlikely(vma->vm_start > address)) {
		if (!(vma->vm_flags & VM_GROWSDOWN))
			goto out_up;
		if (expand_stack(vma, address))
			goto out_up;
	}

	/*
	 * Ok, we have a good vm_area for this memory access, so
	 * we can handle it..
	 */
	fault = VM_FAULT_BADACCESS;
	if (unlikely(!(vma->vm_flags & access)))
		goto out_up;

	if (is_vm_hugetlb_page(vma))
		address &= HPAGE_MASK;
	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	fault = handle_mm_fault(vma, address, flags);
	/* No reason to continue if interrupted by SIGKILL. */
	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) {
		fault = VM_FAULT_SIGNAL;
		if (flags & FAULT_FLAG_RETRY_NOWAIT)
			goto out_up;
		goto out;
	}
	if (unlikely(fault & VM_FAULT_ERROR))
		goto out_up;

	/*
	 * Major/minor page fault accounting is only done on the
	 * initial attempt. If we go through a retry, it is extremely
	 * likely that the page will be found in page cache at that point.
	 */
	if (flags & FAULT_FLAG_ALLOW_RETRY) {
		if (fault & VM_FAULT_MAJOR) {
			tsk->maj_flt++;
			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
				      regs, address);
		} else {
			tsk->min_flt++;
			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
				      regs, address);
		}
		if (fault & VM_FAULT_RETRY) {
			if (IS_ENABLED(CONFIG_PGSTE) && gmap &&
			    (flags & FAULT_FLAG_RETRY_NOWAIT)) {
				/* FAULT_FLAG_RETRY_NOWAIT has been set,
				 * mmap_sem has not been released */
				current->thread.gmap_pfault = 1;
				fault = VM_FAULT_PFAULT;
				goto out_up;
			}
			/* Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk
			 * of starvation. */
			flags &= ~(FAULT_FLAG_ALLOW_RETRY |
				   FAULT_FLAG_RETRY_NOWAIT);
			flags |= FAULT_FLAG_TRIED;
			down_read(&mm->mmap_sem);
			goto retry;
		}
	}
	if (IS_ENABLED(CONFIG_PGSTE) && gmap) {
		address = __gmap_link(gmap, current->thread.gmap_addr,
				      address);
		if (address == -EFAULT) {
			fault = VM_FAULT_BADMAP;
			goto out_up;
		}
		if (address == -ENOMEM) {
			fault = VM_FAULT_OOM;
			goto out_up;
		}
	}
	fault = 0;
out_up:
	up_read(&mm->mmap_sem);
out:
	return fault;
}

void do_protection_exception(struct pt_regs *regs)
{
	unsigned long trans_exc_code;
	int access;
	vm_fault_t fault;

	trans_exc_code = regs->int_parm_long;
	/*
	 * Protection exceptions are suppressing, decrement psw address.
	 * The exception to this rule are aborted transactions, for these
	 * the PSW already points to the correct location.
	 */
	if (!(regs->int_code & 0x200))
		regs->psw.addr = __rewind_psw(regs->psw, regs->int_code >> 16);
	/*
	 * Check for low-address protection. This needs to be treated
	 * as a special case because the translation exception code
	 * field is not guaranteed to contain valid data in this case.
	 */
	if (unlikely(!(trans_exc_code & 4))) {
		do_low_address(regs);
		return;
	}
	if (unlikely(MACHINE_HAS_NX && (trans_exc_code & 0x80))) {
		regs->int_parm_long = (trans_exc_code & ~PAGE_MASK) |
				      (regs->psw.addr & PAGE_MASK);
		access = VM_EXEC;
		fault = VM_FAULT_BADACCESS;
	} else {
		access = VM_WRITE;
		fault = do_exception(regs, access);
	}
	if (unlikely(fault))
		do_fault_error(regs, access, fault);
}
NOKPROBE_SYMBOL(do_protection_exception);

void do_dat_exception(struct pt_regs *regs)
{
	int access;
	vm_fault_t fault;

	access = VM_READ | VM_EXEC | VM_WRITE;
	fault = do_exception(regs, access);
	if (unlikely(fault))
		do_fault_error(regs, access, fault);
}
NOKPROBE_SYMBOL(do_dat_exception);

#ifdef CONFIG_PFAULT
/*
 * 'pfault' pseudo page faults routines.
 */
static int pfault_disable;

static int __init nopfault(char *str)
{
	pfault_disable = 1;
	return 1;
}

__setup("nopfault", nopfault);

struct pfault_refbk {
	u16 refdiagc;
	u16 reffcode;
	u16 refdwlen;
	u16 refversn;
	u64 refgaddr;
	u64 refselmk;
	u64 refcmpmk;
	u64 reserved;
} __attribute__ ((packed, aligned(8)));

static struct pfault_refbk pfault_init_refbk = {
	.refdiagc = 0x258,
	.reffcode = 0,
	.refdwlen = 5,
	.refversn = 2,
	.refgaddr = __LC_LPP,
	.refselmk = 1ULL << 48,
	.refcmpmk = 1ULL << 48,
	.reserved = __PF_RES_FIELD
};

int pfault_init(void)
{
	int rc;

	if (pfault_disable)
		return -1;
	diag_stat_inc(DIAG_STAT_X258);
	asm volatile(
		"	diag	%1,%0,0x258\n"
		"0:	j	2f\n"
		"1:	la	%0,8\n"
		"2:\n"
		EX_TABLE(0b,1b)
		: "=d" (rc)
		: "a" (&pfault_init_refbk), "m" (pfault_init_refbk) : "cc");
	return rc;
}

static struct pfault_refbk pfault_fini_refbk = {
	.refdiagc = 0x258,
	.reffcode = 1,
	.refdwlen = 5,
	.refversn = 2,
};

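/*
 * Disable the pfault mechanism again (diagnose 0x258, function code 1,
 * i.e. PFAULT CANCEL).
 */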
void pfault_fini(void)
{

	if (pfault_disable)
		return;
	diag_stat_inc(DIAG_STAT_X258);
	asm volatile(
		"	diag	%0,0,0x258\n"
		"0:	nopr	%%r7\n"
		EX_TABLE(0b,0b)
		: : "a" (&pfault_fini_refbk), "m" (pfault_fini_refbk) : "cc");
}

static DEFINE_SPINLOCK(pfault_lock);
static LIST_HEAD(pfault_list);

#define PF_COMPLETE	0x0080

/*
 * The mechanism of our pfault code: if Linux is running as a guest, is
 * running a user space process and that user space process accesses a page
 * that the host has paged out, we get a pfault interrupt.
 *
 * This allows us, within the guest, to schedule a different process. Without
 * this mechanism the host would have to suspend the whole virtual cpu until
 * the page has been paged in.
 *
 * So when we get such an interrupt we set the state of the current task
 * to uninterruptible and also set the need_resched flag. Both happen within
 * interrupt context(!). If we later on want to return to user space we
 * recognize the need_resched flag and then call schedule().  It's not very
 * obvious how this works...
 *
 * Of course we have a lot of additional fun with the completion interrupt (->
 * host signals that a page of a process has been paged in and the process can
 * continue to run). This interrupt can arrive on any cpu and, since we have
 * virtual cpus, actually appear before the interrupt that signals that a page
 * is missing.
 */
static void pfault_interrupt(struct ext_code ext_code,
			     unsigned int param32, unsigned long param64)
{
	struct task_struct *tsk;
	__u16 subcode;
	pid_t pid;

	/*
	 * Get the external interruption subcode & pfault initial/completion
	 * signal bit. VM stores this in the 'cpu address' field associated
	 * with the external interrupt.
	 */
	subcode = ext_code.subcode;
	if ((subcode & 0xff00) != __SUBCODE_MASK)
		return;
	inc_irq_stat(IRQEXT_PFL);
	/* Get the token (= pid of the affected task). */
	pid = param64 & LPP_PID_MASK;
	rcu_read_lock();
	tsk = find_task_by_pid_ns(pid, &init_pid_ns);
	if (tsk)
		get_task_struct(tsk);
	rcu_read_unlock();
	if (!tsk)
		return;
	spin_lock(&pfault_lock);
	if (subcode & PF_COMPLETE) {
		/* signal bit is set -> a page has been swapped in by VM */
		if (tsk->thread.pfault_wait == 1) {
			/* Initial interrupt was faster than the completion
			 * interrupt. pfault_wait is valid. Set pfault_wait
			 * back to zero and wake up the process. This can
			 * safely be done because the task is still sleeping
			 * and can't produce new pfaults. */
			tsk->thread.pfault_wait = 0;
			list_del(&tsk->thread.list);
			wake_up_process(tsk);
			put_task_struct(tsk);
		} else {
			/* Completion interrupt was faster than initial
			 * interrupt. Set pfault_wait to -1 so the initial
			 * interrupt doesn't put the task to sleep.
			 * If the task is not running, ignore the completion
			 * interrupt since it must be a leftover of a PFAULT
			 * CANCEL operation which didn't remove all pending
			 * completion interrupts. */
			if (tsk->state == TASK_RUNNING)
				tsk->thread.pfault_wait = -1;
		}
	} else {
		/* signal bit not set -> a real page is missing. */
		if (WARN_ON_ONCE(tsk != current))
			goto out;
		if (tsk->thread.pfault_wait == 1) {
			/* Already on the list with a reference: put to sleep */
			goto block;
		} else if (tsk->thread.pfault_wait == -1) {
			/* Completion interrupt was faster than the initial
			 * interrupt (pfault_wait == -1). Set pfault_wait
			 * back to zero and exit. */
			tsk->thread.pfault_wait = 0;
		} else {
			/* Initial interrupt arrived before completion
			 * interrupt. Let the task sleep.
			 * An extra task reference is needed since a different
			 * cpu may set the task state to TASK_RUNNING again
			 * before the scheduler is reached. */
			get_task_struct(tsk);
			tsk->thread.pfault_wait = 1;
			list_add(&tsk->thread.list, &pfault_list);
block:
			/* Since this must be a userspace fault, there
			 * is no kernel task state to trample. Rely on the
			 * return to userspace schedule() to block. */
			__set_current_state(TASK_UNINTERRUPTIBLE);
			set_tsk_need_resched(tsk);
			set_preempt_need_resched();
		}
	}
out:
	spin_unlock(&pfault_lock);
	put_task_struct(tsk);
}

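/*
 * cpu hotplug "dead" callback: a cpu has gone offline, so wake up and
 * drop every task that is still parked on the pfault list.
 */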
static int pfault_cpu_dead(unsigned int cpu)
{
	struct thread_struct *thread, *next;
	struct task_struct *tsk;

	spin_lock_irq(&pfault_lock);
	list_for_each_entry_safe(thread, next, &pfault_list, list) {
		thread->pfault_wait = 0;
		list_del(&thread->list);
		tsk = container_of(thread, struct task_struct, thread);
		wake_up_process(tsk);
		put_task_struct(tsk);
	}
	spin_unlock_irq(&pfault_lock);
	return 0;
}

static int __init pfault_irq_init(void)
{
	int rc;

	rc = register_external_irq(EXT_IRQ_CP_SERVICE, pfault_interrupt);
	if (rc)
		goto out_extint;
	rc = pfault_init() == 0 ? 0 : -EOPNOTSUPP;
	if (rc)
		goto out_pfault;
	irq_subclass_register(IRQ_SUBCLASS_SERVICE_SIGNAL);
	cpuhp_setup_state_nocalls(CPUHP_S390_PFAULT_DEAD, "s390/pfault:dead",
				  NULL, pfault_cpu_dead);
	return 0;

out_pfault:
	unregister_external_irq(EXT_IRQ_CP_SERVICE, pfault_interrupt);
out_extint:
	pfault_disable = 1;
	return rc;
}
early_initcall(pfault_irq_init);

#endif /* CONFIG_PFAULT */