// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 1991, 1992 Linus Torvalds
 * Copyright (C) 2000, 2001, 2002 Andi Kleen SuSE Labs
 *
 * 1997-11-28 Modified for POSIX.1b signals by Richard Henderson
 * 2000-06-20 Pentium III FXSR, SSE support by Gareth Hughes
 * 2000-2002 x86-64 support by Andi Kleen
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/kernel.h>
#include <linux/kstrtox.h>
#include <linux/errno.h>
#include <linux/wait.h>
#include <linux/tracehook.h>
#include <linux/unistd.h>
#include <linux/stddef.h>
#include <linux/personality.h>
#include <linux/uaccess.h>
#include <linux/user-return-notifier.h>
#include <linux/uprobes.h>
#include <linux/context_tracking.h>
#include <linux/entry-common.h>
#include <linux/syscalls.h>

#include <asm/processor.h>
#include <asm/ucontext.h>
#include <asm/fpu/signal.h>
#include <asm/fpu/xstate.h>
#include <asm/vdso.h>
#include <asm/mce.h>
#include <asm/sighandling.h>
#include <asm/vm86.h>

#ifdef CONFIG_X86_64
#include <linux/compat.h>
#include <asm/proto.h>
#include <asm/ia32_unistd.h>
#endif /* CONFIG_X86_64 */

#include <asm/syscall.h>
#include <asm/sigframe.h>
#include <asm/signal.h>

#ifdef CONFIG_X86_64
/*
 * If regs->ss will cause an IRET fault, change it.  Otherwise leave it
 * alone.  Using this generally makes no sense unless
 * user_64bit_mode(regs) would return true.
 */
static void force_valid_ss(struct pt_regs *regs)
{
	u32 ar;
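	/*
	 * LAR loads the access rights of the selector and sets ZF on
	 * success; if the selector cannot be loaded, ZF is clear and
	 * we force ar to 0 so the validity checks below fail.
	 */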
	asm volatile ("lar %[old_ss], %[ar]\n\t"
		      "jz 1f\n\t"		/* If invalid: */
		      "xorl %[ar], %[ar]\n\t"	/* set ar = 0 */
		      "1:"
		      : [ar] "=r" (ar)
		      : [old_ss] "rm" ((u16)regs->ss));

	/*
	 * For a valid 64-bit user context, we need DPL 3, type
	 * read-write data or read-write exp-down data, and S and P
	 * set.  We can't use VERW because VERW doesn't check the
	 * P bit.
	 */
	ar &= AR_DPL_MASK | AR_S | AR_P | AR_TYPE_MASK;
	if (ar != (AR_DPL3 | AR_S | AR_P | AR_TYPE_RWDATA) &&
	    ar != (AR_DPL3 | AR_S | AR_P | AR_TYPE_RWDATA_EXPDOWN))
		regs->ss = __USER_DS;
}
# define CONTEXT_COPY_SIZE	offsetof(struct sigcontext, reserved1)
#else
# define CONTEXT_COPY_SIZE	sizeof(struct sigcontext)
#endif

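/*
 * Copy the user-supplied sigcontext and load it into pt_regs.  Only
 * CONTEXT_COPY_SIZE bytes are read: on x86-64 the trailing reserved1[]
 * area of struct sigcontext is intentionally left unread.
 */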
static bool restore_sigcontext(struct pt_regs *regs,
			       struct sigcontext __user *usc,
			       unsigned long uc_flags)
{
	struct sigcontext sc;

	/* Always make any pending restarted system calls return -EINTR */
	current->restart_block.fn = do_no_restart_syscall;

	if (copy_from_user(&sc, usc, CONTEXT_COPY_SIZE))
		return false;

#ifdef CONFIG_X86_32
	set_user_gs(regs, sc.gs);
	regs->fs = sc.fs;
	regs->es = sc.es;
	regs->ds = sc.ds;
#endif /* CONFIG_X86_32 */

	regs->bx = sc.bx;
	regs->cx = sc.cx;
	regs->dx = sc.dx;
	regs->si = sc.si;
	regs->di = sc.di;
	regs->bp = sc.bp;
	regs->ax = sc.ax;
	regs->sp = sc.sp;
	regs->ip = sc.ip;

#ifdef CONFIG_X86_64
	regs->r8 = sc.r8;
	regs->r9 = sc.r9;
	regs->r10 = sc.r10;
	regs->r11 = sc.r11;
	regs->r12 = sc.r12;
	regs->r13 = sc.r13;
	regs->r14 = sc.r14;
	regs->r15 = sc.r15;
#endif /* CONFIG_X86_64 */

	/* Get CS/SS and force CPL3 */
	regs->cs = sc.cs | 0x03;
	regs->ss = sc.ss | 0x03;

	regs->flags = (regs->flags & ~FIX_EFLAGS) | (sc.flags & FIX_EFLAGS);
	/* disable syscall checks */
	regs->orig_ax = -1;

#ifdef CONFIG_X86_64
	/*
	 * Fix up SS if needed for the benefit of old DOSEMU and
	 * CRIU.
	 */
	if (unlikely(!(uc_flags & UC_STRICT_RESTORE_SS) && user_64bit_mode(regs)))
		force_valid_ss(regs);
#endif

	return fpu__restore_sig((void __user *)sc.fpstate,
				IS_ENABLED(CONFIG_X86_32));
}

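/*
 * Write the saved register state into the user sigcontext.  This must
 * run inside a user_access_begin() section: each unsafe_put_user()
 * branches to the caller-supplied Efault label on fault instead of
 * returning an error.
 */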
static __always_inline int
__unsafe_setup_sigcontext(struct sigcontext __user *sc, void __user *fpstate,
			  struct pt_regs *regs, unsigned long mask)
{
#ifdef CONFIG_X86_32
	unsafe_put_user(get_user_gs(regs),
			(unsigned int __user *)&sc->gs, Efault);
	unsafe_put_user(regs->fs, (unsigned int __user *)&sc->fs, Efault);
	unsafe_put_user(regs->es, (unsigned int __user *)&sc->es, Efault);
	unsafe_put_user(regs->ds, (unsigned int __user *)&sc->ds, Efault);
#endif /* CONFIG_X86_32 */

	unsafe_put_user(regs->di, &sc->di, Efault);
	unsafe_put_user(regs->si, &sc->si, Efault);
	unsafe_put_user(regs->bp, &sc->bp, Efault);
	unsafe_put_user(regs->sp, &sc->sp, Efault);
	unsafe_put_user(regs->bx, &sc->bx, Efault);
	unsafe_put_user(regs->dx, &sc->dx, Efault);
	unsafe_put_user(regs->cx, &sc->cx, Efault);
	unsafe_put_user(regs->ax, &sc->ax, Efault);
#ifdef CONFIG_X86_64
	unsafe_put_user(regs->r8, &sc->r8, Efault);
	unsafe_put_user(regs->r9, &sc->r9, Efault);
	unsafe_put_user(regs->r10, &sc->r10, Efault);
	unsafe_put_user(regs->r11, &sc->r11, Efault);
	unsafe_put_user(regs->r12, &sc->r12, Efault);
	unsafe_put_user(regs->r13, &sc->r13, Efault);
	unsafe_put_user(regs->r14, &sc->r14, Efault);
	unsafe_put_user(regs->r15, &sc->r15, Efault);
#endif /* CONFIG_X86_64 */

	unsafe_put_user(current->thread.trap_nr, &sc->trapno, Efault);
	unsafe_put_user(current->thread.error_code, &sc->err, Efault);
	unsafe_put_user(regs->ip, &sc->ip, Efault);
#ifdef CONFIG_X86_32
	unsafe_put_user(regs->cs, (unsigned int __user *)&sc->cs, Efault);
	unsafe_put_user(regs->flags, &sc->flags, Efault);
	unsafe_put_user(regs->sp, &sc->sp_at_signal, Efault);
	unsafe_put_user(regs->ss, (unsigned int __user *)&sc->ss, Efault);
#else /* !CONFIG_X86_32 */
	unsafe_put_user(regs->flags, &sc->flags, Efault);
	unsafe_put_user(regs->cs, &sc->cs, Efault);
	unsafe_put_user(0, &sc->gs, Efault);
	unsafe_put_user(0, &sc->fs, Efault);
	unsafe_put_user(regs->ss, &sc->ss, Efault);
#endif /* CONFIG_X86_32 */

	unsafe_put_user(fpstate, (unsigned long __user *)&sc->fpstate, Efault);

	/* non-iBCS2 extensions... */
	unsafe_put_user(mask, &sc->oldmask, Efault);
	unsafe_put_user(current->thread.cr2, &sc->cr2, Efault);
	return 0;
Efault:
	return -EFAULT;
}

#define unsafe_put_sigcontext(sc, fp, regs, set, label)			\
do {									\
	if (__unsafe_setup_sigcontext(sc, fp, regs, set->sig[0]))	\
		goto label;						\
} while (0)

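/*
 * On x86, _NSIG is 64, so the entire sigset_t fits in one 64-bit word;
 * storing it with a single unsafe_put_user() avoids a second user access.
 */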
#define unsafe_put_sigmask(set, frame, label) \
	unsafe_put_user(*(__u64 *)(set), \
			(__u64 __user *)&(frame)->uc.uc_sigmask, \
			label)

/*
 * Set up a signal frame.
 */

/* x86 ABI requires 16-byte alignment */
#define FRAME_ALIGNMENT	16UL

#define MAX_FRAME_PADDING	(FRAME_ALIGNMENT - 1)

/*
 * Determine which stack to use.
 */
static unsigned long align_sigframe(unsigned long sp)
{
#ifdef CONFIG_X86_32
	/*
	 * Align the stack pointer according to the i386 ABI,
	 * i.e. so that on function entry ((sp + 4) & 15) == 0.
	 */
	sp = ((sp + 4) & -FRAME_ALIGNMENT) - 4;
#else /* !CONFIG_X86_32 */
	sp = round_down(sp, FRAME_ALIGNMENT) - 8;
#endif
	return sp;
}

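/*
 * Pick the stack on which to build the signal frame (current stack,
 * sigaltstack, or the legacy restorer-based stack switch), reserve
 * room for the FPU state and the frame itself, and copy the FPU state
 * out to userspace.
 */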
static void __user *
get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size,
	     void __user **fpstate)
{
	/* Default to using normal stack */
	bool nested_altstack = on_sig_stack(regs->sp);
	bool entering_altstack = false;
	unsigned long math_size = 0;
	unsigned long sp = regs->sp;
	unsigned long buf_fx = 0;

	/*
	 * The x86-64 ABI reserves a 128-byte red zone below the stack
	 * pointer that leaf functions may use without adjusting %rsp;
	 * the signal frame must not overwrite it.
	 */
	if (IS_ENABLED(CONFIG_X86_64))
		sp -= 128;

	/* This is the X/Open sanctioned signal stack switching.  */
	if (ka->sa.sa_flags & SA_ONSTACK) {
		/*
		 * This checks nested_altstack via sas_ss_flags(). Sensible
		 * programs use SS_AUTODISARM, which disables that check, and
		 * programs that don't use SS_AUTODISARM get the historical
		 * behavior.
		 */
		if (sas_ss_flags(sp) == 0) {
			sp = current->sas_ss_sp + current->sas_ss_size;
			entering_altstack = true;
		}
	} else if (IS_ENABLED(CONFIG_X86_32) &&
		   !nested_altstack &&
		   regs->ss != __USER_DS &&
		   !(ka->sa.sa_flags & SA_RESTORER) &&
		   ka->sa.sa_restorer) {
		/* This is the legacy signal stack switching. */
		sp = (unsigned long) ka->sa.sa_restorer;
		entering_altstack = true;
	}

	sp = fpu__alloc_mathframe(sp, IS_ENABLED(CONFIG_X86_32),
				  &buf_fx, &math_size);
	*fpstate = (void __user *)sp;

	sp = align_sigframe(sp - frame_size);

	/*
	 * If we are on the alternate signal stack and would overflow it, don't.
	 * Return an always-bogus address instead so we will die with SIGSEGV.
	 */
	if (unlikely((nested_altstack || entering_altstack) &&
		     !__on_sig_stack(sp))) {

		if (show_unhandled_signals && printk_ratelimit())
			pr_info("%s[%d] overflowed sigaltstack\n",
				current->comm, task_pid_nr(current));

		return (void __user *)-1L;
	}

	/* save i387 and extended state */
	if (!copy_fpstate_to_sigframe(*fpstate, (void __user *)buf_fx, math_size))
		return (void __user *)-1L;

	return (void __user *)sp;
}

#ifdef CONFIG_X86_32
static const struct {
	u16 poplmovl;
	u32 val;
	u16 int80;
} __attribute__((packed)) retcode = {
	0xb858,		/* popl %eax; movl $..., %eax */
	__NR_sigreturn,
	0x80cd,		/* int $0x80 */
};

static const struct {
	u8  movl;
	u32 val;
	u16 int80;
	u8  pad;
} __attribute__((packed)) rt_retcode = {
	0xb8,		/* movl $..., %eax */
	__NR_rt_sigreturn,
	0x80cd,		/* int $0x80 */
	0
};

static int
__setup_frame(int sig, struct ksignal *ksig, sigset_t *set,
	      struct pt_regs *regs)
{
	struct sigframe __user *frame;
	void __user *restorer;
	void __user *fp = NULL;

	frame = get_sigframe(&ksig->ka, regs, sizeof(*frame), &fp);

	if (!user_access_begin(frame, sizeof(*frame)))
		return -EFAULT;

	unsafe_put_user(sig, &frame->sig, Efault);
	unsafe_put_sigcontext(&frame->sc, fp, regs, set, Efault);
	unsafe_put_user(set->sig[1], &frame->extramask[0], Efault);
	if (current->mm->context.vdso)
		restorer = current->mm->context.vdso +
			vdso_image_32.sym___kernel_sigreturn;
	else
		restorer = &frame->retcode;
	if (ksig->ka.sa.sa_flags & SA_RESTORER)
		restorer = ksig->ka.sa.sa_restorer;

	/* Set up to return from userspace.  */
	unsafe_put_user(restorer, &frame->pretcode, Efault);

	/*
	 * This is popl %eax ; movl $__NR_sigreturn, %eax ; int $0x80
	 *
	 * WE DO NOT USE IT ANY MORE! It's only left here for historical
	 * reasons and because gdb uses it as a signature to notice
	 * signal handler stack frames.
	 */
	unsafe_put_user(*((u64 *)&retcode), (u64 *)frame->retcode, Efault);
	user_access_end();

	/* Set up registers for signal handler */
	regs->sp = (unsigned long)frame;
	regs->ip = (unsigned long)ksig->ka.sa.sa_handler;
	regs->ax = (unsigned long)sig;
	regs->dx = 0;
	regs->cx = 0;

	regs->ds = __USER_DS;
	regs->es = __USER_DS;
	regs->ss = __USER_DS;
	regs->cs = __USER_CS;

	return 0;

Efault:
	user_access_end();
	return -EFAULT;
}

static int __setup_rt_frame(int sig, struct ksignal *ksig,
			    sigset_t *set, struct pt_regs *regs)
{
	struct rt_sigframe __user *frame;
	void __user *restorer;
	void __user *fp = NULL;

	frame = get_sigframe(&ksig->ka, regs, sizeof(*frame), &fp);

	if (!user_access_begin(frame, sizeof(*frame)))
		return -EFAULT;

	unsafe_put_user(sig, &frame->sig, Efault);
	unsafe_put_user(&frame->info, &frame->pinfo, Efault);
	unsafe_put_user(&frame->uc, &frame->puc, Efault);

	/* Create the ucontext.  */
	if (static_cpu_has(X86_FEATURE_XSAVE))
		unsafe_put_user(UC_FP_XSTATE, &frame->uc.uc_flags, Efault);
	else
		unsafe_put_user(0, &frame->uc.uc_flags, Efault);
	unsafe_put_user(0, &frame->uc.uc_link, Efault);
	unsafe_save_altstack(&frame->uc.uc_stack, regs->sp, Efault);

	/* Set up to return from userspace.  */
	restorer = current->mm->context.vdso +
		vdso_image_32.sym___kernel_rt_sigreturn;
	if (ksig->ka.sa.sa_flags & SA_RESTORER)
		restorer = ksig->ka.sa.sa_restorer;
	unsafe_put_user(restorer, &frame->pretcode, Efault);

	/*
	 * This is movl $__NR_rt_sigreturn, %eax ; int $0x80
	 *
	 * WE DO NOT USE IT ANY MORE! It's only left here for historical
	 * reasons and because gdb uses it as a signature to notice
	 * signal handler stack frames.
	 */
	unsafe_put_user(*((u64 *)&rt_retcode), (u64 *)frame->retcode, Efault);
	unsafe_put_sigcontext(&frame->uc.uc_mcontext, fp, regs, set, Efault);
	unsafe_put_sigmask(set, frame, Efault);
	user_access_end();

	if (copy_siginfo_to_user(&frame->info, &ksig->info))
		return -EFAULT;

	/* Set up registers for signal handler */
	regs->sp = (unsigned long)frame;
	regs->ip = (unsigned long)ksig->ka.sa.sa_handler;
	regs->ax = (unsigned long)sig;
	regs->dx = (unsigned long)&frame->info;
	regs->cx = (unsigned long)&frame->uc;

	regs->ds = __USER_DS;
	regs->es = __USER_DS;
	regs->ss = __USER_DS;
	regs->cs = __USER_CS;

	return 0;
Efault:
	user_access_end();
	return -EFAULT;
}
#else /* !CONFIG_X86_32 */
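/*
 * Compute uc_flags for a 64-bit frame.  UC_SIGCONTEXT_SS advertises
 * that the kernel saves SS/CS in the sigcontext; UC_STRICT_RESTORE_SS
 * tells sigreturn to restore the saved SS verbatim instead of fixing
 * it up (see restore_sigcontext()).
 */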
static unsigned long frame_uc_flags(struct pt_regs *regs)
{
	unsigned long flags;

	if (boot_cpu_has(X86_FEATURE_XSAVE))
		flags = UC_FP_XSTATE | UC_SIGCONTEXT_SS;
	else
		flags = UC_SIGCONTEXT_SS;

	if (likely(user_64bit_mode(regs)))
		flags |= UC_STRICT_RESTORE_SS;

	return flags;
}

static int __setup_rt_frame(int sig, struct ksignal *ksig,
			    sigset_t *set, struct pt_regs *regs)
{
	struct rt_sigframe __user *frame;
	void __user *fp = NULL;
	unsigned long uc_flags;

	/* x86-64 should always use SA_RESTORER. */
	if (!(ksig->ka.sa.sa_flags & SA_RESTORER))
		return -EFAULT;

	frame = get_sigframe(&ksig->ka, regs, sizeof(struct rt_sigframe), &fp);
	uc_flags = frame_uc_flags(regs);

	if (!user_access_begin(frame, sizeof(*frame)))
		return -EFAULT;

	/* Create the ucontext.  */
	unsafe_put_user(uc_flags, &frame->uc.uc_flags, Efault);
	unsafe_put_user(0, &frame->uc.uc_link, Efault);
	unsafe_save_altstack(&frame->uc.uc_stack, regs->sp, Efault);

	/*
	 * Set up to return from userspace.  If provided, use a stub
	 * already in userspace.
	 */
	unsafe_put_user(ksig->ka.sa.sa_restorer, &frame->pretcode, Efault);
	unsafe_put_sigcontext(&frame->uc.uc_mcontext, fp, regs, set, Efault);
	unsafe_put_sigmask(set, frame, Efault);
	user_access_end();

	if (ksig->ka.sa.sa_flags & SA_SIGINFO) {
		if (copy_siginfo_to_user(&frame->info, &ksig->info))
			return -EFAULT;
	}

	/* Set up registers for signal handler */
	regs->di = sig;
	/* In case the signal handler was declared without prototypes */
	regs->ax = 0;

	/*
	 * This also works for non SA_SIGINFO handlers because they expect
	 * the next argument after the signal number on the stack.
	 */
	regs->si = (unsigned long)&frame->info;
	regs->dx = (unsigned long)&frame->uc;
	regs->ip = (unsigned long) ksig->ka.sa.sa_handler;

	regs->sp = (unsigned long)frame;

	/*
	 * Set up the CS and SS registers to run signal handlers in
	 * 64-bit mode, even if the handler happens to be interrupting
	 * 32-bit or 16-bit code.
	 *
	 * SS is subtle.  In 64-bit mode, we don't need any particular
	 * SS descriptor, but we do need SS to be valid.  It's possible
	 * that the old SS is entirely bogus -- this can happen if the
	 * signal we're trying to deliver is #GP or #SS caused by a bad
	 * SS value.  We also have a compatibility issue here: DOSEMU
	 * relies on the contents of the SS register indicating the
	 * SS value at the time of the signal, even though that code in
	 * DOSEMU predates sigreturn's ability to restore SS.  (DOSEMU
	 * avoids relying on sigreturn to restore SS; instead it uses
	 * a trampoline.)  So we do our best: if the old SS was valid,
	 * we keep it.  Otherwise we replace it.
	 */
	regs->cs = __USER_CS;

	if (unlikely(regs->ss != __USER_DS))
		force_valid_ss(regs);

	return 0;

Efault:
	user_access_end();
	return -EFAULT;
}
#endif /* CONFIG_X86_32 */

#ifdef CONFIG_X86_X32_ABI
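/*
 * x32 siginfo differs from the ia32 layout only for SIGCHLD: si_utime
 * and si_stime are 64-bit on x32, so they are filled in separately
 * after the common 32-bit conversion.
 */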
static int x32_copy_siginfo_to_user(struct compat_siginfo __user *to,
		const struct kernel_siginfo *from)
{
	struct compat_siginfo new;

	copy_siginfo_to_external32(&new, from);
	if (from->si_signo == SIGCHLD) {
		new._sifields._sigchld_x32._utime = from->si_utime;
		new._sifields._sigchld_x32._stime = from->si_stime;
	}
	if (copy_to_user(to, &new, sizeof(struct compat_siginfo)))
		return -EFAULT;
	return 0;
}

int copy_siginfo_to_user32(struct compat_siginfo __user *to,
			   const struct kernel_siginfo *from)
{
	if (in_x32_syscall())
		return x32_copy_siginfo_to_user(to, from);
	return __copy_siginfo_to_user32(to, from);
}
#endif /* CONFIG_X86_X32_ABI */

static int x32_setup_rt_frame(struct ksignal *ksig,
			      compat_sigset_t *set,
			      struct pt_regs *regs)
{
#ifdef CONFIG_X86_X32_ABI
	struct rt_sigframe_x32 __user *frame;
	unsigned long uc_flags;
	void __user *restorer;
	void __user *fp = NULL;

	if (!(ksig->ka.sa.sa_flags & SA_RESTORER))
		return -EFAULT;

	frame = get_sigframe(&ksig->ka, regs, sizeof(*frame), &fp);

	uc_flags = frame_uc_flags(regs);

	if (!user_access_begin(frame, sizeof(*frame)))
		return -EFAULT;

	/* Create the ucontext.  */
	unsafe_put_user(uc_flags, &frame->uc.uc_flags, Efault);
	unsafe_put_user(0, &frame->uc.uc_link, Efault);
	unsafe_compat_save_altstack(&frame->uc.uc_stack, regs->sp, Efault);
	unsafe_put_user(0, &frame->uc.uc__pad0, Efault);
	restorer = ksig->ka.sa.sa_restorer;
	unsafe_put_user(restorer, (unsigned long __user *)&frame->pretcode, Efault);
	unsafe_put_sigcontext(&frame->uc.uc_mcontext, fp, regs, set, Efault);
	unsafe_put_sigmask(set, frame, Efault);
	user_access_end();

	if (ksig->ka.sa.sa_flags & SA_SIGINFO) {
		if (x32_copy_siginfo_to_user(&frame->info, &ksig->info))
			return -EFAULT;
	}

	/* Set up registers for signal handler */
	regs->sp = (unsigned long) frame;
	regs->ip = (unsigned long) ksig->ka.sa.sa_handler;

	/* We use the x32 calling convention here... */
	regs->di = ksig->sig;
	regs->si = (unsigned long) &frame->info;
	regs->dx = (unsigned long) &frame->uc;

	loadsegment(ds, __USER_DS);
	loadsegment(es, __USER_DS);

	regs->cs = __USER_CS;
	regs->ss = __USER_DS;
#endif /* CONFIG_X86_X32_ABI */

	return 0;
#ifdef CONFIG_X86_X32_ABI
Efault:
	user_access_end();
	return -EFAULT;
#endif
}

/*
 * Do a signal return; undo the signal stack.
 */
#ifdef CONFIG_X86_32
SYSCALL_DEFINE0(sigreturn)
{
	struct pt_regs *regs = current_pt_regs();
	struct sigframe __user *frame;
	sigset_t set;

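	/*
	 * The handler returned through the restorer: 'ret' popped
	 * pretcode and the restorer's 'popl %eax' popped the signal
	 * number, so the frame starts 8 bytes below the current sp.
	 */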
	frame = (struct sigframe __user *)(regs->sp - 8);

	if (!access_ok(frame, sizeof(*frame)))
		goto badframe;
	if (__get_user(set.sig[0], &frame->sc.oldmask) ||
	    __get_user(set.sig[1], &frame->extramask[0]))
		goto badframe;

	set_current_blocked(&set);

	/*
	 * x86_32 has no uc_flags bits relevant to restore_sigcontext.
	 * Save a few cycles by skipping the __get_user.
	 */
	if (!restore_sigcontext(regs, &frame->sc, 0))
		goto badframe;
	return regs->ax;

badframe:
	signal_fault(regs, frame, "sigreturn");

	return 0;
}
#endif /* CONFIG_X86_32 */

SYSCALL_DEFINE0(rt_sigreturn)
{
	struct pt_regs *regs = current_pt_regs();
	struct rt_sigframe __user *frame;
	sigset_t set;
	unsigned long uc_flags;

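	/*
	 * Only the return through pretcode has adjusted the user stack,
	 * so the rt frame starts one word below the current sp.
	 */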
	frame = (struct rt_sigframe __user *)(regs->sp - sizeof(long));
	if (!access_ok(frame, sizeof(*frame)))
		goto badframe;
	if (__get_user(*(__u64 *)&set, (__u64 __user *)&frame->uc.uc_sigmask))
		goto badframe;
	if (__get_user(uc_flags, &frame->uc.uc_flags))
		goto badframe;

	set_current_blocked(&set);

	if (!restore_sigcontext(regs, &frame->uc.uc_mcontext, uc_flags))
		goto badframe;

	if (restore_altstack(&frame->uc.uc_stack))
		goto badframe;

	return regs->ax;

badframe:
	signal_fault(regs, frame, "rt_sigreturn");
	return 0;
}

/*
 * There are four different struct types for signal frame: sigframe_ia32,
 * rt_sigframe_ia32, rt_sigframe_x32, and rt_sigframe. Use the worst case
 * -- the largest size. It means the size for 64-bit apps is a bit more
 * than needed, but this keeps the code simple.
 */
#if defined(CONFIG_X86_32) || defined(CONFIG_IA32_EMULATION)
# define MAX_FRAME_SIGINFO_UCTXT_SIZE	sizeof(struct sigframe_ia32)
#else
# define MAX_FRAME_SIGINFO_UCTXT_SIZE	sizeof(struct rt_sigframe)
#endif

/*
 * The FP state frame contains an XSAVE buffer which must be 64-byte aligned.
 * If a signal frame starts at an unaligned address, extra space is required.
 * This is the max alignment padding, conservatively.
 */
#define MAX_XSAVE_PADDING	63UL

/*
 * The frame data is composed of the following areas and laid out as:
 *
 * -------------------------
 * | alignment padding     |
 * -------------------------
 * | (f)xsave frame        |
 * -------------------------
 * | fsave header          |
 * -------------------------
 * | alignment padding     |
 * -------------------------
 * | siginfo + ucontext    |
 * -------------------------
 */

/* max_frame_size tells userspace the worst case signal stack size. */
static unsigned long __ro_after_init max_frame_size;
static unsigned int __ro_after_init fpu_default_state_size;

void __init init_sigframe_size(void)
{
	fpu_default_state_size = fpu__get_fpstate_size();

	max_frame_size = MAX_FRAME_SIGINFO_UCTXT_SIZE + MAX_FRAME_PADDING;

	max_frame_size += fpu_default_state_size + MAX_XSAVE_PADDING;

	/* Userspace expects an aligned size. */
	max_frame_size = round_up(max_frame_size, FRAME_ALIGNMENT);

	pr_info("max sigframe size: %lu\n", max_frame_size);
}

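/*
 * The worst-case frame size computed above is what the kernel reports
 * to userspace (e.g. via the AT_MINSIGSTKSZ aux vector entry) so that
 * programs can size alternate signal stacks correctly.
 */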
unsigned long get_sigframe_size(void)
{
	return max_frame_size;
}

static inline int is_ia32_compat_frame(struct ksignal *ksig)
{
	return IS_ENABLED(CONFIG_IA32_EMULATION) &&
		ksig->ka.sa.sa_flags & SA_IA32_ABI;
}

static inline int is_ia32_frame(struct ksignal *ksig)
{
	return IS_ENABLED(CONFIG_X86_32) || is_ia32_compat_frame(ksig);
}

static inline int is_x32_frame(struct ksignal *ksig)
{
	return IS_ENABLED(CONFIG_X86_X32_ABI) &&
		ksig->ka.sa.sa_flags & SA_X32_ABI;
}

static int
setup_rt_frame(struct ksignal *ksig, struct pt_regs *regs)
{
	int usig = ksig->sig;
	sigset_t *set = sigmask_to_save();
	compat_sigset_t *cset = (compat_sigset_t *) set;

	/* Perform fixup for the pre-signal frame. */
	rseq_signal_deliver(ksig, regs);

	/* Set up the stack frame */
	if (is_ia32_frame(ksig)) {
		if (ksig->ka.sa.sa_flags & SA_SIGINFO)
			return ia32_setup_rt_frame(usig, ksig, cset, regs);
		else
			return ia32_setup_frame(usig, ksig, cset, regs);
	} else if (is_x32_frame(ksig)) {
		return x32_setup_rt_frame(ksig, cset, regs);
	} else {
		return __setup_rt_frame(ksig->sig, ksig, set, regs);
	}
}

static void
handle_signal(struct ksignal *ksig, struct pt_regs *regs)
{
	bool stepping, failed;
	struct fpu *fpu = &current->thread.fpu;

	if (v8086_mode(regs))
		save_v86_state((struct kernel_vm86_regs *) regs, VM86_SIGNAL);

	/* Are we from a system call? */
	if (syscall_get_nr(current, regs) != -1) {
		/* If so, check system call restarting... */
		switch (syscall_get_error(current, regs)) {
		case -ERESTART_RESTARTBLOCK:
		case -ERESTARTNOHAND:
			regs->ax = -EINTR;
			break;

		case -ERESTARTSYS:
			if (!(ksig->ka.sa.sa_flags & SA_RESTART)) {
				regs->ax = -EINTR;
				break;
			}
			fallthrough;
		case -ERESTARTNOINTR:
			regs->ax = regs->orig_ax;
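			/*
			 * All x86 syscall entry instructions (int $0x80,
			 * sysenter, syscall) are two bytes long, so
			 * rewinding IP by 2 re-executes the system call
			 * on return to userspace.
			 */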
| 811 | regs->ip -= 2; |
| 812 | break; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 813 | } |
| 814 | } |
| 815 | |
| 816 | /* |
Oleg Nesterov | fd0f86b | 2015-04-16 00:40:25 -0700 | [diff] [blame] | 817 | * If TF is set due to a debugger (TIF_FORCED_TF), clear TF now |
| 818 | * so that register information in the sigcontext is correct and |
| 819 | * then notify the tracer before entering the signal handler. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 820 | */ |
Oleg Nesterov | fd0f86b | 2015-04-16 00:40:25 -0700 | [diff] [blame] | 821 | stepping = test_thread_flag(TIF_SINGLESTEP); |
| 822 | if (stepping) |
| 823 | user_disable_single_step(current); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 824 | |
Al Viro | 235b802 | 2012-11-09 23:51:47 -0500 | [diff] [blame] | 825 | failed = (setup_rt_frame(ksig, regs) < 0); |
| 826 | if (!failed) { |
| 827 | /* |
| 828 | * Clear the direction flag as per the ABI for function entry. |
Jiri Olsa | ddd40da | 2013-05-01 17:25:43 +0200 | [diff] [blame] | 829 | * |
Jiri Olsa | 24cda10 | 2013-05-01 17:25:42 +0200 | [diff] [blame] | 830 | * Clear RF when entering the signal handler, because |
| 831 | * it might disable possible debug exception from the |
| 832 | * signal handler. |
Jiri Olsa | ddd40da | 2013-05-01 17:25:43 +0200 | [diff] [blame] | 833 | * |
Oleg Nesterov | fd0f86b | 2015-04-16 00:40:25 -0700 | [diff] [blame] | 834 | * Clear TF for the case when it wasn't set by debugger to |
| 835 | * avoid the recursive send_sigtrap() in SIGTRAP handler. |
Al Viro | 235b802 | 2012-11-09 23:51:47 -0500 | [diff] [blame] | 836 | */ |
Jiri Olsa | ddd40da | 2013-05-01 17:25:43 +0200 | [diff] [blame] | 837 | regs->flags &= ~(X86_EFLAGS_DF|X86_EFLAGS_RF|X86_EFLAGS_TF); |
Oleg Nesterov | 66463db | 2014-09-02 19:57:13 +0200 | [diff] [blame] | 838 | /* |
| 839 | * Ensure the signal handler starts with the new fpu state. |
| 840 | */ |
Fenghua Yu | b860eb8 | 2020-05-12 07:54:39 -0700 | [diff] [blame] | 841 | fpu__clear_user_states(fpu); |
Al Viro | a610d6e | 2012-05-21 23:42:15 -0400 | [diff] [blame] | 842 | } |
Oleg Nesterov | fd0f86b | 2015-04-16 00:40:25 -0700 | [diff] [blame] | 843 | signal_setup_done(failed, ksig, stepping); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 844 | } |
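/*
 * [Editor's example] The -ERESTARTSYS handling above is what user space
 * observes through SA_RESTART. A minimal user-space sketch (not part of
 * this file): without SA_RESTART an interrupted read() fails with EINTR;
 * with it, the kernel rewinds regs->ip past the 2-byte syscall
 * instruction and the call is transparently re-executed.
 */
#include <errno.h>
#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static void on_alrm(int sig)
{
	(void)sig;	/* the handler's existence alone interrupts read() */
}

int main(void)
{
	struct sigaction sa;
	char c;

	memset(&sa, 0, sizeof(sa));
	sa.sa_handler = on_alrm;

	sa.sa_flags = 0;		/* no SA_RESTART: -ERESTARTSYS becomes -EINTR */
	sigaction(SIGALRM, &sa, NULL);
	alarm(1);
	if (read(STDIN_FILENO, &c, 1) < 0)
		printf("read: %s\n", strerror(errno));	/* "Interrupted system call" */

	sa.sa_flags = SA_RESTART;	/* now the kernel restarts read() */
	sigaction(SIGALRM, &sa, NULL);
	alarm(1);
	read(STDIN_FILENO, &c, 1);	/* blocks again after the signal fires */
	return 0;
}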
| 845 | |
Dmitry V. Levin | 22eab110 | 2015-12-01 00:54:36 +0300 | [diff] [blame] | 846 | static inline unsigned long get_nr_restart_syscall(const struct pt_regs *regs) |
| 847 | { |
Andy Lutomirski | 609c19a | 2016-07-26 23:12:22 -0700 | [diff] [blame] | 848 | #ifdef CONFIG_IA32_EMULATION |
Oleg Nesterov | b2e9df8 | 2021-02-01 18:47:16 +0100 | [diff] [blame] | 849 | if (current->restart_block.arch_data & TS_COMPAT) |
Dmitry V. Levin | 95d97ad | 2015-12-17 23:56:52 +0000 | [diff] [blame] | 850 | return __NR_ia32_restart_syscall; |
| 851 | #endif |
| 852 | #ifdef CONFIG_X86_X32_ABI |
| 853 | return __NR_restart_syscall | (regs->orig_ax & __X32_SYSCALL_BIT); |
| 854 | #else |
Dmitry V. Levin | 22eab110 | 2015-12-01 00:54:36 +0300 | [diff] [blame] | 855 | return __NR_restart_syscall; |
Dmitry V. Levin | 95d97ad | 2015-12-17 23:56:52 +0000 | [diff] [blame] | 856 | #endif |
Dmitry V. Levin | 22eab110 | 2015-12-01 00:54:36 +0300 | [diff] [blame] | 857 | } |
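/*
 * [Editor's sketch] The selection logic of get_nr_restart_syscall(),
 * restated with local stand-in constants so it can be compiled in user
 * space. The MOCK_* names are illustrative, not kernel symbols; the
 * values mirror the x86 syscall tables (__X32_SYSCALL_BIT is 0x40000000,
 * restart_syscall is 219 on x86-64 and 0 on ia32).
 */
#include <stdbool.h>
#include <stdio.h>

#define MOCK_X32_SYSCALL_BIT	0x40000000UL
#define MOCK_NR_RESTART_64	219UL
#define MOCK_NR_RESTART_IA32	0UL

/* saved_nr plays the role of regs->orig_ax, compat of the TS_COMPAT test */
static unsigned long pick_restart_nr(unsigned long saved_nr, bool compat)
{
	if (compat)
		return MOCK_NR_RESTART_IA32;
	/* keep the x32 marker bit so dispatch stays on the x32 entry path */
	return MOCK_NR_RESTART_64 | (saved_nr & MOCK_X32_SYSCALL_BIT);
}

int main(void)
{
	/* an x32 syscall number keeps bit 30: prints 0x400000db */
	printf("%#lx\n", pick_restart_nr(MOCK_X32_SYSCALL_BIT | 1, false));
	/* an ia32 task restarts via entry 0 of the 32-bit table: prints 0 */
	printf("%lu\n", pick_restart_nr(3, true));
	return 0;
}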
Hiroshi Shimamoto | 5791775 | 2008-10-29 18:46:40 -0700 | [diff] [blame] | 858 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 859 | /* |
| 860 | * Note that 'init' is a special process: it doesn't get signals it doesn't |
| 861 | * want to handle. Thus you cannot kill init with a SIGKILL, even by |
| 862 | * mistake. |
| 863 | */ |
Jens Axboe | 12db8b6 | 2020-10-26 14:32:28 -0600 | [diff] [blame] | 864 | void arch_do_signal_or_restart(struct pt_regs *regs, bool has_signal) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 865 | { |
Al Viro | 235b802 | 2012-11-09 23:51:47 -0500 | [diff] [blame] | 866 | struct ksignal ksig; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 867 | |
Jens Axboe | 12db8b6 | 2020-10-26 14:32:28 -0600 | [diff] [blame] | 868 | if (has_signal && get_signal(&ksig)) { |
Ingo Molnar | 7e907f4 | 2008-03-06 10:33:08 +0100 | [diff] [blame] | 869 | /* Whee! Actually deliver the signal. */ |
Al Viro | 235b802 | 2012-11-09 23:51:47 -0500 | [diff] [blame] | 870 | handle_signal(&ksig, regs); |
David Howells | 283828f | 2006-01-18 17:44:00 -0800 | [diff] [blame] | 871 | return; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 872 | } |
| 873 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 874 | /* Did we come from a system call? */ |
H. Peter Anvin | 9ddcb87 | 2021-05-10 11:53:15 -0700 | [diff] [blame] | 875 | if (syscall_get_nr(current, regs) != -1) { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 876 | /* Restart the system call - no handlers present */ |
Hiroshi Shimamoto | bb57925 | 2008-09-05 16:26:55 -0700 | [diff] [blame] | 877 | switch (syscall_get_error(current, regs)) { |
David Howells | 283828f | 2006-01-18 17:44:00 -0800 | [diff] [blame] | 878 | case -ERESTARTNOHAND: |
| 879 | case -ERESTARTSYS: |
| 880 | case -ERESTARTNOINTR: |
H. Peter Anvin | 65ea5b0 | 2008-01-30 13:30:56 +0100 | [diff] [blame] | 881 | regs->ax = regs->orig_ax; |
| 882 | regs->ip -= 2; /* rewind past the 2-byte syscall/int80 insn */ |
David Howells | 283828f | 2006-01-18 17:44:00 -0800 | [diff] [blame] | 883 | break; |
| 884 | |
| 885 | case -ERESTART_RESTARTBLOCK: |
Dmitry V. Levin | 22eab110 | 2015-12-01 00:54:36 +0300 | [diff] [blame] | 886 | regs->ax = get_nr_restart_syscall(regs); |
H. Peter Anvin | 65ea5b0 | 2008-01-30 13:30:56 +0100 | [diff] [blame] | 887 | regs->ip -= 2; |
David Howells | 283828f | 2006-01-18 17:44:00 -0800 | [diff] [blame] | 888 | break; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 889 | } |
| 890 | } |
David Howells | 283828f | 2006-01-18 17:44:00 -0800 | [diff] [blame] | 891 | |
Harvey Harrison | ac66f3f | 2008-02-08 12:09:58 -0800 | [diff] [blame] | 892 | /* |
| 893 | * If there's no signal to deliver, we just put the saved sigmask |
| 894 | * back. |
| 895 | */ |
Al Viro | 51a7b44 | 2012-05-21 23:33:55 -0400 | [diff] [blame] | 896 | restore_saved_sigmask(); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 897 | } |
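/*
 * [Editor's example] The -ERESTART_RESTARTBLOCK leg above is what makes
 * job control invisible to a sleeping task. A user-space sketch (not part
 * of this file): SIGSTOP/SIGCONT have no handler, so the child's
 * nanosleep() is resumed via restart_syscall() with its remaining time
 * intact and eventually returns 0 rather than -1/EINTR.
 */
#include <signal.h>
#include <stdio.h>
#include <sys/wait.h>
#include <time.h>
#include <unistd.h>

int main(void)
{
	pid_t pid = fork();

	if (pid == 0) {
		struct timespec req = { 2, 0 };		/* sleep 2s */
		int ret = nanosleep(&req, NULL);

		printf("child: nanosleep returned %d\n", ret);	/* 0: restarted */
		_exit(0);
	}
	sleep(1);		/* let the child block inside nanosleep() */
	kill(pid, SIGSTOP);	/* no handler: kernel sees -ERESTART_RESTARTBLOCK */
	kill(pid, SIGCONT);	/* resume; restart_syscall() finishes the sleep */
	waitpid(pid, NULL, 0);
	return 0;
}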
| 898 | |
Hiroshi Shimamoto | 72fa50f | 2008-09-05 16:27:11 -0700 | [diff] [blame] | 899 | void signal_fault(struct pt_regs *regs, void __user *frame, char *where) |
| 900 | { |
| 901 | struct task_struct *me = current; |
| 902 | |
| 903 | if (show_unhandled_signals && printk_ratelimit()) { |
Hiroshi Shimamoto | ae417bb | 2008-12-16 14:02:16 -0800 | [diff] [blame] | 904 | printk("%s" |
Hiroshi Shimamoto | 72fa50f | 2008-09-05 16:27:11 -0700 | [diff] [blame] | 905 | "%s[%d] bad frame in %s frame:%p ip:%lx sp:%lx orax:%lx", |
Hiroshi Shimamoto | ae417bb | 2008-12-16 14:02:16 -0800 | [diff] [blame] | 906 | task_pid_nr(current) > 1 ? KERN_INFO : KERN_EMERG, |
Hiroshi Shimamoto | 72fa50f | 2008-09-05 16:27:11 -0700 | [diff] [blame] | 907 | me->comm, me->pid, where, frame, |
| 908 | regs->ip, regs->sp, regs->orig_ax); |
Markus Trippelsdorf | 1c99a68 | 2017-04-07 14:09:04 +0200 | [diff] [blame] | 909 | print_vma_addr(KERN_CONT " in ", regs->ip); |
Joe Perches | c767a54 | 2012-05-21 19:50:07 -0700 | [diff] [blame] | 910 | pr_cont("\n"); |
Hiroshi Shimamoto | 72fa50f | 2008-09-05 16:27:11 -0700 | [diff] [blame] | 911 | } |
| 912 | |
Eric W. Biederman | 3cf5d07 | 2019-05-23 10:17:27 -0500 | [diff] [blame] | 913 | force_sig(SIGSEGV); |
Hiroshi Shimamoto | 72fa50f | 2008-09-05 16:27:11 -0700 | [diff] [blame] | 914 | } |
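/*
 * [Editor's note] show_unhandled_signals above is exposed as the
 * debug.exception-trace sysctl. A minimal sketch for enabling it from
 * user space (assumes the usual /proc mount; needs root):
 */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/sys/debug/exception-trace", "w");

	if (!f) {
		perror("fopen");
		return 1;
	}
	fputs("1\n", f);	/* ratelimited "bad frame" lines then reach the log */
	return fclose(f) ? 1 : 0;
}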
H. Peter Anvin | c5a3739 | 2012-02-19 09:41:09 -0800 | [diff] [blame] | 915 | |
Thomas Gleixner | 3aac3eb | 2021-10-21 15:55:06 -0700 | [diff] [blame] | 916 | #ifdef CONFIG_DYNAMIC_SIGFRAME |
| 917 | #ifdef CONFIG_STRICT_SIGALTSTACK_SIZE |
| 918 | static bool strict_sigaltstack_size __ro_after_init = true; |
| 919 | #else |
| 920 | static bool strict_sigaltstack_size __ro_after_init = false; |
| 921 | #endif |
| 922 | |
| 923 | static int __init strict_sas_size(char *arg) |
| 924 | { |
| 925 | return kstrtobool(arg, &strict_sigaltstack_size); |
| 926 | } |
| 927 | __setup("strict_sas_size", strict_sas_size); |
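/*
 * [Editor's note] The __setup() above registers a boot-time flag: appending
 *
 *	strict_sas_size=1
 *
 * (kstrtobool() also accepts y/n/on/off) to the kernel command line forces
 * the full signal-frame size check in sigaltstack_size_valid() below even
 * when CONFIG_STRICT_SIGALTSTACK_SIZE is not set.
 */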
| 928 | |
| 929 | /* |
| 930 | * MINSIGSTKSZ is 2048 and can't be changed despite the fact that AVX512 |
| 931 | * exceeds that size already. Programs with such undersized stacks might |
| 932 | * never actually take a signal on them and so continued to work. While |
| 933 | * always checking against the real size would be correct, this might be considered a regression. |
| 934 | * |
Thomas Gleixner | 4b7ca60 | 2021-10-21 15:55:12 -0700 | [diff] [blame] | 935 | * Therefore the sanity check is skipped unless enforced by the kernel |
| 936 | * configuration or a command line option. |
| 937 | * |
| 938 | * When dynamic FPU features are supported, the check is also enforced when |
| 939 | * the task has permissions to use dynamic features. Tasks which have no |
| 940 | * permission are checked against the size of the non-dynamic feature set |
| 941 | * if strict checking is enabled. This avoids forcing all tasks on the |
| 942 | * system to allocate large sigaltstacks even if they are never going |
| 943 | * to use a dynamic feature. As this is serialized via sighand::siglock |
| 944 | * any permission request for a dynamic feature either happened already |
| 945 | * or will see the newly installed sigaltstack size in the permission checks. |
Thomas Gleixner | 3aac3eb | 2021-10-21 15:55:06 -0700 | [diff] [blame] | 946 | */ |
| 947 | bool sigaltstack_size_valid(size_t ss_size) |
| 948 | { |
Thomas Gleixner | 4b7ca60 | 2021-10-21 15:55:12 -0700 | [diff] [blame] | 949 | unsigned long fsize = max_frame_size - fpu_default_state_size; |
| 950 | u64 mask; |
| 951 | |
Thomas Gleixner | 3aac3eb | 2021-10-21 15:55:06 -0700 | [diff] [blame] | 952 | lockdep_assert_held(&current->sighand->siglock); |
| 953 | |
Thomas Gleixner | 4b7ca60 | 2021-10-21 15:55:12 -0700 | [diff] [blame] | 954 | if (!fpu_state_size_dynamic() && !strict_sigaltstack_size) |
| 955 | return true; |
| 956 | |
| 957 | fsize += current->group_leader->thread.fpu.perm.__user_state_size; |
| 958 | if (likely(ss_size > fsize)) |
| 959 | return true; |
| 960 | |
Thomas Gleixner | 3aac3eb | 2021-10-21 15:55:06 -0700 | [diff] [blame] | 961 | if (strict_sigaltstack_size) |
Thomas Gleixner | 4b7ca60 | 2021-10-21 15:55:12 -0700 | [diff] [blame] | 962 | return ss_size > fsize; |
| 963 | |
| 964 | mask = current->group_leader->thread.fpu.perm.__state_perm; |
| 965 | if (mask & XFEATURE_MASK_USER_DYNAMIC) |
| 966 | return ss_size > fsize; |
Thomas Gleixner | 3aac3eb | 2021-10-21 15:55:06 -0700 | [diff] [blame] | 967 | |
| 968 | return true; |
| 969 | } |
| 970 | #endif /* CONFIG_DYNAMIC_SIGFRAME */ |
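/*
 * [Editor's example] The check above is why an undersized sigaltstack()
 * fails with -ENOMEM on machines with large (e.g. AMX) signal frames. A
 * user-space sketch, assuming a libc that exposes AT_MINSIGSTKSZ through
 * getauxval(): size the alternate stack from the auxiliary vector rather
 * than the legacy MINSIGSTKSZ constant.
 */
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/auxv.h>

int main(void)
{
	unsigned long min = getauxval(AT_MINSIGSTKSZ);	/* 0 on old kernels */
	stack_t ss;

	if (min < MINSIGSTKSZ)
		min = MINSIGSTKSZ;
	ss.ss_size = min + SIGSTKSZ;	/* headroom beyond the bare frame */
	ss.ss_sp = malloc(ss.ss_size);
	ss.ss_flags = 0;
	if (!ss.ss_sp || sigaltstack(&ss, NULL))
		perror("sigaltstack");	/* kernel ran sigaltstack_size_valid() */
	else
		printf("%zu-byte alternate stack installed\n", ss.ss_size);
	return 0;
}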
| 971 | |
H. Peter Anvin | c5a3739 | 2012-02-19 09:41:09 -0800 | [diff] [blame] | 972 | #ifdef CONFIG_X86_X32_ABI |
Brian Gerst | 27dd84f | 2020-03-13 15:51:31 -0400 | [diff] [blame] | 973 | COMPAT_SYSCALL_DEFINE0(x32_rt_sigreturn) |
H. Peter Anvin | c5a3739 | 2012-02-19 09:41:09 -0800 | [diff] [blame] | 974 | { |
Al Viro | 3fe26fa | 2012-11-12 14:32:42 -0500 | [diff] [blame] | 975 | struct pt_regs *regs = current_pt_regs(); |
H. Peter Anvin | c5a3739 | 2012-02-19 09:41:09 -0800 | [diff] [blame] | 976 | struct rt_sigframe_x32 __user *frame; |
| 977 | sigset_t set; |
Andy Lutomirski | 6c25da5 | 2016-02-16 15:09:03 -0800 | [diff] [blame] | 978 | unsigned long uc_flags; |
H. Peter Anvin | c5a3739 | 2012-02-19 09:41:09 -0800 | [diff] [blame] | 979 | |
| 980 | frame = (struct rt_sigframe_x32 __user *)(regs->sp - 8); |
| 981 | |
Linus Torvalds | 96d4f26 | 2019-01-03 18:57:57 -0800 | [diff] [blame] | 982 | if (!access_ok(frame, sizeof(*frame))) |
H. Peter Anvin | c5a3739 | 2012-02-19 09:41:09 -0800 | [diff] [blame] | 983 | goto badframe; |
Al Viro | 71c3313 | 2020-02-15 11:43:18 -0500 | [diff] [blame] | 984 | if (__get_user(set.sig[0], (__u64 __user *)&frame->uc.uc_sigmask)) |
H. Peter Anvin | c5a3739 | 2012-02-19 09:41:09 -0800 | [diff] [blame] | 985 | goto badframe; |
Andy Lutomirski | 6c25da5 | 2016-02-16 15:09:03 -0800 | [diff] [blame] | 986 | if (__get_user(uc_flags, &frame->uc.uc_flags)) |
| 987 | goto badframe; |
H. Peter Anvin | c5a3739 | 2012-02-19 09:41:09 -0800 | [diff] [blame] | 988 | |
H. Peter Anvin | c5a3739 | 2012-02-19 09:41:09 -0800 | [diff] [blame] | 989 | set_current_blocked(&set); |
| 990 | |
Thomas Gleixner | ee4ecdf | 2021-09-08 15:29:35 +0200 | [diff] [blame] | 991 | if (!restore_sigcontext(regs, &frame->uc.uc_mcontext, uc_flags)) |
H. Peter Anvin | c5a3739 | 2012-02-19 09:41:09 -0800 | [diff] [blame] | 992 | goto badframe; |
| 993 | |
Al Viro | 9026843 | 2012-12-14 14:47:53 -0500 | [diff] [blame] | 994 | if (compat_restore_altstack(&frame->uc.uc_stack)) |
H. Peter Anvin | c5a3739 | 2012-02-19 09:41:09 -0800 | [diff] [blame] | 995 | goto badframe; |
| 996 | |
Brian Gerst | 6a3713f | 2015-04-04 08:58:23 -0400 | [diff] [blame] | 997 | return regs->ax; |
H. Peter Anvin | c5a3739 | 2012-02-19 09:41:09 -0800 | [diff] [blame] | 998 | |
| 999 | badframe: |
| 1000 | signal_fault(regs, frame, "x32 rt_sigreturn"); |
| 1001 | return 0; |
| 1002 | } |
| 1003 | #endif |
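/*
 * [Editor's sketch] The sigreturn entry points above are reached through
 * the SA_RESTORER trampoline that the C library installs on x86-64 and
 * x32 (glibc's __restore_rt). Roughly what such a trampoline amounts to,
 * shown for x86-64 and illustrative only:
 */
__asm__(
	".text\n"
	".globl my_restore_rt\n"
	"my_restore_rt:\n\t"
	"movq $15, %rax\n\t"	/* __NR_rt_sigreturn on x86-64 */
	"syscall\n");

/*
 * A handler registered via the raw rt_sigaction(2) syscall would pass this
 * address in sa_restorer with SA_RESTORER set in sa_flags; libc does that
 * automatically for sigaction(3) callers.
 */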