/*
 * Copyright (C) 1991, 1992 Linus Torvalds
 * Copyright (C) 2000, 2001, 2002 Andi Kleen SuSE Labs
 *
 * 1997-11-28 Modified for POSIX.1b signals by Richard Henderson
 * 2000-06-20 Pentium III FXSR, SSE support by Gareth Hughes
 * 2000-2002 x86-64 support by Andi Kleen
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/wait.h>
#include <linux/tracehook.h>
#include <linux/unistd.h>
#include <linux/stddef.h>
#include <linux/personality.h>
#include <linux/uaccess.h>
#include <linux/user-return-notifier.h>
#include <linux/uprobes.h>
#include <linux/context_tracking.h>

#include <asm/processor.h>
#include <asm/ucontext.h>
#include <asm/fpu/internal.h>
#include <asm/fpu/signal.h>
#include <asm/vdso.h>
#include <asm/mce.h>
#include <asm/sighandling.h>
#include <asm/vm86.h>

#ifdef CONFIG_X86_64
#include <asm/proto.h>
#include <asm/ia32_unistd.h>
#endif /* CONFIG_X86_64 */

#include <asm/syscall.h>
#include <asm/syscalls.h>

#include <asm/sigframe.h>
#include <asm/signal.h>

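/*
 * Helpers for moving sigcontext fields to/from pt_regs.  They rely on
 * get_user_ex() and therefore must be used inside a get_user_try /
 * get_user_catch section, which is where the fault handling lives.
 */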
#define COPY(x)	do {				\
	get_user_ex(regs->x, &sc->x);		\
} while (0)

#define GET_SEG(seg)	({			\
	unsigned short tmp;			\
	get_user_ex(tmp, &sc->seg);		\
	tmp;					\
})

#define COPY_SEG(seg)	do {			\
	regs->seg = GET_SEG(seg);		\
} while (0)

#define COPY_SEG_CPL3(seg)	do {		\
	regs->seg = GET_SEG(seg) | 3;		\
} while (0)

#ifdef CONFIG_X86_64
/*
 * If regs->ss will cause an IRET fault, change it.  Otherwise leave it
 * alone.  Using this generally makes no sense unless
 * user_64bit_mode(regs) would return true.
 */
static void force_valid_ss(struct pt_regs *regs)
{
	u32 ar;
	asm volatile ("lar %[old_ss], %[ar]\n\t"
		      "jz 1f\n\t"		/* If invalid: */
		      "xorl %[ar], %[ar]\n\t"	/* set ar = 0 */
		      "1:"
		      : [ar] "=r" (ar)
		      : [old_ss] "rm" ((u16)regs->ss));

	/*
	 * For a valid 64-bit user context, we need DPL 3, type
	 * read-write data or read-write exp-down data, and S and P
	 * set.  We can't use VERW because VERW doesn't check the
	 * P bit.
	 */
	ar &= AR_DPL_MASK | AR_S | AR_P | AR_TYPE_MASK;
	if (ar != (AR_DPL3 | AR_S | AR_P | AR_TYPE_RWDATA) &&
	    ar != (AR_DPL3 | AR_S | AR_P | AR_TYPE_RWDATA_EXPDOWN))
		regs->ss = __USER_DS;
}
#endif

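/*
 * Restore register state from the sigcontext written at signal
 * delivery, then restore the FPU state via fpu__restore_sig().
 * Returns nonzero if any user-space access faulted.
 */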
static int restore_sigcontext(struct pt_regs *regs,
			      struct sigcontext __user *sc,
			      unsigned long uc_flags)
{
	unsigned long buf_val;
	void __user *buf;
	unsigned int tmpflags;
	unsigned int err = 0;

	/* Always make any pending restarted system calls return -EINTR */
	current->restart_block.fn = do_no_restart_syscall;

	get_user_try {

#ifdef CONFIG_X86_32
		set_user_gs(regs, GET_SEG(gs));
		COPY_SEG(fs);
		COPY_SEG(es);
		COPY_SEG(ds);
#endif /* CONFIG_X86_32 */

		COPY(di); COPY(si); COPY(bp); COPY(sp); COPY(bx);
		COPY(dx); COPY(cx); COPY(ip); COPY(ax);

#ifdef CONFIG_X86_64
		COPY(r8);
		COPY(r9);
		COPY(r10);
		COPY(r11);
		COPY(r12);
		COPY(r13);
		COPY(r14);
		COPY(r15);
#endif /* CONFIG_X86_64 */

		COPY_SEG_CPL3(cs);
		COPY_SEG_CPL3(ss);

#ifdef CONFIG_X86_64
		/*
		 * Fix up SS if needed for the benefit of old DOSEMU and
		 * CRIU.
		 */
		if (unlikely(!(uc_flags & UC_STRICT_RESTORE_SS) &&
			     user_64bit_mode(regs)))
			force_valid_ss(regs);
#endif

		get_user_ex(tmpflags, &sc->flags);
		regs->flags = (regs->flags & ~FIX_EFLAGS) | (tmpflags & FIX_EFLAGS);
		regs->orig_ax = -1;		/* disable syscall checks */

		get_user_ex(buf_val, &sc->fpstate);
		buf = (void __user *)buf_val;
	} get_user_catch(err);

	err |= fpu__restore_sig(buf, IS_ENABLED(CONFIG_X86_32));

	force_iret();

	return err;
}

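/*
 * Fill in a user-space sigcontext from the current register state,
 * including the trap number, error code, CR2 and a pointer to the
 * saved FP/extended state.
 */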
int setup_sigcontext(struct sigcontext __user *sc, void __user *fpstate,
		     struct pt_regs *regs, unsigned long mask)
{
	int err = 0;

	put_user_try {

#ifdef CONFIG_X86_32
		put_user_ex(get_user_gs(regs), (unsigned int __user *)&sc->gs);
		put_user_ex(regs->fs, (unsigned int __user *)&sc->fs);
		put_user_ex(regs->es, (unsigned int __user *)&sc->es);
		put_user_ex(regs->ds, (unsigned int __user *)&sc->ds);
#endif /* CONFIG_X86_32 */

		put_user_ex(regs->di, &sc->di);
		put_user_ex(regs->si, &sc->si);
		put_user_ex(regs->bp, &sc->bp);
		put_user_ex(regs->sp, &sc->sp);
		put_user_ex(regs->bx, &sc->bx);
		put_user_ex(regs->dx, &sc->dx);
		put_user_ex(regs->cx, &sc->cx);
		put_user_ex(regs->ax, &sc->ax);
#ifdef CONFIG_X86_64
		put_user_ex(regs->r8, &sc->r8);
		put_user_ex(regs->r9, &sc->r9);
		put_user_ex(regs->r10, &sc->r10);
		put_user_ex(regs->r11, &sc->r11);
		put_user_ex(regs->r12, &sc->r12);
		put_user_ex(regs->r13, &sc->r13);
		put_user_ex(regs->r14, &sc->r14);
		put_user_ex(regs->r15, &sc->r15);
#endif /* CONFIG_X86_64 */

		put_user_ex(current->thread.trap_nr, &sc->trapno);
		put_user_ex(current->thread.error_code, &sc->err);
		put_user_ex(regs->ip, &sc->ip);
#ifdef CONFIG_X86_32
		put_user_ex(regs->cs, (unsigned int __user *)&sc->cs);
		put_user_ex(regs->flags, &sc->flags);
		put_user_ex(regs->sp, &sc->sp_at_signal);
		put_user_ex(regs->ss, (unsigned int __user *)&sc->ss);
#else /* !CONFIG_X86_32 */
		put_user_ex(regs->flags, &sc->flags);
		put_user_ex(regs->cs, &sc->cs);
		put_user_ex(0, &sc->gs);
		put_user_ex(0, &sc->fs);
		put_user_ex(regs->ss, &sc->ss);
#endif /* CONFIG_X86_32 */

		put_user_ex(fpstate, &sc->fpstate);

		/* non-iBCS2 extensions.. */
		put_user_ex(mask, &sc->oldmask);
		put_user_ex(current->thread.cr2, &sc->cr2);
	} put_user_catch(err);

	return err;
}

/*
 * Set up a signal frame.
 */

/*
 * Determine which stack to use..
 */
static unsigned long align_sigframe(unsigned long sp)
{
#ifdef CONFIG_X86_32
	/*
	 * Align the stack pointer according to the i386 ABI,
	 * i.e. so that on function entry ((sp + 4) & 15) == 0.
	 */
	sp = ((sp + 4) & -16ul) - 4;
#else /* !CONFIG_X86_32 */
	sp = round_down(sp, 16) - 8;
#endif
	return sp;
}

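/*
 * Pick the address of the signal frame: honour SA_ONSTACK (and the
 * legacy 32-bit sa_restorer stack switch), reserve space for the
 * FP/extended state, then align the frame and write that state out.
 * On failure an always-bogus address is returned so the caller dies
 * with SIGSEGV.
 */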
static void __user *
get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size,
	     void __user **fpstate)
{
	/* Default to using normal stack */
	unsigned long math_size = 0;
	unsigned long sp = regs->sp;
	unsigned long buf_fx = 0;
	int onsigstack = on_sig_stack(sp);
	struct fpu *fpu = &current->thread.fpu;

	/* redzone */
	if (IS_ENABLED(CONFIG_X86_64))
		sp -= 128;

	/* This is the X/Open sanctioned signal stack switching.  */
	if (ka->sa.sa_flags & SA_ONSTACK) {
		if (sas_ss_flags(sp) == 0)
			sp = current->sas_ss_sp + current->sas_ss_size;
	} else if (IS_ENABLED(CONFIG_X86_32) &&
		   !onsigstack &&
		   (regs->ss & 0xffff) != __USER_DS &&
		   !(ka->sa.sa_flags & SA_RESTORER) &&
		   ka->sa.sa_restorer) {
		/* This is the legacy signal stack switching. */
		sp = (unsigned long) ka->sa.sa_restorer;
	}

	if (fpu->fpstate_active) {
		sp = fpu__alloc_mathframe(sp, IS_ENABLED(CONFIG_X86_32),
					  &buf_fx, &math_size);
		*fpstate = (void __user *)sp;
	}

	sp = align_sigframe(sp - frame_size);

	/*
	 * If we are on the alternate signal stack and would overflow it, don't.
	 * Return an always-bogus address instead so we will die with SIGSEGV.
	 */
	if (onsigstack && !likely(on_sig_stack(sp)))
		return (void __user *)-1L;

	/* save i387 and extended state */
	if (fpu->fpstate_active &&
	    copy_fpstate_to_sigframe(*fpstate, (void __user *)buf_fx, math_size) < 0)
		return (void __user *)-1L;

	return (void __user *)sp;
}

#ifdef CONFIG_X86_32
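/*
 * Historical int $0x80 return trampolines copied onto the signal
 * frame.  They are no longer executed (the vDSO or sa_restorer is used
 * instead); gdb still recognizes them as a signature for signal
 * handler stack frames.
 */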
static const struct {
	u16 poplmovl;
	u32 val;
	u16 int80;
} __attribute__((packed)) retcode = {
	0xb858,		/* popl %eax; movl $..., %eax */
	__NR_sigreturn,
	0x80cd,		/* int $0x80 */
};

static const struct {
	u8  movl;
	u32 val;
	u16 int80;
	u8  pad;
} __attribute__((packed)) rt_retcode = {
	0xb8,		/* movl $..., %eax */
	__NR_rt_sigreturn,
	0x80cd,		/* int $0x80 */
	0
};

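/*
 * Build the old-style (non-RT) 32-bit signal frame and point the
 * registers at the handler.
 */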
static int
__setup_frame(int sig, struct ksignal *ksig, sigset_t *set,
	      struct pt_regs *regs)
{
	struct sigframe __user *frame;
	void __user *restorer;
	int err = 0;
	void __user *fpstate = NULL;

	frame = get_sigframe(&ksig->ka, regs, sizeof(*frame), &fpstate);

	if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
		return -EFAULT;

	if (__put_user(sig, &frame->sig))
		return -EFAULT;

	if (setup_sigcontext(&frame->sc, fpstate, regs, set->sig[0]))
		return -EFAULT;

	if (_NSIG_WORDS > 1) {
		if (__copy_to_user(&frame->extramask, &set->sig[1],
				   sizeof(frame->extramask)))
			return -EFAULT;
	}

	if (current->mm->context.vdso)
		restorer = current->mm->context.vdso +
			vdso_image_32.sym___kernel_sigreturn;
	else
		restorer = &frame->retcode;
	if (ksig->ka.sa.sa_flags & SA_RESTORER)
		restorer = ksig->ka.sa.sa_restorer;

	/* Set up to return from userspace.  */
	err |= __put_user(restorer, &frame->pretcode);

	/*
	 * This is popl %eax ; movl $__NR_sigreturn, %eax ; int $0x80
	 *
	 * WE DO NOT USE IT ANY MORE! It's only left here for historical
	 * reasons and because gdb uses it as a signature to notice
	 * signal handler stack frames.
	 */
	err |= __put_user(*((u64 *)&retcode), (u64 *)frame->retcode);

	if (err)
		return -EFAULT;

	/* Set up registers for signal handler */
	regs->sp = (unsigned long)frame;
	regs->ip = (unsigned long)ksig->ka.sa.sa_handler;
	regs->ax = (unsigned long)sig;
	regs->dx = 0;
	regs->cx = 0;

	regs->ds = __USER_DS;
	regs->es = __USER_DS;
	regs->ss = __USER_DS;
	regs->cs = __USER_CS;

	return 0;
}

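/*
 * Build the 32-bit rt signal frame: siginfo plus ucontext, returned
 * through the vDSO rt_sigreturn trampoline unless SA_RESTORER is set.
 */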
static int __setup_rt_frame(int sig, struct ksignal *ksig,
			    sigset_t *set, struct pt_regs *regs)
{
	struct rt_sigframe __user *frame;
	void __user *restorer;
	int err = 0;
	void __user *fpstate = NULL;

	frame = get_sigframe(&ksig->ka, regs, sizeof(*frame), &fpstate);

	if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
		return -EFAULT;

	put_user_try {
		put_user_ex(sig, &frame->sig);
		put_user_ex(&frame->info, &frame->pinfo);
		put_user_ex(&frame->uc, &frame->puc);

		/* Create the ucontext.  */
		if (boot_cpu_has(X86_FEATURE_XSAVE))
			put_user_ex(UC_FP_XSTATE, &frame->uc.uc_flags);
		else
			put_user_ex(0, &frame->uc.uc_flags);
		put_user_ex(0, &frame->uc.uc_link);
		save_altstack_ex(&frame->uc.uc_stack, regs->sp);

		/* Set up to return from userspace.  */
		restorer = current->mm->context.vdso +
			vdso_image_32.sym___kernel_rt_sigreturn;
		if (ksig->ka.sa.sa_flags & SA_RESTORER)
			restorer = ksig->ka.sa.sa_restorer;
		put_user_ex(restorer, &frame->pretcode);

		/*
		 * This is movl $__NR_rt_sigreturn, %ax ; int $0x80
		 *
		 * WE DO NOT USE IT ANY MORE! It's only left here for historical
		 * reasons and because gdb uses it as a signature to notice
		 * signal handler stack frames.
		 */
		put_user_ex(*((u64 *)&rt_retcode), (u64 *)frame->retcode);
	} put_user_catch(err);

	err |= copy_siginfo_to_user(&frame->info, &ksig->info);
	err |= setup_sigcontext(&frame->uc.uc_mcontext, fpstate,
				regs, set->sig[0]);
	err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));

	if (err)
		return -EFAULT;

	/* Set up registers for signal handler */
	regs->sp = (unsigned long)frame;
	regs->ip = (unsigned long)ksig->ka.sa.sa_handler;
	regs->ax = (unsigned long)sig;
	regs->dx = (unsigned long)&frame->info;
	regs->cx = (unsigned long)&frame->uc;

	regs->ds = __USER_DS;
	regs->es = __USER_DS;
	regs->ss = __USER_DS;
	regs->cs = __USER_CS;

	return 0;
}
#else /* !CONFIG_X86_32 */
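/*
 * uc_flags advertised in a 64-bit or x32 ucontext: UC_FP_XSTATE when
 * the FPU saves extended state, plus the UC_SIGCONTEXT_SS /
 * UC_STRICT_RESTORE_SS bits that tell sigreturn how to treat SS.
 */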
static unsigned long frame_uc_flags(struct pt_regs *regs)
{
	unsigned long flags;

	if (boot_cpu_has(X86_FEATURE_XSAVE))
		flags = UC_FP_XSTATE | UC_SIGCONTEXT_SS;
	else
		flags = UC_SIGCONTEXT_SS;

	if (likely(user_64bit_mode(regs)))
		flags |= UC_STRICT_RESTORE_SS;

	return flags;
}

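/*
 * Build the 64-bit rt signal frame.  Unlike the 32-bit paths there is
 * no kernel-provided trampoline, so a usable SA_RESTORER is required.
 */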
static int __setup_rt_frame(int sig, struct ksignal *ksig,
			    sigset_t *set, struct pt_regs *regs)
{
	struct rt_sigframe __user *frame;
	void __user *fp = NULL;
	int err = 0;

	frame = get_sigframe(&ksig->ka, regs, sizeof(struct rt_sigframe), &fp);

	if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
		return -EFAULT;

	if (ksig->ka.sa.sa_flags & SA_SIGINFO) {
		if (copy_siginfo_to_user(&frame->info, &ksig->info))
			return -EFAULT;
	}

	put_user_try {
		/* Create the ucontext.  */
		put_user_ex(frame_uc_flags(regs), &frame->uc.uc_flags);
		put_user_ex(0, &frame->uc.uc_link);
		save_altstack_ex(&frame->uc.uc_stack, regs->sp);

		/* Set up to return from userspace.  If provided, use a stub
		   already in userspace.  */
		/* x86-64 should always use SA_RESTORER. */
		if (ksig->ka.sa.sa_flags & SA_RESTORER) {
			put_user_ex(ksig->ka.sa.sa_restorer, &frame->pretcode);
		} else {
			/* could use a vstub here */
			err |= -EFAULT;
		}
	} put_user_catch(err);

	err |= setup_sigcontext(&frame->uc.uc_mcontext, fp, regs, set->sig[0]);
	err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));

	if (err)
		return -EFAULT;

	/* Set up registers for signal handler */
	regs->di = sig;
	/* In case the signal handler was declared without prototypes */
	regs->ax = 0;

	/* This also works for non SA_SIGINFO handlers because they expect the
	   next argument after the signal number on the stack. */
	regs->si = (unsigned long)&frame->info;
	regs->dx = (unsigned long)&frame->uc;
	regs->ip = (unsigned long) ksig->ka.sa.sa_handler;

	regs->sp = (unsigned long)frame;

	/*
	 * Set up the CS and SS registers to run signal handlers in
	 * 64-bit mode, even if the handler happens to be interrupting
	 * 32-bit or 16-bit code.
	 *
	 * SS is subtle.  In 64-bit mode, we don't need any particular
	 * SS descriptor, but we do need SS to be valid.  It's possible
	 * that the old SS is entirely bogus -- this can happen if the
	 * signal we're trying to deliver is #GP or #SS caused by a bad
	 * SS value.  We also have a compatibility issue here: DOSEMU
	 * relies on the contents of the SS register indicating the
	 * SS value at the time of the signal, even though that code in
	 * DOSEMU predates sigreturn's ability to restore SS.  (DOSEMU
	 * avoids relying on sigreturn to restore SS; instead it uses
	 * a trampoline.)  So we do our best: if the old SS was valid,
	 * we keep it.  Otherwise we replace it.
	 */
	regs->cs = __USER_CS;

	if (unlikely(regs->ss != __USER_DS))
		force_valid_ss(regs);

	return 0;
}
#endif /* CONFIG_X86_32 */

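/*
 * Build the x32 rt signal frame: 64-bit register state with a compat
 * siginfo, handler arguments passed per the x32 calling convention.
 */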
static int x32_setup_rt_frame(struct ksignal *ksig,
			      compat_sigset_t *set,
			      struct pt_regs *regs)
{
#ifdef CONFIG_X86_X32_ABI
	struct rt_sigframe_x32 __user *frame;
	void __user *restorer;
	int err = 0;
	void __user *fpstate = NULL;

	frame = get_sigframe(&ksig->ka, regs, sizeof(*frame), &fpstate);

	if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
		return -EFAULT;

	if (ksig->ka.sa.sa_flags & SA_SIGINFO) {
		if (__copy_siginfo_to_user32(&frame->info, &ksig->info, true))
			return -EFAULT;
	}

	put_user_try {
		/* Create the ucontext.  */
		put_user_ex(frame_uc_flags(regs), &frame->uc.uc_flags);
		put_user_ex(0, &frame->uc.uc_link);
		compat_save_altstack_ex(&frame->uc.uc_stack, regs->sp);
		put_user_ex(0, &frame->uc.uc__pad0);

		if (ksig->ka.sa.sa_flags & SA_RESTORER) {
			restorer = ksig->ka.sa.sa_restorer;
		} else {
			/* could use a vstub here */
			restorer = NULL;
			err |= -EFAULT;
		}
		put_user_ex(restorer, &frame->pretcode);
	} put_user_catch(err);

	err |= setup_sigcontext(&frame->uc.uc_mcontext, fpstate,
				regs, set->sig[0]);
	err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));

	if (err)
		return -EFAULT;

	/* Set up registers for signal handler */
	regs->sp = (unsigned long) frame;
	regs->ip = (unsigned long) ksig->ka.sa.sa_handler;

	/* We use the x32 calling convention here... */
	regs->di = ksig->sig;
	regs->si = (unsigned long) &frame->info;
	regs->dx = (unsigned long) &frame->uc;

	loadsegment(ds, __USER_DS);
	loadsegment(es, __USER_DS);

	regs->cs = __USER_CS;
	regs->ss = __USER_DS;
#endif /* CONFIG_X86_X32_ABI */

	return 0;
}

/*
 * Do a signal return; undo the signal stack.
 */
#ifdef CONFIG_X86_32
asmlinkage unsigned long sys_sigreturn(void)
{
	struct pt_regs *regs = current_pt_regs();
	struct sigframe __user *frame;
	sigset_t set;

	frame = (struct sigframe __user *)(regs->sp - 8);

	if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
		goto badframe;
	if (__get_user(set.sig[0], &frame->sc.oldmask) || (_NSIG_WORDS > 1
		&& __copy_from_user(&set.sig[1], &frame->extramask,
				    sizeof(frame->extramask))))
		goto badframe;

	set_current_blocked(&set);

	/*
	 * x86_32 has no uc_flags bits relevant to restore_sigcontext.
	 * Save a few cycles by skipping the __get_user.
	 */
	if (restore_sigcontext(regs, &frame->sc, 0))
		goto badframe;
	return regs->ax;

badframe:
	signal_fault(regs, frame, "sigreturn");

	return 0;
}
#endif /* CONFIG_X86_32 */

asmlinkage long sys_rt_sigreturn(void)
{
	struct pt_regs *regs = current_pt_regs();
	struct rt_sigframe __user *frame;
	sigset_t set;
	unsigned long uc_flags;

	frame = (struct rt_sigframe __user *)(regs->sp - sizeof(long));
	if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
		goto badframe;
	if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
		goto badframe;
	if (__get_user(uc_flags, &frame->uc.uc_flags))
		goto badframe;

	set_current_blocked(&set);

	if (restore_sigcontext(regs, &frame->uc.uc_mcontext, uc_flags))
		goto badframe;

	if (restore_altstack(&frame->uc.uc_stack))
		goto badframe;

	return regs->ax;

badframe:
	signal_fault(regs, frame, "rt_sigreturn");
	return 0;
}

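/*
 * Decide, per delivered signal, which frame ABI to use.  The ABI is
 * carried in the handler's sa_flags (SA_IA32_ABI / SA_X32_ABI).
 */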
static inline int is_ia32_compat_frame(struct ksignal *ksig)
{
	return IS_ENABLED(CONFIG_IA32_EMULATION) &&
		ksig->ka.sa.sa_flags & SA_IA32_ABI;
}

static inline int is_ia32_frame(struct ksignal *ksig)
{
	return IS_ENABLED(CONFIG_X86_32) || is_ia32_compat_frame(ksig);
}

static inline int is_x32_frame(struct ksignal *ksig)
{
	return IS_ENABLED(CONFIG_X86_X32_ABI) &&
		ksig->ka.sa.sa_flags & SA_X32_ABI;
}

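/*
 * Dispatch to the ABI-specific frame builder for the signal being
 * delivered.
 */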
static int
setup_rt_frame(struct ksignal *ksig, struct pt_regs *regs)
{
	int usig = ksig->sig;
	sigset_t *set = sigmask_to_save();
	compat_sigset_t *cset = (compat_sigset_t *) set;

	/* Set up the stack frame */
	if (is_ia32_frame(ksig)) {
		if (ksig->ka.sa.sa_flags & SA_SIGINFO)
			return ia32_setup_rt_frame(usig, ksig, cset, regs);
		else
			return ia32_setup_frame(usig, ksig, cset, regs);
	} else if (is_x32_frame(ksig)) {
		return x32_setup_rt_frame(ksig, cset, regs);
	} else {
		return __setup_rt_frame(ksig->sig, ksig, set, regs);
	}
}

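/*
 * Deliver one signal: sort out syscall restart semantics, build the
 * signal frame and register state, and report the result through
 * signal_setup_done().
 */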
static void
handle_signal(struct ksignal *ksig, struct pt_regs *regs)
{
	bool stepping, failed;
	struct fpu *fpu = &current->thread.fpu;

	if (v8086_mode(regs))
		save_v86_state((struct kernel_vm86_regs *) regs, VM86_SIGNAL);

	/* Are we from a system call? */
	if (syscall_get_nr(current, regs) >= 0) {
		/* If so, check system call restarting.. */
		switch (syscall_get_error(current, regs)) {
		case -ERESTART_RESTARTBLOCK:
		case -ERESTARTNOHAND:
			regs->ax = -EINTR;
			break;

		case -ERESTARTSYS:
			if (!(ksig->ka.sa.sa_flags & SA_RESTART)) {
				regs->ax = -EINTR;
				break;
			}
			/* fallthrough */
		case -ERESTARTNOINTR:
			regs->ax = regs->orig_ax;
			regs->ip -= 2;
			break;
		}
	}

	/*
	 * If TF is set due to a debugger (TIF_FORCED_TF), clear TF now
	 * so that register information in the sigcontext is correct and
	 * then notify the tracer before entering the signal handler.
	 */
	stepping = test_thread_flag(TIF_SINGLESTEP);
	if (stepping)
		user_disable_single_step(current);

	failed = (setup_rt_frame(ksig, regs) < 0);
	if (!failed) {
		/*
		 * Clear the direction flag as per the ABI for function entry.
		 *
		 * Clear RF when entering the signal handler, because a
		 * leftover RF could suppress a debug exception inside
		 * the signal handler.
		 *
		 * Clear TF when it was not set by the debugger, to avoid
		 * the recursive send_sigtrap() in a SIGTRAP handler.
		 */
		regs->flags &= ~(X86_EFLAGS_DF|X86_EFLAGS_RF|X86_EFLAGS_TF);
		/*
		 * Ensure the signal handler starts with the new fpu state.
		 */
		if (fpu->fpstate_active)
			fpu__clear(fpu);
	}
	signal_setup_done(failed, ksig, stepping);
}

static inline unsigned long get_nr_restart_syscall(const struct pt_regs *regs)
{
	/*
	 * This function is fundamentally broken as currently
	 * implemented.
	 *
	 * The idea is that we want to trigger a call to the
	 * restart_block() syscall and that we want in_ia32_syscall(),
	 * in_x32_syscall(), etc. to match whatever they were in the
	 * syscall being restarted.  We assume that the syscall
	 * instruction at (regs->ip - 2) matches whatever syscall
	 * instruction we used to enter in the first place.
	 *
	 * The problem is that we can get here when ptrace pokes
	 * syscall-like values into regs even if we're not in a syscall
	 * at all.
	 *
	 * For now, we maintain historical behavior and guess based on
	 * stored state.  We could do better by saving the actual
	 * syscall arch in restart_block or (with caveats on x32) by
	 * checking if regs->ip points to 'int $0x80'.  The current
	 * behavior is incorrect if a tracer has a different bitness
	 * than the tracee.
	 */
#ifdef CONFIG_IA32_EMULATION
	if (current->thread.status & (TS_COMPAT|TS_I386_REGS_POKED))
		return __NR_ia32_restart_syscall;
#endif
#ifdef CONFIG_X86_X32_ABI
	return __NR_restart_syscall | (regs->orig_ax & __X32_SYSCALL_BIT);
#else
	return __NR_restart_syscall;
#endif
}

/*
 * Note that 'init' is a special process: it doesn't get signals it doesn't
 * want to handle. Thus you cannot kill init even with a SIGKILL even by
 * mistake.
 */
void do_signal(struct pt_regs *regs)
{
	struct ksignal ksig;

	if (get_signal(&ksig)) {
		/* Whee! Actually deliver the signal.  */
		handle_signal(&ksig, regs);
		return;
	}

	/* Did we come from a system call? */
	if (syscall_get_nr(current, regs) >= 0) {
		/* Restart the system call - no handlers present */
		switch (syscall_get_error(current, regs)) {
		case -ERESTARTNOHAND:
		case -ERESTARTSYS:
		case -ERESTARTNOINTR:
			regs->ax = regs->orig_ax;
			regs->ip -= 2;
			break;

		case -ERESTART_RESTARTBLOCK:
			regs->ax = get_nr_restart_syscall(regs);
			regs->ip -= 2;
			break;
		}
	}

	/*
	 * If there's no signal to deliver, we just put the saved sigmask
	 * back.
	 */
	restore_saved_sigmask();
}

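/*
 * Report a corrupt signal frame (ratelimited, honouring
 * show_unhandled_signals) and kill the offender with SIGSEGV.
 */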
void signal_fault(struct pt_regs *regs, void __user *frame, char *where)
{
	struct task_struct *me = current;

	if (show_unhandled_signals && printk_ratelimit()) {
		printk("%s"
		       "%s[%d] bad frame in %s frame:%p ip:%lx sp:%lx orax:%lx",
		       task_pid_nr(current) > 1 ? KERN_INFO : KERN_EMERG,
		       me->comm, me->pid, where, frame,
		       regs->ip, regs->sp, regs->orig_ax);
		print_vma_addr(KERN_CONT " in ", regs->ip);
		pr_cont("\n");
	}

	force_sig(SIGSEGV, me);
}

#ifdef CONFIG_X86_X32_ABI
asmlinkage long sys32_x32_rt_sigreturn(void)
{
	struct pt_regs *regs = current_pt_regs();
	struct rt_sigframe_x32 __user *frame;
	sigset_t set;
	unsigned long uc_flags;

	frame = (struct rt_sigframe_x32 __user *)(regs->sp - 8);

	if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
		goto badframe;
	if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
		goto badframe;
	if (__get_user(uc_flags, &frame->uc.uc_flags))
		goto badframe;

	set_current_blocked(&set);

	if (restore_sigcontext(regs, &frame->uc.uc_mcontext, uc_flags))
		goto badframe;

	if (compat_restore_altstack(&frame->uc.uc_stack))
		goto badframe;

	return regs->ax;

badframe:
	signal_fault(regs, frame, "x32 rt_sigreturn");
	return 0;
}
#endif