// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/arch/x86_64/ia32/ia32_signal.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  1997-11-28  Modified for POSIX.1b signals by Richard Henderson
 *  2000-06-20  Pentium III FXSR, SSE support by Gareth Hughes
 *  2000-12-*   x86-64 compatibility mode signal handling by Andi Kleen
 */

#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/wait.h>
#include <linux/unistd.h>
#include <linux/stddef.h>
#include <linux/personality.h>
#include <linux/compat.h>
#include <linux/binfmts.h>
#include <linux/syscalls.h>
#include <asm/ucontext.h>
#include <linux/uaccess.h>
#include <asm/fpu/internal.h>
#include <asm/fpu/signal.h>
#include <asm/ptrace.h>
#include <asm/ia32_unistd.h>
#include <asm/user32.h>
#include <uapi/asm/sigcontext.h>
#include <asm/proto.h>
#include <asm/vdso.h>
#include <asm/sigframe.h>
#include <asm/sighandling.h>
#include <asm/smap.h>

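/*
 * Reload %ds, %es, %fs and %gs from a 32-bit sigcontext, forcing RPL 3 on
 * each selector and skipping any register that already holds the wanted
 * value so the comparatively expensive segment loads are avoided.
 */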
static inline void reload_segments(struct sigcontext_32 *sc)
{
	unsigned int cur;

	savesegment(gs, cur);
	if ((sc->gs | 0x03) != cur)
		load_gs_index(sc->gs | 0x03);
	savesegment(fs, cur);
	if ((sc->fs | 0x03) != cur)
		loadsegment(fs, sc->fs | 0x03);
	savesegment(ds, cur);
	if ((sc->ds | 0x03) != cur)
		loadsegment(ds, sc->ds | 0x03);
	savesegment(es, cur);
	if ((sc->es | 0x03) != cur)
		loadsegment(es, sc->es | 0x03);
}

/*
 * Do a signal return; undo the signal stack.
 */
static int ia32_restore_sigcontext(struct pt_regs *regs,
				   struct sigcontext_32 __user *usc)
{
	struct sigcontext_32 sc;

	/* Always make any pending restarted system calls return -EINTR */
	current->restart_block.fn = do_no_restart_syscall;

	if (unlikely(copy_from_user(&sc, usc, sizeof(sc))))
		return -EFAULT;

	/* Get only the ia32 registers. */
	regs->bx = sc.bx;
	regs->cx = sc.cx;
	regs->dx = sc.dx;
	regs->si = sc.si;
	regs->di = sc.di;
	regs->bp = sc.bp;
	regs->ax = sc.ax;
	regs->sp = sc.sp;
	regs->ip = sc.ip;

	/* Get CS/SS and force CPL3 */
	regs->cs = sc.cs | 0x03;
	regs->ss = sc.ss | 0x03;

	regs->flags = (regs->flags & ~FIX_EFLAGS) | (sc.flags & FIX_EFLAGS);
	/* disable syscall checks */
	regs->orig_ax = -1;

	/*
	 * Reload fs and gs if they have changed in the signal
	 * handler.  This does not handle long fs/gs base changes in
	 * the handler, but at least it does not clobber them in the
	 * normal case.
	 */
	reload_segments(&sc);
	return fpu__restore_sig(compat_ptr(sc.fpstate), 1);
}

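/*
 * The 32-bit sigreturn stub does "popl %eax" before its int $0x80, and the
 * handler's "ret" already consumed the return address, so by the time we
 * get here regs->sp points 8 bytes past the start of the signal frame.
 */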
COMPAT_SYSCALL_DEFINE0(sigreturn)
{
	struct pt_regs *regs = current_pt_regs();
	struct sigframe_ia32 __user *frame = (struct sigframe_ia32 __user *)(regs->sp-8);
	sigset_t set;

	if (!access_ok(frame, sizeof(*frame)))
		goto badframe;
	if (__get_user(set.sig[0], &frame->sc.oldmask)
	    || __get_user(((__u32 *)&set)[1], &frame->extramask[0]))
		goto badframe;

	set_current_blocked(&set);

	if (ia32_restore_sigcontext(regs, &frame->sc))
		goto badframe;
	return regs->ax;

badframe:
	signal_fault(regs, frame, "32bit sigreturn");
	return 0;
}

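/*
 * The rt_sigreturn stub loads %eax with "movl" instead of popping it, so
 * only the handler's return address has been consumed and regs->sp points
 * 4 bytes past the start of the frame.
 */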
COMPAT_SYSCALL_DEFINE0(rt_sigreturn)
{
	struct pt_regs *regs = current_pt_regs();
	struct rt_sigframe_ia32 __user *frame;
	sigset_t set;

	frame = (struct rt_sigframe_ia32 __user *)(regs->sp - 4);

	if (!access_ok(frame, sizeof(*frame)))
		goto badframe;
	if (__get_user(set.sig[0], (__u64 __user *)&frame->uc.uc_sigmask))
		goto badframe;

	set_current_blocked(&set);

	if (ia32_restore_sigcontext(regs, &frame->uc.uc_mcontext))
		goto badframe;

	if (compat_restore_altstack(&frame->uc.uc_stack))
		goto badframe;

	return regs->ax;

badframe:
	signal_fault(regs, frame, "32bit rt sigreturn");
	return 0;
}

/*
 * Set up a signal frame.
 */

#define get_user_seg(seg)	({ unsigned int v; savesegment(seg, v); v; })

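/*
 * Fill in a 32-bit sigcontext on the user stack from the current register
 * state and segment selectors.  All stores go through unsafe_put_user()
 * inside a single user_access_begin()/user_access_end() window.
 */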
static int ia32_setup_sigcontext(struct sigcontext_32 __user *sc,
				 void __user *fpstate,
				 struct pt_regs *regs, unsigned int mask)
{
	if (!user_access_begin(sc, sizeof(struct sigcontext_32)))
		return -EFAULT;

	unsafe_put_user(get_user_seg(gs), (unsigned int __user *)&sc->gs, Efault);
	unsafe_put_user(get_user_seg(fs), (unsigned int __user *)&sc->fs, Efault);
	unsafe_put_user(get_user_seg(ds), (unsigned int __user *)&sc->ds, Efault);
	unsafe_put_user(get_user_seg(es), (unsigned int __user *)&sc->es, Efault);

	unsafe_put_user(regs->di, &sc->di, Efault);
	unsafe_put_user(regs->si, &sc->si, Efault);
	unsafe_put_user(regs->bp, &sc->bp, Efault);
	unsafe_put_user(regs->sp, &sc->sp, Efault);
	unsafe_put_user(regs->bx, &sc->bx, Efault);
	unsafe_put_user(regs->dx, &sc->dx, Efault);
	unsafe_put_user(regs->cx, &sc->cx, Efault);
	unsafe_put_user(regs->ax, &sc->ax, Efault);
	unsafe_put_user(current->thread.trap_nr, &sc->trapno, Efault);
	unsafe_put_user(current->thread.error_code, &sc->err, Efault);
	unsafe_put_user(regs->ip, &sc->ip, Efault);
	unsafe_put_user(regs->cs, (unsigned int __user *)&sc->cs, Efault);
	unsafe_put_user(regs->flags, &sc->flags, Efault);
	unsafe_put_user(regs->sp, &sc->sp_at_signal, Efault);
	unsafe_put_user(regs->ss, (unsigned int __user *)&sc->ss, Efault);

	unsafe_put_user(ptr_to_compat(fpstate), &sc->fpstate, Efault);

	/* non-iBCS2 extensions.. */
	unsafe_put_user(mask, &sc->oldmask, Efault);
	unsafe_put_user(current->thread.cr2, &sc->cr2, Efault);
	user_access_end();
	return 0;

Efault:
	user_access_end();
	return -EFAULT;
}

/*
 * Determine which stack to use.
 */
static void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs,
				 size_t frame_size,
				 void __user **fpstate)
{
	unsigned long sp, fx_aligned, math_size;

	/* Default to using normal stack */
	sp = regs->sp;

	/* This is the X/Open sanctioned signal stack switching.  */
	if (ksig->ka.sa.sa_flags & SA_ONSTACK)
		sp = sigsp(sp, ksig);
	/* This is the legacy signal stack switching. */
	else if (regs->ss != __USER32_DS &&
		 !(ksig->ka.sa.sa_flags & SA_RESTORER) &&
		 ksig->ka.sa.sa_restorer)
		sp = (unsigned long) ksig->ka.sa.sa_restorer;

	sp = fpu__alloc_mathframe(sp, 1, &fx_aligned, &math_size);
	*fpstate = (struct _fpstate_32 __user *) sp;
	if (copy_fpstate_to_sigframe(*fpstate, (void __user *)fx_aligned,
				     math_size) < 0)
		return (void __user *) -1L;

	sp -= frame_size;
	/*
	 * Align the stack pointer according to the i386 ABI,
	 * i.e. so that on function entry ((sp + 4) & 15) == 0.
	 */
	sp = ((sp + 4) & -16ul) - 4;
	return (void __user *) sp;
}

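/*
 * Build the classic (non-RT) 32-bit signal frame: the signal number, a
 * sigcontext, the upper half of the blocked mask in extramask[], and a
 * small int $0x80 sigreturn stub in retcode (normally superseded by the
 * vDSO restorer, but kept as a marker that gdb recognises).
 */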
int ia32_setup_frame(int sig, struct ksignal *ksig,
		     compat_sigset_t *set, struct pt_regs *regs)
{
	struct sigframe_ia32 __user *frame;
	void __user *restorer;
	int err = 0;
	void __user *fpstate = NULL;

	/* copy_to_user optimizes that into a single 8 byte store */
	static const struct {
		u16 poplmovl;
		u32 val;
		u16 int80;
	} __attribute__((packed)) code = {
		0xb858,		 /* popl %eax ; movl $...,%eax */
		__NR_ia32_sigreturn,
		0x80cd,		/* int $0x80 */
	};

	frame = get_sigframe(ksig, regs, sizeof(*frame), &fpstate);

	if (!access_ok(frame, sizeof(*frame)))
		return -EFAULT;

	if (__put_user(sig, &frame->sig))
		return -EFAULT;

	if (ia32_setup_sigcontext(&frame->sc, fpstate, regs, set->sig[0]))
		return -EFAULT;

	if (__put_user(set->sig[1], &frame->extramask[0]))
		return -EFAULT;

	if (ksig->ka.sa.sa_flags & SA_RESTORER) {
		restorer = ksig->ka.sa.sa_restorer;
	} else {
		/* Return stub is in the 32-bit vDSO */
		if (current->mm->context.vdso)
			restorer = current->mm->context.vdso +
				vdso_image_32.sym___kernel_sigreturn;
		else
			restorer = &frame->retcode;
	}

	put_user_try {
		put_user_ex(ptr_to_compat(restorer), &frame->pretcode);

		/*
		 * These are actually not used anymore, but left because some
		 * gdb versions depend on them as a marker.
		 */
		put_user_ex(*((u64 *)&code), (u64 __user *)frame->retcode);
	} put_user_catch(err);

	if (err)
		return -EFAULT;

	/* Set up registers for signal handler */
	regs->sp = (unsigned long) frame;
	regs->ip = (unsigned long) ksig->ka.sa.sa_handler;

	/* Make -mregparm=3 work */
	regs->ax = sig;
	regs->dx = 0;
	regs->cx = 0;

	loadsegment(ds, __USER32_DS);
	loadsegment(es, __USER32_DS);

	regs->cs = __USER32_CS;
	regs->ss = __USER32_DS;

	return 0;
}

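/*
 * Build the RT 32-bit signal frame: the signal number, compat pointers to
 * the siginfo and ucontext that follow, the full siginfo, and a ucontext
 * carrying the sigcontext, the altstack state and the complete blocked mask.
 */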
int ia32_setup_rt_frame(int sig, struct ksignal *ksig,
			compat_sigset_t *set, struct pt_regs *regs)
{
	struct rt_sigframe_ia32 __user *frame;
	void __user *restorer;
	int err = 0;
	void __user *fpstate = NULL;

	/* __copy_to_user optimizes that into a single 8 byte store */
	static const struct {
		u8 movl;
		u32 val;
		u16 int80;
		u8  pad;
	} __attribute__((packed)) code = {
		0xb8,
		__NR_ia32_rt_sigreturn,
		0x80cd,
		0,
	};

	frame = get_sigframe(ksig, regs, sizeof(*frame), &fpstate);

	if (!access_ok(frame, sizeof(*frame)))
		return -EFAULT;

	put_user_try {
		put_user_ex(sig, &frame->sig);
		put_user_ex(ptr_to_compat(&frame->info), &frame->pinfo);
		put_user_ex(ptr_to_compat(&frame->uc), &frame->puc);

		/* Create the ucontext.  */
		if (static_cpu_has(X86_FEATURE_XSAVE))
			put_user_ex(UC_FP_XSTATE, &frame->uc.uc_flags);
		else
			put_user_ex(0, &frame->uc.uc_flags);
		put_user_ex(0, &frame->uc.uc_link);
		compat_save_altstack_ex(&frame->uc.uc_stack, regs->sp);

		if (ksig->ka.sa.sa_flags & SA_RESTORER)
			restorer = ksig->ka.sa.sa_restorer;
		else
			restorer = current->mm->context.vdso +
				vdso_image_32.sym___kernel_rt_sigreturn;
		put_user_ex(ptr_to_compat(restorer), &frame->pretcode);

		/*
		 * Not actually used anymore, but left because some gdb
		 * versions need it.
		 */
		put_user_ex(*((u64 *)&code), (u64 __user *)frame->retcode);
	} put_user_catch(err);

	err |= __copy_siginfo_to_user32(&frame->info, &ksig->info, false);
	err |= ia32_setup_sigcontext(&frame->uc.uc_mcontext, fpstate,
				     regs, set->sig[0]);
	err |= __put_user(*(__u64 *)set, (__u64 __user *)&frame->uc.uc_sigmask);

	if (err)
		return -EFAULT;

	/* Set up registers for signal handler */
	regs->sp = (unsigned long) frame;
	regs->ip = (unsigned long) ksig->ka.sa.sa_handler;

	/* Make -mregparm=3 work */
	regs->ax = sig;
	regs->dx = (unsigned long) &frame->info;
	regs->cx = (unsigned long) &frame->uc;

	loadsegment(ds, __USER32_DS);
	loadsegment(es, __USER32_DS);

	regs->cs = __USER32_CS;
	regs->ss = __USER32_DS;

	return 0;
}