Greg Kroah-Hartman | b244131 | 2017-11-01 15:07:57 +0100 | [diff] [blame] | 1 | // SPDX-License-Identifier: GPL-2.0 |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2 | /* |
| 3 | * linux/arch/x86_64/ia32/ia32_signal.c |
| 4 | * |
| 5 | * Copyright (C) 1991, 1992 Linus Torvalds |
| 6 | * |
| 7 | * 1997-11-28 Modified for POSIX.1b signals by Richard Henderson |
| 8 | * 2000-06-20 Pentium III FXSR, SSE support by Gareth Hughes |
| 9 | * 2000-12-* x86-64 compatibility mode signal handling by Andi Kleen |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 10 | */ |
| 11 | |
| 12 | #include <linux/sched.h> |
Ingo Molnar | 68db0cf | 2017-02-08 18:51:37 +0100 | [diff] [blame] | 13 | #include <linux/sched/task_stack.h> |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 14 | #include <linux/mm.h> |
| 15 | #include <linux/smp.h> |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 16 | #include <linux/kernel.h> |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 17 | #include <linux/errno.h> |
| 18 | #include <linux/wait.h> |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 19 | #include <linux/unistd.h> |
| 20 | #include <linux/stddef.h> |
| 21 | #include <linux/personality.h> |
| 22 | #include <linux/compat.h> |
Andi Kleen | 9fbbd4d | 2007-02-13 13:26:26 +0100 | [diff] [blame] | 23 | #include <linux/binfmts.h> |
Sami Tolvanen | 00198a6 | 2019-10-08 15:40:47 -0700 | [diff] [blame] | 24 | #include <linux/syscalls.h> |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 25 | #include <asm/ucontext.h> |
Linus Torvalds | 7c0f6ba | 2016-12-24 11:46:01 -0800 | [diff] [blame] | 26 | #include <linux/uaccess.h> |
Ingo Molnar | 78f7f1e | 2015-04-24 02:54:44 +0200 | [diff] [blame] | 27 | #include <asm/fpu/internal.h> |
Ingo Molnar | fcbc99c | 2015-04-30 08:45:02 +0200 | [diff] [blame] | 28 | #include <asm/fpu/signal.h> |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 29 | #include <asm/ptrace.h> |
| 30 | #include <asm/ia32_unistd.h> |
| 31 | #include <asm/user32.h> |
Ingo Molnar | decb4c4 | 2015-09-05 09:32:43 +0200 | [diff] [blame] | 32 | #include <uapi/asm/sigcontext.h> |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 33 | #include <asm/proto.h> |
Roland McGrath | af65d64 | 2008-01-30 13:30:43 +0100 | [diff] [blame] | 34 | #include <asm/vdso.h> |
Hiroshi Shimamoto | d98f9d8 | 2008-12-17 18:52:45 -0800 | [diff] [blame] | 35 | #include <asm/sigframe.h> |
H. Peter Anvin | f28f0c2 | 2012-02-19 07:38:43 -0800 | [diff] [blame] | 36 | #include <asm/sighandling.h> |
H. Peter Anvin | 49b8c695 | 2012-09-21 17:18:44 -0700 | [diff] [blame] | 37 | #include <asm/smap.h> |
Hiroshi Shimamoto | d98f9d8 | 2008-12-17 18:52:45 -0800 | [diff] [blame] | 38 | |
Al Viro | 978727c | 2020-02-15 12:23:36 -0500 | [diff] [blame] | 39 | static inline void reload_segments(struct sigcontext_32 *sc) |
| 40 | { |
| 41 | unsigned int cur; |
| 42 | |
| 43 | savesegment(gs, cur); |
| 44 | if ((sc->gs | 0x03) != cur) |
| 45 | load_gs_index(sc->gs | 0x03); |
| 46 | savesegment(fs, cur); |
| 47 | if ((sc->fs | 0x03) != cur) |
| 48 | loadsegment(fs, sc->fs | 0x03); |
| 49 | savesegment(ds, cur); |
| 50 | if ((sc->ds | 0x03) != cur) |
| 51 | loadsegment(ds, sc->ds | 0x03); |
| 52 | savesegment(es, cur); |
| 53 | if ((sc->es | 0x03) != cur) |
| 54 | loadsegment(es, sc->es | 0x03); |
| 55 | } |
| 56 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 57 | /* |
| 58 | * Do a signal return; undo the signal stack. |
| 59 | */ |
Thomas Gleixner | 99b9cdf | 2008-01-30 13:30:07 +0100 | [diff] [blame] | 60 | static int ia32_restore_sigcontext(struct pt_regs *regs, |
Al Viro | 978727c | 2020-02-15 12:23:36 -0500 | [diff] [blame] | 61 | struct sigcontext_32 __user *usc) |
Thomas Gleixner | 99b9cdf | 2008-01-30 13:30:07 +0100 | [diff] [blame] | 62 | { |
Al Viro | 978727c | 2020-02-15 12:23:36 -0500 | [diff] [blame] | 63 | struct sigcontext_32 sc; |
Thomas Gleixner | 99b9cdf | 2008-01-30 13:30:07 +0100 | [diff] [blame] | 64 | |
| 65 | /* Always make any pending restarted system calls return -EINTR */ |
Andy Lutomirski | f56141e | 2015-02-12 15:01:14 -0800 | [diff] [blame] | 66 | current->restart_block.fn = do_no_restart_syscall; |
Thomas Gleixner | 99b9cdf | 2008-01-30 13:30:07 +0100 | [diff] [blame] | 67 | |
Al Viro | 978727c | 2020-02-15 12:23:36 -0500 | [diff] [blame] | 68 | if (unlikely(copy_from_user(&sc, usc, sizeof(sc)))) |
| 69 | return -EFAULT; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 70 | |
Al Viro | 978727c | 2020-02-15 12:23:36 -0500 | [diff] [blame] | 71 | /* Get only the ia32 registers. */ |
| 72 | regs->bx = sc.bx; |
| 73 | regs->cx = sc.cx; |
| 74 | regs->dx = sc.dx; |
| 75 | regs->si = sc.si; |
| 76 | regs->di = sc.di; |
| 77 | regs->bp = sc.bp; |
| 78 | regs->ax = sc.ax; |
| 79 | regs->sp = sc.sp; |
| 80 | regs->ip = sc.ip; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 81 | |
Al Viro | 978727c | 2020-02-15 12:23:36 -0500 | [diff] [blame] | 82 | /* Get CS/SS and force CPL3 */ |
| 83 | regs->cs = sc.cs | 0x03; |
| 84 | regs->ss = sc.ss | 0x03; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 85 | |
Al Viro | 978727c | 2020-02-15 12:23:36 -0500 | [diff] [blame] | 86 | regs->flags = (regs->flags & ~FIX_EFLAGS) | (sc.flags & FIX_EFLAGS); |
| 87 | /* disable syscall checks */ |
| 88 | regs->orig_ax = -1; |
Hiroshi Shimamoto | 3b4b757 | 2009-01-23 15:50:38 -0800 | [diff] [blame] | 89 | |
Peter Zijlstra | 67a0514 | 2019-02-25 12:56:35 +0100 | [diff] [blame] | 90 | /* |
| 91 | * Reload fs and gs if they have changed in the signal |
| 92 | * handler. This does not handle long fs/gs base changes in |
| 93 | * the handler, but does not clobber them at least in the |
| 94 | * normal case. |
| 95 | */ |
Al Viro | 978727c | 2020-02-15 12:23:36 -0500 | [diff] [blame] | 96 | reload_segments(&sc); |
| 97 | return fpu__restore_sig(compat_ptr(sc.fpstate), 1); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 98 | } |
| 99 | |
/*
 * Classic (non-RT) 32-bit sigreturn system call.
 *
 * Locates the sigframe on the user stack, reassembles the 64-bit signal
 * mask from the frame's oldmask + extramask words, reinstates it, and
 * restores the saved register context.  On any fault the task is killed
 * via signal_fault().
 */
COMPAT_SYSCALL_DEFINE0(sigreturn)
{
	struct pt_regs *regs = current_pt_regs();
	/* NOTE(review): sp-8 appears to skip the popped return address and
	 * the sig argument of the handler — confirm against frame layout. */
	struct sigframe_ia32 __user *frame = (struct sigframe_ia32 __user *)(regs->sp-8);
	sigset_t set;

	if (!access_ok(frame, sizeof(*frame)))
		goto badframe;
	/* Low 32 bits come from sc.oldmask, high 32 from extramask[0]. */
	if (__get_user(set.sig[0], &frame->sc.oldmask)
	    || __get_user(((__u32 *)&set)[1], &frame->extramask[0]))
		goto badframe;

	/* Reinstate the blocked mask before touching the register state. */
	set_current_blocked(&set);

	if (ia32_restore_sigcontext(regs, &frame->sc))
		goto badframe;
	/* Return the restored eax so it is not clobbered by the syscall. */
	return regs->ax;

badframe:
	signal_fault(regs, frame, "32bit sigreturn");
	return 0;
}
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 122 | |
/*
 * RT 32-bit sigreturn system call.
 *
 * Like compat_sys_sigreturn() but for rt frames: the signal mask is a
 * single 64-bit word inside the ucontext, and the alternate signal
 * stack settings are restored from uc_stack as well.
 */
COMPAT_SYSCALL_DEFINE0(rt_sigreturn)
{
	struct pt_regs *regs = current_pt_regs();
	struct rt_sigframe_ia32 __user *frame;
	sigset_t set;

	/* NOTE(review): sp-4 skips the popped sig argument — confirm
	 * against the frame setup in ia32_setup_rt_frame(). */
	frame = (struct rt_sigframe_ia32 __user *)(regs->sp - 4);

	if (!access_ok(frame, sizeof(*frame)))
		goto badframe;
	/* The whole 64-bit mask is read in one access from uc_sigmask. */
	if (__get_user(set.sig[0], (__u64 __user *)&frame->uc.uc_sigmask))
		goto badframe;

	/* Reinstate the blocked mask before touching the register state. */
	set_current_blocked(&set);

	if (ia32_restore_sigcontext(regs, &frame->uc.uc_mcontext))
		goto badframe;

	/* Restore the alternate signal stack settings. */
	if (compat_restore_altstack(&frame->uc.uc_stack))
		goto badframe;

	/* Return the restored eax so it is not clobbered by the syscall. */
	return regs->ax;

badframe:
	signal_fault(regs, frame, "32bit rt sigreturn");
	return 0;
}
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 150 | |
| 151 | /* |
| 152 | * Set up a signal frame. |
| 153 | */ |
| 154 | |
/* Read the current selector of a segment register, e.g. get_user_seg(gs). */
#define get_user_seg(seg) ({ unsigned int v; savesegment(seg, v); v; })
| 156 | |
/*
 * Fill in a 32-bit sigcontext on the user stack.
 *
 * Must be called inside an open user_access_begin() section: every store
 * uses unsafe_put_user() and bails out to the local Efault label on the
 * first failing access.  Records the current data segment selectors, the
 * ia32-visible registers from @regs, trap/error info, the fpstate
 * pointer, and the low word of the blocked mask (@mask).
 * Returns 0 on success, -EFAULT on a faulting store.
 */
static __always_inline int
__unsafe_setup_sigcontext32(struct sigcontext_32 __user *sc,
			    void __user *fpstate,
			    struct pt_regs *regs, unsigned int mask)
{
	/* Segment selectors as currently loaded, not from regs. */
	unsafe_put_user(get_user_seg(gs), (unsigned int __user *)&sc->gs, Efault);
	unsafe_put_user(get_user_seg(fs), (unsigned int __user *)&sc->fs, Efault);
	unsafe_put_user(get_user_seg(ds), (unsigned int __user *)&sc->ds, Efault);
	unsafe_put_user(get_user_seg(es), (unsigned int __user *)&sc->es, Efault);

	unsafe_put_user(regs->di, &sc->di, Efault);
	unsafe_put_user(regs->si, &sc->si, Efault);
	unsafe_put_user(regs->bp, &sc->bp, Efault);
	unsafe_put_user(regs->sp, &sc->sp, Efault);
	unsafe_put_user(regs->bx, &sc->bx, Efault);
	unsafe_put_user(regs->dx, &sc->dx, Efault);
	unsafe_put_user(regs->cx, &sc->cx, Efault);
	unsafe_put_user(regs->ax, &sc->ax, Efault);
	unsafe_put_user(current->thread.trap_nr, &sc->trapno, Efault);
	unsafe_put_user(current->thread.error_code, &sc->err, Efault);
	unsafe_put_user(regs->ip, &sc->ip, Efault);
	unsafe_put_user(regs->cs, (unsigned int __user *)&sc->cs, Efault);
	unsafe_put_user(regs->flags, &sc->flags, Efault);
	unsafe_put_user(regs->sp, &sc->sp_at_signal, Efault);
	unsafe_put_user(regs->ss, (unsigned int __user *)&sc->ss, Efault);

	unsafe_put_user(ptr_to_compat(fpstate), &sc->fpstate, Efault);

	/* non-iBCS2 extensions.. */
	unsafe_put_user(mask, &sc->oldmask, Efault);
	unsafe_put_user(current->thread.cr2, &sc->cr2, Efault);
	return 0;

Efault:
	return -EFAULT;
}
| 193 | |
/*
 * Store a sigcontext inside an already-open user_access section,
 * jumping to @label on fault — lets callers keep the same goto-style
 * error handling as their surrounding unsafe_put_user() calls.
 */
#define unsafe_put_sigcontext32(sc, fp, regs, set, label) \
do { \
	if (__unsafe_setup_sigcontext32(sc, fp, regs, set->sig[0])) \
		goto label; \
} while(0)
| 199 | |
/*
 * Determine which stack to use..
 *
 * Picks the user stack pointer for the new signal frame (normal stack,
 * SA_ONSTACK altstack, or the legacy sa_restorer-based switch), carves
 * out and writes the FPU state area above the frame, then returns the
 * i386-ABI-aligned frame address.  *fpstate is set to the FPU save
 * area.  Returns (void __user *)-1L if the FPU state could not be
 * copied out.
 */
static void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs,
				 size_t frame_size,
				 void __user **fpstate)
{
	unsigned long sp, fx_aligned, math_size;

	/* Default to using normal stack */
	sp = regs->sp;

	/* This is the X/Open sanctioned signal stack switching.  */
	if (ksig->ka.sa.sa_flags & SA_ONSTACK)
		sp = sigsp(sp, ksig);
	/* This is the legacy signal stack switching. */
	else if (regs->ss != __USER32_DS &&
		!(ksig->ka.sa.sa_flags & SA_RESTORER) &&
		 ksig->ka.sa.sa_restorer)
		sp = (unsigned long) ksig->ka.sa.sa_restorer;

	/* Reserve room for the (ia32) FPU frame and copy the state out. */
	sp = fpu__alloc_mathframe(sp, 1, &fx_aligned, &math_size);
	*fpstate = (struct _fpstate_32 __user *) sp;
	if (copy_fpstate_to_sigframe(*fpstate, (void __user *)fx_aligned,
				     math_size) < 0)
		return (void __user *) -1L;

	sp -= frame_size;
	/* Align the stack pointer according to the i386 ABI,
	 * i.e. so that on function entry ((sp + 4) & 15) == 0. */
	sp = ((sp + 4) & -16ul) - 4;
	return (void __user *) sp;
}
| 233 | |
/*
 * Build a classic (non-RT) 32-bit signal frame on the user stack and
 * redirect user execution to the handler.
 *
 * The frame holds the signal number, the saved sigcontext + FPU state,
 * the high word of the blocked mask, the return address (vDSO stub or
 * in-frame retcode), and a legacy int$0x80 retcode kept only as a gdb
 * marker.  Returns 0 on success, -EFAULT if the frame is unwritable.
 */
int ia32_setup_frame(int sig, struct ksignal *ksig,
		     compat_sigset_t *set, struct pt_regs *regs)
{
	struct sigframe_ia32 __user *frame;
	void __user *restorer;
	void __user *fp = NULL;

	/* copy_to_user optimizes that into a single 8 byte store */
	static const struct {
		u16 poplmovl;
		u32 val;
		u16 int80;
	} __attribute__((packed)) code = {
		0xb858,		 /* popl %eax ; movl $...,%eax */
		__NR_ia32_sigreturn,
		0x80cd,		/* int $0x80 */
	};

	frame = get_sigframe(ksig, regs, sizeof(*frame), &fp);

	if (ksig->ka.sa.sa_flags & SA_RESTORER) {
		restorer = ksig->ka.sa.sa_restorer;
	} else {
		/* Return stub is in 32bit vsyscall page */
		if (current->mm->context.vdso)
			restorer = current->mm->context.vdso +
				vdso_image_32.sym___kernel_sigreturn;
		else
			restorer = &frame->retcode;
	}

	/* All frame stores below happen inside one user_access section. */
	if (!user_access_begin(frame, sizeof(*frame)))
		return -EFAULT;

	unsafe_put_user(sig, &frame->sig, Efault);
	unsafe_put_sigcontext32(&frame->sc, fp, regs, set, Efault);
	/* High 32 bits of the mask; the low 32 went into sc.oldmask. */
	unsafe_put_user(set->sig[1], &frame->extramask[0], Efault);
	unsafe_put_user(ptr_to_compat(restorer), &frame->pretcode, Efault);
	/*
	 * These are actually not used anymore, but left because some
	 * gdb versions depend on them as a marker.
	 */
	unsafe_put_user(*((u64 *)&code), (u64 __user *)frame->retcode, Efault);
	user_access_end();

	/* Set up registers for signal handler */
	regs->sp = (unsigned long) frame;
	regs->ip = (unsigned long) ksig->ka.sa.sa_handler;

	/* Make -mregparm=3 work */
	regs->ax = sig;
	regs->dx = 0;
	regs->cx = 0;

	loadsegment(ds, __USER32_DS);
	loadsegment(es, __USER32_DS);

	regs->cs = __USER32_CS;
	regs->ss = __USER32_DS;

	return 0;
Efault:
	user_access_end();
	return -EFAULT;
}
| 299 | |
/*
 * Build an RT 32-bit signal frame (with siginfo + ucontext) on the user
 * stack and redirect user execution to the handler.
 *
 * Like ia32_setup_frame() but also fills in pinfo/puc pointers, the
 * ucontext (flags, link, altstack, mcontext, full 64-bit sigmask), and
 * copies the compat siginfo.  Returns 0 on success, -EFAULT on fault.
 */
int ia32_setup_rt_frame(int sig, struct ksignal *ksig,
			compat_sigset_t *set, struct pt_regs *regs)
{
	struct rt_sigframe_ia32 __user *frame;
	void __user *restorer;
	void __user *fp = NULL;

	/* unsafe_put_user optimizes that into a single 8 byte store */
	static const struct {
		u8 movl;
		u32 val;
		u16 int80;
		u8 pad;		/* pads the retcode to 8 bytes */
	} __attribute__((packed)) code = {
		0xb8,		/* movl $...,%eax */
		__NR_ia32_rt_sigreturn,
		0x80cd,		/* int $0x80 */
		0,
	};

	frame = get_sigframe(ksig, regs, sizeof(*frame), &fp);

	/* All frame stores below happen inside one user_access section. */
	if (!user_access_begin(frame, sizeof(*frame)))
		return -EFAULT;

	unsafe_put_user(sig, &frame->sig, Efault);
	unsafe_put_user(ptr_to_compat(&frame->info), &frame->pinfo, Efault);
	unsafe_put_user(ptr_to_compat(&frame->uc), &frame->puc, Efault);

	/* Create the ucontext.  */
	if (static_cpu_has(X86_FEATURE_XSAVE))
		unsafe_put_user(UC_FP_XSTATE, &frame->uc.uc_flags, Efault);
	else
		unsafe_put_user(0, &frame->uc.uc_flags, Efault);
	unsafe_put_user(0, &frame->uc.uc_link, Efault);
	unsafe_compat_save_altstack(&frame->uc.uc_stack, regs->sp, Efault);

	if (ksig->ka.sa.sa_flags & SA_RESTORER)
		restorer = ksig->ka.sa.sa_restorer;
	else
		restorer = current->mm->context.vdso +
			vdso_image_32.sym___kernel_rt_sigreturn;
	unsafe_put_user(ptr_to_compat(restorer), &frame->pretcode, Efault);

	/*
	 * Not actually used anymore, but left because some gdb
	 * versions need it.
	 */
	unsafe_put_user(*((u64 *)&code), (u64 __user *)frame->retcode, Efault);
	unsafe_put_sigcontext32(&frame->uc.uc_mcontext, fp, regs, set, Efault);
	/* Whole 64-bit blocked mask in one store. */
	unsafe_put_user(*(__u64 *)set, (__u64 *)&frame->uc.uc_sigmask, Efault);
	user_access_end();

	/* Done outside the user_access section: it does its own accesses. */
	if (__copy_siginfo_to_user32(&frame->info, &ksig->info))
		return -EFAULT;

	/* Set up registers for signal handler */
	regs->sp = (unsigned long) frame;
	regs->ip = (unsigned long) ksig->ka.sa.sa_handler;

	/* Make -mregparm=3 work */
	regs->ax = sig;
	regs->dx = (unsigned long) &frame->info;
	regs->cx = (unsigned long) &frame->uc;

	loadsegment(ds, __USER32_DS);
	loadsegment(es, __USER32_DS);

	regs->cs = __USER32_CS;
	regs->ss = __USER32_DS;

	return 0;
Efault:
	user_access_end();
	return -EFAULT;
}