// SPDX-License-Identifier: GPL-2.0
/*
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *  Copyright (C) 2000, 2001, 2002 Andi Kleen SuSE Labs
 *
 *  1997-11-28  Modified for POSIX.1b signals by Richard Henderson
 *  2000-06-20  Pentium III FXSR, SSE support by Gareth Hughes
 *  2000-2002   x86-64 support by Andi Kleen
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/kernel.h>
#include <linux/kstrtox.h>
#include <linux/errno.h>
#include <linux/wait.h>
#include <linux/tracehook.h>
#include <linux/unistd.h>
#include <linux/stddef.h>
#include <linux/personality.h>
#include <linux/uaccess.h>
#include <linux/user-return-notifier.h>
#include <linux/uprobes.h>
#include <linux/context_tracking.h>
#include <linux/entry-common.h>
#include <linux/syscalls.h>

#include <asm/processor.h>
#include <asm/ucontext.h>
#include <asm/fpu/signal.h>
#include <asm/fpu/xstate.h>
#include <asm/vdso.h>
#include <asm/mce.h>
#include <asm/sighandling.h>
#include <asm/vm86.h>

#ifdef CONFIG_X86_64
#include <linux/compat.h>
#include <asm/proto.h>
#include <asm/ia32_unistd.h>
#include <asm/fpu/xstate.h>
#endif /* CONFIG_X86_64 */

#include <asm/syscall.h>
#include <asm/sigframe.h>
#include <asm/signal.h>

#ifdef CONFIG_X86_64
/*
 * If regs->ss will cause an IRET fault, change it.  Otherwise leave it
 * alone.  Using this generally makes no sense unless
 * user_64bit_mode(regs) would return true.
 */
static void force_valid_ss(struct pt_regs *regs)
{
	u32 ar;
	asm volatile ("lar %[old_ss], %[ar]\n\t"
		      "jz 1f\n\t"		/* If invalid: */
		      "xorl %[ar], %[ar]\n\t"	/* set ar = 0 */
		      "1:"
		      : [ar] "=r" (ar)
		      : [old_ss] "rm" ((u16)regs->ss));

	/*
	 * For a valid 64-bit user context, we need DPL 3, type
	 * read-write data or read-write exp-down data, and S and P
	 * set.  We can't use VERW because VERW doesn't check the
	 * P bit.
	 */
	ar &= AR_DPL_MASK | AR_S | AR_P | AR_TYPE_MASK;
	if (ar != (AR_DPL3 | AR_S | AR_P | AR_TYPE_RWDATA) &&
	    ar != (AR_DPL3 | AR_S | AR_P | AR_TYPE_RWDATA_EXPDOWN))
		regs->ss = __USER_DS;
}
# define CONTEXT_COPY_SIZE	offsetof(struct sigcontext, reserved1)
#else
# define CONTEXT_COPY_SIZE	sizeof(struct sigcontext)
#endif

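/*
 * Restore register and FPU state from the sigcontext that a sigreturn
 * variant hands back.  Returns true on success, false if the user copy
 * or the FPU restore fails.
 */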
static bool restore_sigcontext(struct pt_regs *regs,
			       struct sigcontext __user *usc,
			       unsigned long uc_flags)
{
	struct sigcontext sc;

	/* Always make any pending restarted system calls return -EINTR */
	current->restart_block.fn = do_no_restart_syscall;

	if (copy_from_user(&sc, usc, CONTEXT_COPY_SIZE))
		return false;

#ifdef CONFIG_X86_32
	set_user_gs(regs, sc.gs);
	regs->fs = sc.fs;
	regs->es = sc.es;
	regs->ds = sc.ds;
#endif /* CONFIG_X86_32 */

	regs->bx = sc.bx;
	regs->cx = sc.cx;
	regs->dx = sc.dx;
	regs->si = sc.si;
	regs->di = sc.di;
	regs->bp = sc.bp;
	regs->ax = sc.ax;
	regs->sp = sc.sp;
	regs->ip = sc.ip;

#ifdef CONFIG_X86_64
	regs->r8 = sc.r8;
	regs->r9 = sc.r9;
	regs->r10 = sc.r10;
	regs->r11 = sc.r11;
	regs->r12 = sc.r12;
	regs->r13 = sc.r13;
	regs->r14 = sc.r14;
	regs->r15 = sc.r15;
#endif /* CONFIG_X86_64 */

	/* Get CS/SS and force CPL3 */
	regs->cs = sc.cs | 0x03;
	regs->ss = sc.ss | 0x03;

	regs->flags = (regs->flags & ~FIX_EFLAGS) | (sc.flags & FIX_EFLAGS);
	/* disable syscall checks */
	regs->orig_ax = -1;

#ifdef CONFIG_X86_64
	/*
	 * Fix up SS if needed for the benefit of old DOSEMU and
	 * CRIU.
	 */
	if (unlikely(!(uc_flags & UC_STRICT_RESTORE_SS) && user_64bit_mode(regs)))
		force_valid_ss(regs);
#endif

	return fpu__restore_sig((void __user *)sc.fpstate,
				IS_ENABLED(CONFIG_X86_32));
}

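/*
 * Write the saved register state into a user-space sigcontext.  Must be
 * called inside a user_access_begin() section; a faulting unsafe_put_user()
 * jumps to the local Efault label and the function returns -EFAULT.
 */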
static __always_inline int
__unsafe_setup_sigcontext(struct sigcontext __user *sc, void __user *fpstate,
		     struct pt_regs *regs, unsigned long mask)
{
#ifdef CONFIG_X86_32
	unsafe_put_user(get_user_gs(regs),
				  (unsigned int __user *)&sc->gs, Efault);
	unsafe_put_user(regs->fs, (unsigned int __user *)&sc->fs, Efault);
	unsafe_put_user(regs->es, (unsigned int __user *)&sc->es, Efault);
	unsafe_put_user(regs->ds, (unsigned int __user *)&sc->ds, Efault);
#endif /* CONFIG_X86_32 */

	unsafe_put_user(regs->di, &sc->di, Efault);
	unsafe_put_user(regs->si, &sc->si, Efault);
	unsafe_put_user(regs->bp, &sc->bp, Efault);
	unsafe_put_user(regs->sp, &sc->sp, Efault);
	unsafe_put_user(regs->bx, &sc->bx, Efault);
	unsafe_put_user(regs->dx, &sc->dx, Efault);
	unsafe_put_user(regs->cx, &sc->cx, Efault);
	unsafe_put_user(regs->ax, &sc->ax, Efault);
#ifdef CONFIG_X86_64
	unsafe_put_user(regs->r8, &sc->r8, Efault);
	unsafe_put_user(regs->r9, &sc->r9, Efault);
	unsafe_put_user(regs->r10, &sc->r10, Efault);
	unsafe_put_user(regs->r11, &sc->r11, Efault);
	unsafe_put_user(regs->r12, &sc->r12, Efault);
	unsafe_put_user(regs->r13, &sc->r13, Efault);
	unsafe_put_user(regs->r14, &sc->r14, Efault);
	unsafe_put_user(regs->r15, &sc->r15, Efault);
#endif /* CONFIG_X86_64 */

	unsafe_put_user(current->thread.trap_nr, &sc->trapno, Efault);
	unsafe_put_user(current->thread.error_code, &sc->err, Efault);
	unsafe_put_user(regs->ip, &sc->ip, Efault);
#ifdef CONFIG_X86_32
	unsafe_put_user(regs->cs, (unsigned int __user *)&sc->cs, Efault);
	unsafe_put_user(regs->flags, &sc->flags, Efault);
	unsafe_put_user(regs->sp, &sc->sp_at_signal, Efault);
	unsafe_put_user(regs->ss, (unsigned int __user *)&sc->ss, Efault);
#else /* !CONFIG_X86_32 */
	unsafe_put_user(regs->flags, &sc->flags, Efault);
	unsafe_put_user(regs->cs, &sc->cs, Efault);
	unsafe_put_user(0, &sc->gs, Efault);
	unsafe_put_user(0, &sc->fs, Efault);
	unsafe_put_user(regs->ss, &sc->ss, Efault);
#endif /* CONFIG_X86_32 */

	unsafe_put_user(fpstate, (unsigned long __user *)&sc->fpstate, Efault);

	/* non-iBCS2 extensions.. */
	unsafe_put_user(mask, &sc->oldmask, Efault);
	unsafe_put_user(current->thread.cr2, &sc->cr2, Efault);
	return 0;
Efault:
	return -EFAULT;
}

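/*
 * Helpers for the frame setup paths below: write the sigcontext and the
 * 64-bit signal mask into a frame from within a user_access_begin()
 * section, jumping to @label on a fault.
 */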
#define unsafe_put_sigcontext(sc, fp, regs, set, label)			\
do {									\
	if (__unsafe_setup_sigcontext(sc, fp, regs, set->sig[0]))	\
		goto label;						\
} while(0);

#define unsafe_put_sigmask(set, frame, label) \
	unsafe_put_user(*(__u64 *)(set), \
			(__u64 __user *)&(frame)->uc.uc_sigmask, \
			label)

/*
 * Set up a signal frame.
 */

/* x86 ABI requires 16-byte alignment */
#define FRAME_ALIGNMENT	16UL

#define MAX_FRAME_PADDING	(FRAME_ALIGNMENT - 1)

/*
 * Determine which stack to use..
 */
static unsigned long align_sigframe(unsigned long sp)
{
#ifdef CONFIG_X86_32
	/*
	 * Align the stack pointer according to the i386 ABI,
	 * i.e. so that on function entry ((sp + 4) & 15) == 0.
	 */
	sp = ((sp + 4) & -FRAME_ALIGNMENT) - 4;
#else /* !CONFIG_X86_32 */
	sp = round_down(sp, FRAME_ALIGNMENT) - 8;
#endif
	return sp;
}

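/*
 * Pick the stack for the signal frame, reserve room for the FPU state and
 * the frame itself, and write out the FPU state.  Returns an always-invalid
 * pointer (-1L) if the frame would overflow the alternate stack or the FPU
 * copy fails, so the caller's access checks fail and the task gets SIGSEGV.
 */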
static void __user *
get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size,
	     void __user **fpstate)
{
	/* Default to using normal stack */
	bool nested_altstack = on_sig_stack(regs->sp);
	bool entering_altstack = false;
	unsigned long math_size = 0;
	unsigned long sp = regs->sp;
	unsigned long buf_fx = 0;

	/* redzone */
	if (IS_ENABLED(CONFIG_X86_64))
		sp -= 128;

	/* This is the X/Open sanctioned signal stack switching.  */
	if (ka->sa.sa_flags & SA_ONSTACK) {
		/*
		 * This checks nested_altstack via sas_ss_flags(). Sensible
		 * programs use SS_AUTODISARM, which disables that check, and
		 * programs that don't use SS_AUTODISARM get the compatible
		 * behavior.
		 */
		if (sas_ss_flags(sp) == 0) {
			sp = current->sas_ss_sp + current->sas_ss_size;
			entering_altstack = true;
		}
	} else if (IS_ENABLED(CONFIG_X86_32) &&
		   !nested_altstack &&
		   regs->ss != __USER_DS &&
		   !(ka->sa.sa_flags & SA_RESTORER) &&
		   ka->sa.sa_restorer) {
		/* This is the legacy signal stack switching. */
		sp = (unsigned long) ka->sa.sa_restorer;
		entering_altstack = true;
	}

	sp = fpu__alloc_mathframe(sp, IS_ENABLED(CONFIG_X86_32),
				  &buf_fx, &math_size);
	*fpstate = (void __user *)sp;

	sp = align_sigframe(sp - frame_size);

	/*
	 * If we are on the alternate signal stack and would overflow it, don't.
	 * Return an always-bogus address instead so we will die with SIGSEGV.
	 */
	if (unlikely((nested_altstack || entering_altstack) &&
		     !__on_sig_stack(sp))) {

		if (show_unhandled_signals && printk_ratelimit())
			pr_info("%s[%d] overflowed sigaltstack\n",
				current->comm, task_pid_nr(current));

		return (void __user *)-1L;
	}

	/* save i387 and extended state */
	if (!copy_fpstate_to_sigframe(*fpstate, (void __user *)buf_fx, math_size))
		return (void __user *)-1L;

	return (void __user *)sp;
}

#ifdef CONFIG_X86_32
static const struct {
	u16 poplmovl;
	u32 val;
	u16 int80;
} __attribute__((packed)) retcode = {
	0xb858,		/* popl %eax; movl $..., %eax */
	__NR_sigreturn,
	0x80cd,		/* int $0x80 */
};

static const struct {
	u8  movl;
	u32 val;
	u16 int80;
	u8  pad;
} __attribute__((packed)) rt_retcode = {
	0xb8,		/* movl $..., %eax */
	__NR_rt_sigreturn,
	0x80cd,		/* int $0x80 */
	0
};

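/*
 * Build the classic (non-RT) 32-bit signal frame: signal number,
 * sigcontext, the extra sigmask word and the return trampoline.
 */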
static int
__setup_frame(int sig, struct ksignal *ksig, sigset_t *set,
	      struct pt_regs *regs)
{
	struct sigframe __user *frame;
	void __user *restorer;
	void __user *fp = NULL;

	frame = get_sigframe(&ksig->ka, regs, sizeof(*frame), &fp);

	if (!user_access_begin(frame, sizeof(*frame)))
		return -EFAULT;

	unsafe_put_user(sig, &frame->sig, Efault);
	unsafe_put_sigcontext(&frame->sc, fp, regs, set, Efault);
	unsafe_put_user(set->sig[1], &frame->extramask[0], Efault);
	if (current->mm->context.vdso)
		restorer = current->mm->context.vdso +
			vdso_image_32.sym___kernel_sigreturn;
	else
		restorer = &frame->retcode;
	if (ksig->ka.sa.sa_flags & SA_RESTORER)
		restorer = ksig->ka.sa.sa_restorer;

	/* Set up to return from userspace.  */
	unsafe_put_user(restorer, &frame->pretcode, Efault);

	/*
	 * This is popl %eax ; movl $__NR_sigreturn, %eax ; int $0x80
	 *
	 * WE DO NOT USE IT ANY MORE! It's only left here for historical
	 * reasons and because gdb uses it as a signature to notice
	 * signal handler stack frames.
	 */
	unsafe_put_user(*((u64 *)&retcode), (u64 *)frame->retcode, Efault);
	user_access_end();

	/* Set up registers for signal handler */
	regs->sp = (unsigned long)frame;
	regs->ip = (unsigned long)ksig->ka.sa.sa_handler;
	regs->ax = (unsigned long)sig;
	regs->dx = 0;
	regs->cx = 0;

	regs->ds = __USER_DS;
	regs->es = __USER_DS;
	regs->ss = __USER_DS;
	regs->cs = __USER_CS;

	return 0;

Efault:
	user_access_end();
	return -EFAULT;
}

static int __setup_rt_frame(int sig, struct ksignal *ksig,
			    sigset_t *set, struct pt_regs *regs)
{
	struct rt_sigframe __user *frame;
	void __user *restorer;
	void __user *fp = NULL;

	frame = get_sigframe(&ksig->ka, regs, sizeof(*frame), &fp);

	if (!user_access_begin(frame, sizeof(*frame)))
		return -EFAULT;

	unsafe_put_user(sig, &frame->sig, Efault);
	unsafe_put_user(&frame->info, &frame->pinfo, Efault);
	unsafe_put_user(&frame->uc, &frame->puc, Efault);

	/* Create the ucontext.  */
	if (static_cpu_has(X86_FEATURE_XSAVE))
		unsafe_put_user(UC_FP_XSTATE, &frame->uc.uc_flags, Efault);
	else
		unsafe_put_user(0, &frame->uc.uc_flags, Efault);
	unsafe_put_user(0, &frame->uc.uc_link, Efault);
	unsafe_save_altstack(&frame->uc.uc_stack, regs->sp, Efault);

	/* Set up to return from userspace.  */
	restorer = current->mm->context.vdso +
		vdso_image_32.sym___kernel_rt_sigreturn;
	if (ksig->ka.sa.sa_flags & SA_RESTORER)
		restorer = ksig->ka.sa.sa_restorer;
	unsafe_put_user(restorer, &frame->pretcode, Efault);

	/*
	 * This is movl $__NR_rt_sigreturn, %ax ; int $0x80
	 *
	 * WE DO NOT USE IT ANY MORE! It's only left here for historical
	 * reasons and because gdb uses it as a signature to notice
	 * signal handler stack frames.
	 */
	unsafe_put_user(*((u64 *)&rt_retcode), (u64 *)frame->retcode, Efault);
	unsafe_put_sigcontext(&frame->uc.uc_mcontext, fp, regs, set, Efault);
	unsafe_put_sigmask(set, frame, Efault);
	user_access_end();

	if (copy_siginfo_to_user(&frame->info, &ksig->info))
		return -EFAULT;

	/* Set up registers for signal handler */
	regs->sp = (unsigned long)frame;
	regs->ip = (unsigned long)ksig->ka.sa.sa_handler;
	regs->ax = (unsigned long)sig;
	regs->dx = (unsigned long)&frame->info;
	regs->cx = (unsigned long)&frame->uc;

	regs->ds = __USER_DS;
	regs->es = __USER_DS;
	regs->ss = __USER_DS;
	regs->cs = __USER_CS;

	return 0;
Efault:
	user_access_end();
	return -EFAULT;
}
#else /* !CONFIG_X86_32 */
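/*
 * uc_flags advertised in 64-bit and x32 ucontexts: report the XSAVE layout
 * when available and tell sigreturn whether SS can be restored strictly.
 */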
static unsigned long frame_uc_flags(struct pt_regs *regs)
{
	unsigned long flags;

	if (boot_cpu_has(X86_FEATURE_XSAVE))
		flags = UC_FP_XSTATE | UC_SIGCONTEXT_SS;
	else
		flags = UC_SIGCONTEXT_SS;

	if (likely(user_64bit_mode(regs)))
		flags |= UC_STRICT_RESTORE_SS;

	return flags;
}

static int __setup_rt_frame(int sig, struct ksignal *ksig,
			    sigset_t *set, struct pt_regs *regs)
{
	struct rt_sigframe __user *frame;
	void __user *fp = NULL;
	unsigned long uc_flags;

	/* x86-64 should always use SA_RESTORER. */
	if (!(ksig->ka.sa.sa_flags & SA_RESTORER))
		return -EFAULT;

	frame = get_sigframe(&ksig->ka, regs, sizeof(struct rt_sigframe), &fp);
	uc_flags = frame_uc_flags(regs);

	if (!user_access_begin(frame, sizeof(*frame)))
		return -EFAULT;

	/* Create the ucontext.  */
	unsafe_put_user(uc_flags, &frame->uc.uc_flags, Efault);
	unsafe_put_user(0, &frame->uc.uc_link, Efault);
	unsafe_save_altstack(&frame->uc.uc_stack, regs->sp, Efault);

	/* Set up to return from userspace.  If provided, use a stub
	   already in userspace.  */
	unsafe_put_user(ksig->ka.sa.sa_restorer, &frame->pretcode, Efault);
	unsafe_put_sigcontext(&frame->uc.uc_mcontext, fp, regs, set, Efault);
	unsafe_put_sigmask(set, frame, Efault);
	user_access_end();

	if (ksig->ka.sa.sa_flags & SA_SIGINFO) {
		if (copy_siginfo_to_user(&frame->info, &ksig->info))
			return -EFAULT;
	}

	/* Set up registers for signal handler */
	regs->di = sig;
	/* In case the signal handler was declared without prototypes */
	regs->ax = 0;

	/* This also works for non SA_SIGINFO handlers because they expect the
	   next argument after the signal number on the stack. */
	regs->si = (unsigned long)&frame->info;
	regs->dx = (unsigned long)&frame->uc;
	regs->ip = (unsigned long) ksig->ka.sa.sa_handler;

	regs->sp = (unsigned long)frame;

	/*
	 * Set up the CS and SS registers to run signal handlers in
	 * 64-bit mode, even if the handler happens to be interrupting
	 * 32-bit or 16-bit code.
	 *
	 * SS is subtle.  In 64-bit mode, we don't need any particular
	 * SS descriptor, but we do need SS to be valid.  It's possible
	 * that the old SS is entirely bogus -- this can happen if the
	 * signal we're trying to deliver is #GP or #SS caused by a bad
	 * SS value.  We also have a compatibility issue here: DOSEMU
	 * relies on the contents of the SS register indicating the
	 * SS value at the time of the signal, even though that code in
	 * DOSEMU predates sigreturn's ability to restore SS.  (DOSEMU
	 * avoids relying on sigreturn to restore SS; instead it uses
	 * a trampoline.)  So we do our best: if the old SS was valid,
	 * we keep it.  Otherwise we replace it.
	 */
	regs->cs = __USER_CS;

	if (unlikely(regs->ss != __USER_DS))
		force_valid_ss(regs);

	return 0;

Efault:
	user_access_end();
	return -EFAULT;
}
#endif /* CONFIG_X86_32 */

#ifdef CONFIG_X86_X32_ABI
static int x32_copy_siginfo_to_user(struct compat_siginfo __user *to,
		const struct kernel_siginfo *from)
{
	struct compat_siginfo new;

	copy_siginfo_to_external32(&new, from);
	if (from->si_signo == SIGCHLD) {
		new._sifields._sigchld_x32._utime = from->si_utime;
		new._sifields._sigchld_x32._stime = from->si_stime;
	}
	if (copy_to_user(to, &new, sizeof(struct compat_siginfo)))
		return -EFAULT;
	return 0;
}

int copy_siginfo_to_user32(struct compat_siginfo __user *to,
			   const struct kernel_siginfo *from)
{
	if (in_x32_syscall())
		return x32_copy_siginfo_to_user(to, from);
	return __copy_siginfo_to_user32(to, from);
}
#endif /* CONFIG_X86_X32_ABI */

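/*
 * Set up an rt signal frame for the x32 ABI: full 64-bit register state,
 * but compat layouts for siginfo and the alternate signal stack.
 */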
static int x32_setup_rt_frame(struct ksignal *ksig,
			      compat_sigset_t *set,
			      struct pt_regs *regs)
{
#ifdef CONFIG_X86_X32_ABI
	struct rt_sigframe_x32 __user *frame;
	unsigned long uc_flags;
	void __user *restorer;
	void __user *fp = NULL;

	if (!(ksig->ka.sa.sa_flags & SA_RESTORER))
		return -EFAULT;

	frame = get_sigframe(&ksig->ka, regs, sizeof(*frame), &fp);

	uc_flags = frame_uc_flags(regs);

	if (!user_access_begin(frame, sizeof(*frame)))
		return -EFAULT;

	/* Create the ucontext.  */
	unsafe_put_user(uc_flags, &frame->uc.uc_flags, Efault);
	unsafe_put_user(0, &frame->uc.uc_link, Efault);
	unsafe_compat_save_altstack(&frame->uc.uc_stack, regs->sp, Efault);
	unsafe_put_user(0, &frame->uc.uc__pad0, Efault);
	restorer = ksig->ka.sa.sa_restorer;
	unsafe_put_user(restorer, (unsigned long __user *)&frame->pretcode, Efault);
	unsafe_put_sigcontext(&frame->uc.uc_mcontext, fp, regs, set, Efault);
	unsafe_put_sigmask(set, frame, Efault);
	user_access_end();

	if (ksig->ka.sa.sa_flags & SA_SIGINFO) {
		if (x32_copy_siginfo_to_user(&frame->info, &ksig->info))
			return -EFAULT;
	}

	/* Set up registers for signal handler */
	regs->sp = (unsigned long) frame;
	regs->ip = (unsigned long) ksig->ka.sa.sa_handler;

	/* We use the x32 calling convention here... */
	regs->di = ksig->sig;
	regs->si = (unsigned long) &frame->info;
	regs->dx = (unsigned long) &frame->uc;

	loadsegment(ds, __USER_DS);
	loadsegment(es, __USER_DS);

	regs->cs = __USER_CS;
	regs->ss = __USER_DS;
#endif	/* CONFIG_X86_X32_ABI */

	return 0;
#ifdef CONFIG_X86_X32_ABI
Efault:
	user_access_end();
	return -EFAULT;
#endif
}

/*
 * Do a signal return; undo the signal stack.
 */
#ifdef CONFIG_X86_32
SYSCALL_DEFINE0(sigreturn)
{
	struct pt_regs *regs = current_pt_regs();
	struct sigframe __user *frame;
	sigset_t set;

	frame = (struct sigframe __user *)(regs->sp - 8);

	if (!access_ok(frame, sizeof(*frame)))
		goto badframe;
	if (__get_user(set.sig[0], &frame->sc.oldmask) ||
	    __get_user(set.sig[1], &frame->extramask[0]))
		goto badframe;

	set_current_blocked(&set);

	/*
	 * x86_32 has no uc_flags bits relevant to restore_sigcontext.
	 * Save a few cycles by skipping the __get_user.
	 */
	if (!restore_sigcontext(regs, &frame->sc, 0))
		goto badframe;
	return regs->ax;

badframe:
	signal_fault(regs, frame, "sigreturn");

	return 0;
}
#endif /* CONFIG_X86_32 */

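/*
 * rt_sigreturn: restore the sigmask, register/FPU state and altstack
 * settings that were saved in the rt signal frame.
 */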
SYSCALL_DEFINE0(rt_sigreturn)
{
	struct pt_regs *regs = current_pt_regs();
	struct rt_sigframe __user *frame;
	sigset_t set;
	unsigned long uc_flags;

	frame = (struct rt_sigframe __user *)(regs->sp - sizeof(long));
	if (!access_ok(frame, sizeof(*frame)))
		goto badframe;
	if (__get_user(*(__u64 *)&set, (__u64 __user *)&frame->uc.uc_sigmask))
		goto badframe;
	if (__get_user(uc_flags, &frame->uc.uc_flags))
		goto badframe;

	set_current_blocked(&set);

	if (!restore_sigcontext(regs, &frame->uc.uc_mcontext, uc_flags))
		goto badframe;

	if (restore_altstack(&frame->uc.uc_stack))
		goto badframe;

	return regs->ax;

badframe:
	signal_fault(regs, frame, "rt_sigreturn");
	return 0;
}

/*
 * There are four different struct types for signal frame: sigframe_ia32,
 * rt_sigframe_ia32, rt_sigframe_x32, and rt_sigframe. Use the worst case
 * -- the largest size. It means the size for 64-bit apps is a bit more
 * than needed, but this keeps the code simple.
 */
#if defined(CONFIG_X86_32) || defined(CONFIG_IA32_EMULATION)
# define MAX_FRAME_SIGINFO_UCTXT_SIZE	sizeof(struct sigframe_ia32)
#else
# define MAX_FRAME_SIGINFO_UCTXT_SIZE	sizeof(struct rt_sigframe)
#endif

/*
 * The FP state frame contains an XSAVE buffer which must be 64-byte aligned.
 * If a signal frame starts at an unaligned address, extra space is required.
 * This is the max alignment padding, conservatively.
 */
#define MAX_XSAVE_PADDING	63UL

/*
 * The frame data is composed of the following areas and laid out as:
 *
 * -------------------------
 * | alignment padding     |
 * -------------------------
 * | (f)xsave frame        |
 * -------------------------
 * | fsave header          |
 * -------------------------
 * | alignment padding     |
 * -------------------------
 * | siginfo + ucontext    |
 * -------------------------
 */

/* max_frame_size tells userspace the worst case signal stack size. */
static unsigned long __ro_after_init max_frame_size;
static unsigned int __ro_after_init fpu_default_state_size;

void __init init_sigframe_size(void)
{
	fpu_default_state_size = fpu__get_fpstate_size();

	max_frame_size = MAX_FRAME_SIGINFO_UCTXT_SIZE + MAX_FRAME_PADDING;

	max_frame_size += fpu_default_state_size + MAX_XSAVE_PADDING;

	/* Userspace expects an aligned size. */
	max_frame_size = round_up(max_frame_size, FRAME_ALIGNMENT);

	pr_info("max sigframe size: %lu\n", max_frame_size);
}

unsigned long get_sigframe_size(void)
{
	return max_frame_size;
}

static inline int is_ia32_compat_frame(struct ksignal *ksig)
{
	return IS_ENABLED(CONFIG_IA32_EMULATION) &&
		ksig->ka.sa.sa_flags & SA_IA32_ABI;
}

static inline int is_ia32_frame(struct ksignal *ksig)
{
	return IS_ENABLED(CONFIG_X86_32) || is_ia32_compat_frame(ksig);
}

static inline int is_x32_frame(struct ksignal *ksig)
{
	return IS_ENABLED(CONFIG_X86_X32_ABI) &&
		ksig->ka.sa.sa_flags & SA_X32_ABI;
}

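/*
 * Dispatch to the frame builder matching the handler's ABI: ia32
 * (native 32-bit or compat), x32, or native 64-bit.
 */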
static int
setup_rt_frame(struct ksignal *ksig, struct pt_regs *regs)
{
	int usig = ksig->sig;
	sigset_t *set = sigmask_to_save();
	compat_sigset_t *cset = (compat_sigset_t *) set;

	/* Perform fixup for the pre-signal frame. */
	rseq_signal_deliver(ksig, regs);

	/* Set up the stack frame */
	if (is_ia32_frame(ksig)) {
		if (ksig->ka.sa.sa_flags & SA_SIGINFO)
			return ia32_setup_rt_frame(usig, ksig, cset, regs);
		else
			return ia32_setup_frame(usig, ksig, cset, regs);
	} else if (is_x32_frame(ksig)) {
		return x32_setup_rt_frame(ksig, cset, regs);
	} else {
		return __setup_rt_frame(ksig->sig, ksig, set, regs);
	}
}

static void
handle_signal(struct ksignal *ksig, struct pt_regs *regs)
{
	bool stepping, failed;
	struct fpu *fpu = &current->thread.fpu;

	if (v8086_mode(regs))
		save_v86_state((struct kernel_vm86_regs *) regs, VM86_SIGNAL);

	/* Are we from a system call? */
	if (syscall_get_nr(current, regs) != -1) {
		/* If so, check system call restarting.. */
		switch (syscall_get_error(current, regs)) {
		case -ERESTART_RESTARTBLOCK:
		case -ERESTARTNOHAND:
			regs->ax = -EINTR;
			break;

		case -ERESTARTSYS:
			if (!(ksig->ka.sa.sa_flags & SA_RESTART)) {
				regs->ax = -EINTR;
				break;
			}
			fallthrough;
		case -ERESTARTNOINTR:
			regs->ax = regs->orig_ax;
			regs->ip -= 2;
			break;
		}
	}

	/*
	 * If TF is set due to a debugger (TIF_FORCED_TF), clear TF now
	 * so that register information in the sigcontext is correct and
	 * then notify the tracer before entering the signal handler.
	 */
	stepping = test_thread_flag(TIF_SINGLESTEP);
	if (stepping)
		user_disable_single_step(current);

	failed = (setup_rt_frame(ksig, regs) < 0);
	if (!failed) {
		/*
		 * Clear the direction flag as per the ABI for function entry.
		 *
		 * Clear RF when entering the signal handler, because a
		 * leftover RF could suppress a debug exception inside the
		 * signal handler.
		 *
		 * Clear TF for the case when it wasn't set by the debugger,
		 * to avoid the recursive send_sigtrap() in the SIGTRAP
		 * handler.
		 */
		regs->flags &= ~(X86_EFLAGS_DF|X86_EFLAGS_RF|X86_EFLAGS_TF);
		/*
		 * Ensure the signal handler starts with the new fpu state.
		 */
		fpu__clear_user_states(fpu);
	}
	signal_setup_done(failed, ksig, stepping);
}

static inline unsigned long get_nr_restart_syscall(const struct pt_regs *regs)
{
#ifdef CONFIG_IA32_EMULATION
	if (current->restart_block.arch_data & TS_COMPAT)
		return __NR_ia32_restart_syscall;
#endif
#ifdef CONFIG_X86_X32_ABI
	return __NR_restart_syscall | (regs->orig_ax & __X32_SYSCALL_BIT);
#else
	return __NR_restart_syscall;
#endif
}

/*
 * Note that 'init' is a special process: it doesn't get signals it doesn't
 * want to handle. Thus you cannot kill init even with a SIGKILL even by
 * mistake.
 */
void arch_do_signal_or_restart(struct pt_regs *regs, bool has_signal)
{
	struct ksignal ksig;

	if (has_signal && get_signal(&ksig)) {
		/* Whee! Actually deliver the signal.  */
		handle_signal(&ksig, regs);
		return;
	}

	/* Did we come from a system call? */
	if (syscall_get_nr(current, regs) != -1) {
		/* Restart the system call - no handlers present */
		switch (syscall_get_error(current, regs)) {
		case -ERESTARTNOHAND:
		case -ERESTARTSYS:
		case -ERESTARTNOINTR:
			regs->ax = regs->orig_ax;
			regs->ip -= 2;
			break;

		case -ERESTART_RESTARTBLOCK:
			regs->ax = get_nr_restart_syscall(regs);
			regs->ip -= 2;
			break;
		}
	}

	/*
	 * If there's no signal to deliver, we just put the saved sigmask
	 * back.
	 */
	restore_saved_sigmask();
}

void signal_fault(struct pt_regs *regs, void __user *frame, char *where)
{
	struct task_struct *me = current;

	if (show_unhandled_signals && printk_ratelimit()) {
		printk("%s"
		       "%s[%d] bad frame in %s frame:%p ip:%lx sp:%lx orax:%lx",
		       task_pid_nr(current) > 1 ? KERN_INFO : KERN_EMERG,
		       me->comm, me->pid, where, frame,
		       regs->ip, regs->sp, regs->orig_ax);
		print_vma_addr(KERN_CONT " in ", regs->ip);
		pr_cont("\n");
	}

	force_sig(SIGSEGV);
}

#ifdef CONFIG_DYNAMIC_SIGFRAME
#ifdef CONFIG_STRICT_SIGALTSTACK_SIZE
static bool strict_sigaltstack_size __ro_after_init = true;
#else
static bool strict_sigaltstack_size __ro_after_init = false;
#endif

static int __init strict_sas_size(char *arg)
{
	return kstrtobool(arg, &strict_sigaltstack_size);
}
__setup("strict_sas_size", strict_sas_size);

/*
 * MINSIGSTKSZ is 2048 and can't be changed despite the fact that AVX512
 * exceeds that size already. As such programs might never use the
 * sigaltstack, they just continued to work. While always checking against
 * the real size would be correct, this might be considered a regression.
 *
 * Therefore avoid the sanity check, unless enforced by kernel
 * configuration or command line option.
 *
 * When dynamic FPU features are supported, the check is also enforced when
 * the task has permissions to use dynamic features. Tasks which have no
 * permission are checked against the size of the non-dynamic feature set
 * if strict checking is enabled. This avoids forcing all tasks on the
 * system to allocate large sigaltstacks even if they are never going
 * to use a dynamic feature. As this is serialized via sighand::siglock
 * any permission request for a dynamic feature either happened already
 * or will see the newly installed sigaltstack size in the permission checks.
 */
bool sigaltstack_size_valid(size_t ss_size)
{
	unsigned long fsize = max_frame_size - fpu_default_state_size;
	u64 mask;

	lockdep_assert_held(&current->sighand->siglock);

	if (!fpu_state_size_dynamic() && !strict_sigaltstack_size)
		return true;

	fsize += current->group_leader->thread.fpu.perm.__user_state_size;
	if (likely(ss_size > fsize))
		return true;

	if (strict_sigaltstack_size)
		return ss_size > fsize;

	mask = current->group_leader->thread.fpu.perm.__state_perm;
	if (mask & XFEATURE_MASK_USER_DYNAMIC)
		return ss_size > fsize;

	return true;
}
#endif /* CONFIG_DYNAMIC_SIGFRAME */

#ifdef CONFIG_X86_X32_ABI
COMPAT_SYSCALL_DEFINE0(x32_rt_sigreturn)
{
	struct pt_regs *regs = current_pt_regs();
	struct rt_sigframe_x32 __user *frame;
	sigset_t set;
	unsigned long uc_flags;

	frame = (struct rt_sigframe_x32 __user *)(regs->sp - 8);

	if (!access_ok(frame, sizeof(*frame)))
		goto badframe;
	if (__get_user(set.sig[0], (__u64 __user *)&frame->uc.uc_sigmask))
		goto badframe;
	if (__get_user(uc_flags, &frame->uc.uc_flags))
		goto badframe;

	set_current_blocked(&set);

	if (!restore_sigcontext(regs, &frame->uc.uc_mcontext, uc_flags))
		goto badframe;

	if (compat_restore_altstack(&frame->uc.uc_stack))
		goto badframe;

	return regs->ax;

badframe:
	signal_fault(regs, frame, "x32 rt_sigreturn");
	return 0;
}
#endif