/*
 * Copyright (C) 1995  Linus Torvalds
 *
 * Pentium III FXSR, SSE support
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 */

/*
 * This file handles the architecture-dependent parts of process handling.
 */

#include <linux/cpu.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/elfcore.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/user.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/reboot.h>
#include <linux/mc146818rtc.h>
#include <linux/export.h>
#include <linux/kallsyms.h>
#include <linux/ptrace.h>
#include <linux/personality.h>
#include <linux/percpu.h>
#include <linux/prctl.h>
#include <linux/ftrace.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/kdebug.h>
#include <linux/syscalls.h>

#include <asm/pgtable.h>
#include <asm/ldt.h>
#include <asm/processor.h>
#include <asm/fpu/internal.h>
#include <asm/desc.h>

#include <linux/err.h>

#include <asm/tlbflush.h>
#include <asm/cpu.h>
#include <asm/syscalls.h>
#include <asm/debugreg.h>
#include <asm/switch_to.h>
#include <asm/vm86.h>
#include <asm/resctrl_sched.h>
#include <asm/proto.h>

#include "process.h"

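/*
 * Print the register state saved in @regs for debugging output such as
 * oopses.  Only SHOW_REGS_ALL additionally dumps the control and debug
 * registers.
 */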
void __show_regs(struct pt_regs *regs, enum show_regs_mode mode)
{
	unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L;
	unsigned long d0, d1, d2, d3, d6, d7;
	unsigned long sp;
	unsigned short ss, gs;

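	/*
	 * For a user-mode frame the stack pointer and segments were saved
	 * in pt_regs; for a kernel-mode frame SS and GS must be read from
	 * the live segment registers instead.
	 */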
	if (user_mode(regs)) {
		sp = regs->sp;
		ss = regs->ss;
		gs = get_user_gs(regs);
	} else {
		sp = kernel_stack_pointer(regs);
		savesegment(ss, ss);
		savesegment(gs, gs);
	}

	show_ip(regs, KERN_DEFAULT);

	printk(KERN_DEFAULT "EAX: %08lx EBX: %08lx ECX: %08lx EDX: %08lx\n",
		regs->ax, regs->bx, regs->cx, regs->dx);
	printk(KERN_DEFAULT "ESI: %08lx EDI: %08lx EBP: %08lx ESP: %08lx\n",
		regs->si, regs->di, regs->bp, sp);
	printk(KERN_DEFAULT "DS: %04x ES: %04x FS: %04x GS: %04x SS: %04x EFLAGS: %08lx\n",
	       (u16)regs->ds, (u16)regs->es, (u16)regs->fs, gs, ss, regs->flags);

	if (mode != SHOW_REGS_ALL)
		return;

	cr0 = read_cr0();
	cr2 = read_cr2();
	cr3 = __read_cr3();
	cr4 = __read_cr4();
	printk(KERN_DEFAULT "CR0: %08lx CR2: %08lx CR3: %08lx CR4: %08lx\n",
		cr0, cr2, cr3, cr4);

	get_debugreg(d0, 0);
	get_debugreg(d1, 1);
	get_debugreg(d2, 2);
	get_debugreg(d3, 3);
	get_debugreg(d6, 6);
	get_debugreg(d7, 7);

	/* Only print out debug registers if they are in their non-default state. */
	if ((d0 == 0) && (d1 == 0) && (d2 == 0) && (d3 == 0) &&
	    (d6 == DR6_RESERVED) && (d7 == 0x400))
		return;

	printk(KERN_DEFAULT "DR0: %08lx DR1: %08lx DR2: %08lx DR3: %08lx\n",
		d0, d1, d2, d3);
	printk(KERN_DEFAULT "DR6: %08lx DR7: %08lx\n",
		d6, d7);
}

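/*
 * Called when a dead task is finally released; its mm must already have
 * been dropped, so all that is left is to free any vm86 IRQs it may still
 * hold.
 */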
void release_thread(struct task_struct *dead_task)
{
	BUG_ON(dead_task->mm);
	release_vm86_irqs(dead_task);
}

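/*
 * Set up the saved register and stack state of a freshly forked task.
 * For kernel threads @sp and @arg carry the thread function and its
 * argument; for user threads @sp, when non-zero, is the new user stack
 * pointer.  @tls is consumed only when CLONE_SETTLS is set.
 */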
int copy_thread_tls(unsigned long clone_flags, unsigned long sp,
	unsigned long arg, struct task_struct *p, unsigned long tls)
{
	struct pt_regs *childregs = task_pt_regs(p);
	struct fork_frame *fork_frame = container_of(childregs, struct fork_frame, regs);
	struct inactive_task_frame *frame = &fork_frame->frame;
	struct task_struct *tsk;
	int err;

	/*
	 * For a new task use the RESET flags value since there is no
	 * previous state to inherit.  All the status flags are zero; DF and
	 * all the system flags must also be 0, specifically IF must be 0
	 * because we context switch to the new task with interrupts disabled.
	 */
	frame->flags = X86_EFLAGS_FIXED;
	frame->bp = 0;
	frame->ret_addr = (unsigned long) ret_from_fork;
	p->thread.sp = (unsigned long) fork_frame;
	p->thread.sp0 = (unsigned long) (childregs+1);
	memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps));

	if (unlikely(p->flags & PF_KTHREAD)) {
		/* kernel thread */
		memset(childregs, 0, sizeof(struct pt_regs));
		frame->bx = sp;		/* function */
		frame->di = arg;
		p->thread.io_bitmap_ptr = NULL;
		return 0;
	}
	frame->bx = 0;
	*childregs = *current_pt_regs();
	childregs->ax = 0;
	if (sp)
		childregs->sp = sp;

	task_user_gs(p) = get_user_gs(current_pt_regs());

	p->thread.io_bitmap_ptr = NULL;
	tsk = current;
	err = -ENOMEM;

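	/*
	 * If the parent uses an I/O permission bitmap, the child needs its
	 * own private copy.
	 */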
	if (unlikely(test_tsk_thread_flag(tsk, TIF_IO_BITMAP))) {
		p->thread.io_bitmap_ptr = kmemdup(tsk->thread.io_bitmap_ptr,
						IO_BITMAP_BYTES, GFP_KERNEL);
		if (!p->thread.io_bitmap_ptr) {
			p->thread.io_bitmap_max = 0;
			return -ENOMEM;
		}
		set_tsk_thread_flag(p, TIF_IO_BITMAP);
	}

	err = 0;

	/*
	 * Set a new TLS for the child thread?
	 */
	if (clone_flags & CLONE_SETTLS)
		err = do_set_thread_area(p, -1,
			(struct user_desc __user *)tls, 0);

	if (err && p->thread.io_bitmap_ptr) {
		kfree(p->thread.io_bitmap_ptr);
		p->thread.io_bitmap_max = 0;
	}
	return err;
}

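/*
 * Set up the user-mode register state for a task returning to a freshly
 * exec'ed program: flat user segments, the new instruction and stack
 * pointers, and interrupts enabled.
 */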
void
start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
{
	set_user_gs(regs, 0);
	regs->fs = 0;
	regs->ds = __USER_DS;
	regs->es = __USER_DS;
	regs->ss = __USER_DS;
	regs->cs = __USER_CS;
	regs->ip = new_ip;
	regs->sp = new_sp;
	regs->flags = X86_EFLAGS_IF;
	force_iret();
}
EXPORT_SYMBOL_GPL(start_thread);


/*
 *	switch_to(x,y) should switch tasks from x to y.
 *
 * We fsave/fwait so that an exception goes off at the right time
 * (as a call from the fsave or fwait in effect) rather than to
 * the wrong process. Lazy FP saving no longer makes any sense
 * with modern CPUs, and this simplifies a lot of things (SMP
 * and UP become the same).
 *
 * NOTE! We used to use the x86 hardware context switching. The
 * reason for not using it any more becomes apparent when you
 * try to recover gracefully from saved state that is no longer
 * valid (stale segment register values in particular). With the
 * hardware task-switch, there is no way to fix up bad state in
 * a reasonable manner.
 *
 * The fact that Intel documents the hardware task-switching to
 * be slow is something of a red herring - this code is not noticeably
 * faster. However, there _is_ some room for improvement here,
 * so the performance issues may eventually be a valid point.
 * More important, however, is the fact that this allows us much
 * more flexibility.
 *
 * The return value (in %ax) will be the "prev" task after
 * the task-switch, and shows up in ret_from_fork in entry.S,
 * for example.
 */
__visible __notrace_funcgraph struct task_struct *
__switch_to(struct task_struct *prev_p, struct task_struct *next_p)
{
	struct thread_struct *prev = &prev_p->thread,
			     *next = &next_p->thread;
	struct fpu *prev_fpu = &prev->fpu;
	struct fpu *next_fpu = &next->fpu;
	int cpu = smp_processor_id();

	/* never put a printk in __switch_to... printk() calls wake_up*() indirectly */

	if (!test_thread_flag(TIF_NEED_FPU_LOAD))
		switch_fpu_prepare(prev_fpu, cpu);

	/*
	 * Save away %gs. No need to save %fs, as it was saved on the
	 * stack on entry. No need to save %es and %ds, as those are
	 * always kernel segments while inside the kernel. Doing this
	 * before setting the new TLS descriptors avoids the situation
	 * where we temporarily have non-reloadable segments in %fs
	 * and %gs. This could be an issue if the NMI handler ever
	 * used %fs or %gs (it does not today), or if the kernel is
	 * running inside of a hypervisor layer.
	 */
	lazy_save_gs(prev->gs);

	/*
	 * Load the per-thread Thread-Local Storage descriptor.
	 */
	load_TLS(next, cpu);

	/*
	 * Restore IOPL if needed. In normal use, the flags restore
	 * in the switch assembly will handle this. But if the kernel
	 * is running virtualized at a non-zero CPL, the popf will
	 * not restore flags, so it must be done in a separate step.
	 */
	if (get_kernel_rpl() && unlikely(prev->iopl != next->iopl))
		set_iopl_mask(next->iopl);

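	/*
	 * Handle the less common per-task state tracked via TIF flags
	 * (I/O bitmap, blockstep, TSC restrictions, speculation controls)
	 * where it differs between prev and next.
	 */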
	switch_to_extra(prev_p, next_p);

	/*
	 * Leave lazy mode, flushing any hypercalls made here.
	 * This must be done before restoring TLS segments so
	 * the GDT and LDT are properly updated.
	 */
	arch_end_context_switch(next_p);

	/*
	 * Reload esp0 and cpu_current_top_of_stack. This changes
	 * current_thread_info(). Refresh the SYSENTER configuration in
	 * case prev or next is vm86.
	 */
	update_task_stack(next_p);
	refresh_sysenter_cs(next);
	this_cpu_write(cpu_current_top_of_stack,
		       (unsigned long)task_stack_page(next_p) +
		       THREAD_SIZE);

	/*
	 * Restore %gs if needed (which is common)
	 */
	if (prev->gs | next->gs)
		lazy_load_gs(next->gs);

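	/* Make get_current() on this CPU point at the incoming task. */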
	this_cpu_write(current_task, next_p);

	switch_fpu_finish(next_fpu);

	/* Load the Intel cache allocation PQR MSR. */
	resctrl_sched_in();

	return prev_p;
}

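/*
 * On 32-bit kernels arch_prctl() only supports the architecture-independent
 * options, so everything is forwarded to do_arch_prctl_common().
 */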
SYSCALL_DEFINE2(arch_prctl, int, option, unsigned long, arg2)
{
	return do_arch_prctl_common(current, option, arg2);
}