blob: 0de05a268b22e7c5366b81e04c5c4e89392188b6 [file] [log] [blame]
Linus Torvalds1da177e2005-04-16 15:20:36 -07001/*
2 * Copyright (C) 2002 Jeff Dike (jdike@karaya.com)
3 * Licensed under the GPL
4 */
5
6#include "linux/sched.h"
7#include "linux/signal.h"
8#include "linux/kernel.h"
9#include "linux/interrupt.h"
10#include "linux/ptrace.h"
11#include "asm/system.h"
12#include "asm/pgalloc.h"
13#include "asm/ptrace.h"
14#include "asm/tlbflush.h"
15#include "irq_user.h"
16#include "signal_user.h"
17#include "kern_util.h"
18#include "user_util.h"
19#include "os.h"
20#include "kern.h"
21#include "sigcontext.h"
22#include "time_user.h"
23#include "mem_user.h"
24#include "tlb.h"
25#include "mode.h"
26#include "init.h"
27#include "tt.h"
28
/*
 * Context switch for tt mode.  Each kernel task is backed by its own host
 * process; a switch wakes the incoming process by writing a byte to its
 * switch pipe, then puts the outgoing process to sleep on a blocking read
 * of its own switch pipe until it is scheduled back in.
 *
 * prev/next are the outgoing/incoming task_structs (passed as void * from
 * the arch switch macro); `last` is unused here.
 *
 * NOTE(review): declared int but falls off the end without a return
 * statement — the caller presumably ignores the value, but this is worth
 * confirming/cleaning up.
 */
int switch_to_tt(void *prev, void *next, void *last)
{
	struct task_struct *from, *to, *prev_sched;
	unsigned long flags;
	int err, vtalrm, alrm, prof, cpu;
	char c;

	from = prev;
	to = next;

	/* Redirect externally-generated interrupts to the incoming process
	 * (CPU 0 only for the global ones; IPIs per-cpu under SMP). */
	cpu = from->thread_info->cpu;
	if(cpu == 0)
		forward_interrupts(to->thread.mode.tt.extern_pid);
#ifdef CONFIG_SMP
	forward_ipi(cpu_data[cpu].ipi_pipe[0], to->thread.mode.tt.extern_pid);
#endif
	local_irq_save(flags);

	/* Block the timer signals across the switch, remembering their
	 * previous state so they can be restored below. */
	vtalrm = change_sig(SIGVTALRM, 0);
	alrm = change_sig(SIGALRM, 0);
	prof = change_sig(SIGPROF, 0);

	forward_pending_sigio(to->thread.mode.tt.extern_pid);

	c = 0;

	/* Wake the incoming process: it is blocked reading its switch pipe. */
	err = os_write_file(to->thread.mode.tt.switch_pipe[1], &c, sizeof(c));
	if(err != sizeof(c))
		panic("write of switch_pipe failed, err = %d", -err);

	/* switch_pipe[0] == -1 marks a released (exited) thread — see
	 * release_thread_tt.  If we are it, kill our host process now. */
	if(from->thread.mode.tt.switch_pipe[0] == -1)
		os_kill_process(os_getpid(), 0);

	/* Sleep until someone schedules us back in by writing our pipe. */
	err = os_read_file(from->thread.mode.tt.switch_pipe[0], &c, sizeof(c));
	if(err != sizeof(c))
		panic("read of switch_pipe failed, errno = %d", -err);

	/* If the process that we have just scheduled away from has exited,
	 * then it needs to be killed here.  The reason is that, even though
	 * it will kill itself when it next runs, that may be too late.  Its
	 * stack will be freed, possibly before then, and if that happens,
	 * we have a use-after-free situation.  So, it gets killed here
	 * in case it has not already killed itself.
	 */
	prev_sched = current->thread.prev_sched;
	if(prev_sched->thread.mode.tt.switch_pipe[0] == -1)
		os_kill_process(prev_sched->thread.mode.tt.extern_pid, 1);

	/* Restore the timer signal dispositions saved above. */
	change_sig(SIGVTALRM, vtalrm);
	change_sig(SIGALRM, alrm);
	change_sig(SIGPROF, prof);

	arch_switch();

	flush_tlb_all();
	local_irq_restore(flags);
}
86
87void release_thread_tt(struct task_struct *task)
88{
89 int pid = task->thread.mode.tt.extern_pid;
90
Bodo Stroesser0f7e6632005-05-06 21:30:54 -070091 /*
92 * We first have to kill the other process, before
93 * closing its switch_pipe. Else it might wake up
94 * and receive "EOF" before we could kill it.
95 */
Linus Torvalds1da177e2005-04-16 15:20:36 -070096 if(os_getpid() != pid)
97 os_kill_process(pid, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -070098
Bodo Stroesser0f7e6632005-05-06 21:30:54 -070099 os_close_file(task->thread.mode.tt.switch_pipe[0]);
100 os_close_file(task->thread.mode.tt.switch_pipe[1]);
101 /* use switch_pipe as flag: thread is released */
102 task->thread.mode.tt.switch_pipe[0] = -1;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700103}
104
/*
 * Park a freshly created thread: stop the host process, then block on
 * its switch pipe until the scheduler writes a wakeup byte to it.
 */
void suspend_new_thread(int fd)
{
	char wakeup;
	int n;

	os_stop_process(os_getpid());

	n = os_read_file(fd, &wakeup, sizeof(wakeup));
	if(n != sizeof(wakeup))
		panic("read failed in suspend_new_thread, err = %d", -n);
}
115
116void schedule_tail(task_t *prev);
117
/*
 * SIGUSR1 handler in which a newly created kernel thread starts life.
 * It records its sigcontext, unblocks the timer/IO signals in that
 * saved mask (they were blocked by new_thread_proc's local_irq_disable
 * — see the comment there), parks until first scheduled, then runs the
 * thread function stashed in current->thread.request.
 */
static void new_thread_handler(int sig)
{
	unsigned long disable;
	int (*fn)(void *);
	void *arg;

	/* Thread function and argument were copied into the new task's
	 * request block by copy_thread_tt. */
	fn = current->thread.request.u.thread.proc;
	arg = current->thread.request.u.thread.arg;

	/* The sigcontext lives on the stack just above the handler's
	 * argument; clear the timer/IO signals from its saved mask so
	 * they are deliverable once this context is restored. */
	UPT_SC(&current->thread.regs.regs) = (void *) (&sig + 1);
	disable = (1 << (SIGVTALRM - 1)) | (1 << (SIGALRM - 1)) |
		(1 << (SIGIO - 1)) | (1 << (SIGPROF - 1));
	SC_SIGMASK(UPT_SC(&current->thread.regs.regs)) &= ~disable;

	/* Sleep until the scheduler first switches to this thread. */
	suspend_new_thread(current->thread.mode.tt.switch_pipe[0]);

	force_flush_all();
	if(current->thread.prev_sched != NULL)
		schedule_tail(current->thread.prev_sched);
	current->thread.prev_sched = NULL;

	init_new_thread_signals(1);
	enable_timer();
	/* The temporary stack used during creation is no longer needed. */
	free_page(current->thread.temp_stack);
	set_cmdline("(kernel thread)");

	change_sig(SIGUSR1, 1);
	change_sig(SIGVTALRM, 1);
	change_sig(SIGPROF, 1);
	local_irq_enable();
	if(!run_kernel_thread(fn, arg, &current->thread.exec_buf))
		do_exit(0);

	/* XXX No set_user_mode here because a newly execed process will
	 * immediately segfault on its non-existent IP, coming straight back
	 * to the signal handler, which will call set_user_mode on its way
	 * out.  This should probably change since it's confusing.
	 */
}
157
158static int new_thread_proc(void *stack)
159{
160 /* local_irq_disable is needed to block out signals until this thread is
161 * properly scheduled. Otherwise, the tracing thread will get mighty
162 * upset about any signals that arrive before that.
163 * This has the complication that it sets the saved signal mask in
164 * the sigcontext to block signals. This gets restored when this
165 * thread (or a descendant, since they get a copy of this sigcontext)
166 * returns to userspace.
167 * So, this is compensated for elsewhere.
168 * XXX There is still a small window until local_irq_disable() actually
169 * finishes where signals are possible - shouldn't be a problem in
170 * practice since SIGIO hasn't been forwarded here yet, and the
171 * local_irq_disable should finish before a SIGVTALRM has time to be
172 * delivered.
173 */
174
175 local_irq_disable();
176 init_new_thread_stack(stack, new_thread_handler);
177 os_usr1_process(os_getpid());
178 change_sig(SIGUSR1, 1);
179 return(0);
180}
181
/* Signal masking - signals are blocked at the start of fork_tramp.  They
 * are re-enabled when finish_fork_handler is entered by fork_tramp hitting
 * itself with a SIGUSR1.  set_user_mode has to be run with SIGUSR1 off,
 * so it is blocked before it's called.  They are re-enabled on sigreturn
 * despite the fact that they were blocked when the SIGUSR1 was issued,
 * because copy_thread copies the parent's sigcontext, including the
 * signal mask, onto the signal frame.
 */

/*
 * SIGUSR1 handler in which a forked child process starts life: record
 * the sigcontext, park until first scheduled, then finish scheduler and
 * memory-protection bookkeeping before returning to userspace.
 */
void finish_fork_handler(int sig)
{
	/* The sigcontext sits on the stack just above the handler arg. */
	UPT_SC(&current->thread.regs.regs) = (void *) (&sig + 1);
	/* Sleep until the scheduler first switches to this process. */
	suspend_new_thread(current->thread.mode.tt.switch_pipe[0]);

	force_flush_all();
	if(current->thread.prev_sched != NULL)
		schedule_tail(current->thread.prev_sched);
	current->thread.prev_sched = NULL;

	enable_timer();
	change_sig(SIGVTALRM, 1);
	local_irq_enable();
	/* A child with its own address space needs the kernel area
	 * write-protected against userspace access. */
	if(current->mm != current->parent->mm)
		protect_memory(uml_reserved, high_physmem - uml_reserved, 1,
			       1, 0, 1);
	task_protections((unsigned long) current_thread);

	/* The temporary stack used during creation is no longer needed. */
	free_page(current->thread.temp_stack);
	local_irq_disable();
	/* SIGUSR1 must be blocked across set_user_mode — see the signal
	 * masking comment above. */
	change_sig(SIGUSR1, 0);
	set_user_mode(current);
}
214
215int fork_tramp(void *stack)
216{
217 local_irq_disable();
218 arch_init_thread();
219 init_new_thread_stack(stack, finish_fork_handler);
220
221 os_usr1_process(os_getpid());
222 change_sig(SIGUSR1, 1);
223 return(0);
224}
225
226int copy_thread_tt(int nr, unsigned long clone_flags, unsigned long sp,
227 unsigned long stack_top, struct task_struct * p,
228 struct pt_regs *regs)
229{
230 int (*tramp)(void *);
231 int new_pid, err;
232 unsigned long stack;
233
234 if(current->thread.forking)
235 tramp = fork_tramp;
236 else {
237 tramp = new_thread_proc;
238 p->thread.request.u.thread = current->thread.request.u.thread;
239 }
240
241 err = os_pipe(p->thread.mode.tt.switch_pipe, 1, 1);
242 if(err < 0){
243 printk("copy_thread : pipe failed, err = %d\n", -err);
244 return(err);
245 }
246
247 stack = alloc_stack(0, 0);
248 if(stack == 0){
249 printk(KERN_ERR "copy_thread : failed to allocate "
250 "temporary stack\n");
251 return(-ENOMEM);
252 }
253
254 clone_flags &= CLONE_VM;
255 p->thread.temp_stack = stack;
256 new_pid = start_fork_tramp(p->thread_info, stack, clone_flags, tramp);
257 if(new_pid < 0){
258 printk(KERN_ERR "copy_thread : clone failed - errno = %d\n",
259 -new_pid);
260 return(new_pid);
261 }
262
263 if(current->thread.forking){
Jeff Dikee0877f02005-06-25 14:55:21 -0700264 sc_to_sc(UPT_SC(&p->thread.regs.regs), UPT_SC(&regs->regs));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700265 SC_SET_SYSCALL_RETURN(UPT_SC(&p->thread.regs.regs), 0);
Jeff Dikee0877f02005-06-25 14:55:21 -0700266 if(sp != 0)
267 SC_SP(UPT_SC(&p->thread.regs.regs)) = sp;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700268 }
269 p->thread.mode.tt.extern_pid = new_pid;
270
271 current->thread.request.op = OP_FORK;
272 current->thread.request.u.fork.pid = new_pid;
273 os_usr1_process(os_getpid());
274
275 /* Enable the signal and then disable it to ensure that it is handled
276 * here, and nowhere else.
277 */
278 change_sig(SIGUSR1, 1);
279
280 change_sig(SIGUSR1, 0);
281 err = 0;
282 return(err);
283}
284
285void reboot_tt(void)
286{
287 current->thread.request.op = OP_REBOOT;
288 os_usr1_process(os_getpid());
289 change_sig(SIGUSR1, 1);
290}
291
292void halt_tt(void)
293{
294 current->thread.request.op = OP_HALT;
295 os_usr1_process(os_getpid());
296 change_sig(SIGUSR1, 1);
297}
298
299void kill_off_processes_tt(void)
300{
301 struct task_struct *p;
302 int me;
303
304 me = os_getpid();
305 for_each_process(p){
306 if(p->thread.mode.tt.extern_pid != me)
307 os_kill_process(p->thread.mode.tt.extern_pid, 0);
308 }
309 if(init_task.thread.mode.tt.extern_pid != me)
310 os_kill_process(init_task.thread.mode.tt.extern_pid, 0);
311}
312
313void initial_thread_cb_tt(void (*proc)(void *), void *arg)
314{
315 if(os_getpid() == tracing_pid){
316 (*proc)(arg);
317 }
318 else {
319 current->thread.request.op = OP_CB;
320 current->thread.request.u.cb.proc = proc;
321 current->thread.request.u.cb.arg = arg;
322 os_usr1_process(os_getpid());
323 change_sig(SIGUSR1, 1);
324
325 change_sig(SIGUSR1, 0);
326 }
327}
328
329int do_proc_op(void *t, int proc_id)
330{
331 struct task_struct *task;
332 struct thread_struct *thread;
333 int op, pid;
334
335 task = t;
336 thread = &task->thread;
337 op = thread->request.op;
338 switch(op){
339 case OP_NONE:
340 case OP_TRACE_ON:
341 break;
342 case OP_EXEC:
343 pid = thread->request.u.exec.pid;
344 do_exec(thread->mode.tt.extern_pid, pid);
345 thread->mode.tt.extern_pid = pid;
346 cpu_tasks[task->thread_info->cpu].pid = pid;
347 break;
348 case OP_FORK:
349 attach_process(thread->request.u.fork.pid);
350 break;
351 case OP_CB:
352 (*thread->request.u.cb.proc)(thread->request.u.cb.arg);
353 break;
354 case OP_REBOOT:
355 case OP_HALT:
356 break;
357 default:
358 tracer_panic("Bad op in do_proc_op");
359 break;
360 }
361 thread->request.op = OP_NONE;
362 return(op);
363}
364
/* The tt-mode idle loop is simply the generic one. */
void init_idle_tt(void)
{
	default_idle();
}
369
370extern void start_kernel(void);
371
372static int start_kernel_proc(void *unused)
373{
374 int pid;
375
376 block_signals();
377 pid = os_getpid();
378
379 cpu_tasks[0].pid = pid;
380 cpu_tasks[0].task = current;
381#ifdef CONFIG_SMP
382 cpu_online_map = cpumask_of_cpu(0);
383#endif
384 if(debug) os_stop_process(pid);
385 start_kernel();
386 return(0);
387}
388
389void set_tracing(void *task, int tracing)
390{
391 ((struct task_struct *) task)->thread.mode.tt.tracing = tracing;
392}
393
394int is_tracing(void *t)
395{
396 return (((struct task_struct *) t)->thread.mode.tt.tracing);
397}
398
399int set_user_mode(void *t)
400{
401 struct task_struct *task;
402
403 task = t ? t : current;
404 if(task->thread.mode.tt.tracing)
405 return(1);
406 task->thread.request.op = OP_TRACE_ON;
407 os_usr1_process(os_getpid());
408 return(0);
409}
410
411void set_init_pid(int pid)
412{
413 int err;
414
415 init_task.thread.mode.tt.extern_pid = pid;
416 err = os_pipe(init_task.thread.mode.tt.switch_pipe, 1, 1);
417 if(err)
418 panic("Can't create switch pipe for init_task, errno = %d",
419 -err);
420}
421
422int start_uml_tt(void)
423{
424 void *sp;
425 int pages;
426
427 pages = (1 << CONFIG_KERNEL_STACK_ORDER);
428 sp = (void *) ((unsigned long) init_task.thread_info) +
429 pages * PAGE_SIZE - sizeof(unsigned long);
430 return(tracer(start_kernel_proc, sp));
431}
432
433int external_pid_tt(struct task_struct *task)
434{
435 return(task->thread.mode.tt.extern_pid);
436}
437
438int thread_pid_tt(struct task_struct *task)
439{
440 return(task->thread.mode.tt.extern_pid);
441}
442
443int is_valid_pid(int pid)
444{
445 struct task_struct *task;
446
447 read_lock(&tasklist_lock);
448 for_each_process(task){
449 if(task->thread.mode.tt.extern_pid == pid){
450 read_unlock(&tasklist_lock);
451 return(1);
452 }
453 }
454 read_unlock(&tasklist_lock);
455 return(0);
456}