/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994, 95, 96, 97, 98, 99, 2003, 06 by Ralf Baechle
 * Copyright (C) 1996 by Paul M. Antoine
 * Copyright (C) 1999 Silicon Graphics
 * Kevin D. Kissell, kevink@mips.org and Carsten Langgaard, carstenl@mips.com
 * Copyright (C) 2000 MIPS Technologies, Inc.
 */
#ifndef _ASM_SWITCH_TO_H
#define _ASM_SWITCH_TO_H

#include <asm/cpu-features.h>
#include <asm/watch.h>
#include <asm/dsp.h>
#include <asm/cop2.h>
#include <asm/fpu.h>

struct task_struct;

/**
 * resume - resume execution of a task
 * @prev:	The task previously executed.
 * @next:	The task to begin executing.
 * @next_ti:	task_thread_info(next).
 *
 * This function is used whilst scheduling to save the context of prev & load
 * the context of next. Returns prev.
 */
extern asmlinkage struct task_struct *resume(struct task_struct *prev,
		struct task_struct *next, struct thread_info *next_ti);

extern unsigned int ll_bit;
extern struct task_struct *ll_task;

#ifdef CONFIG_MIPS_MT_FPAFF

/*
 * Handle the scheduler resume end of FPU affinity management. We do this
 * inline to try to keep the overhead down. If we have been forced to run on
 * a "CPU" with an FPU because of a previous high level of FP computation,
 * but did not actually use the FPU during the most recent time-slice (CU1
 * isn't set), we undo the restriction on cpus_allowed.
 *
 * We're not calling set_cpus_allowed() here, because we have no need to
 * force prompt migration - we're already switching the current CPU to a
 * different thread.
 */

#define __mips_mt_fpaff_switch_to(prev)					\
do {									\
	struct thread_info *__prev_ti = task_thread_info(prev);	\
									\
	if (cpu_has_fpu &&						\
	    test_ti_thread_flag(__prev_ti, TIF_FPUBOUND) &&		\
	    (!(KSTK_STATUS(prev) & ST0_CU1))) {				\
		clear_ti_thread_flag(__prev_ti, TIF_FPUBOUND);		\
		prev->cpus_allowed = prev->thread.user_cpus_allowed;	\
	}								\
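	/* Note: `next' is resolved at the switch_to() expansion site. */	\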
	next->thread.emulated_fp = 0;					\
} while (0)

#else
#define __mips_mt_fpaff_switch_to(prev) do { (void) (prev); } while (0)
#endif

/*
 * Clear LLBit during context switches on MIPSr6 such that eretnc can be used
 * unconditionally when returning to userland in entry.S.
 */
#define __clear_r6_hw_ll_bit() do {					\
	if (cpu_has_mips_r6)						\
		write_c0_lladdr(0);					\
} while (0)

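/*
 * Clear the software-emulated LL bit; this compiles away entirely when
 * cpu_has_llsc is known true at build time.
 */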
#define __clear_software_ll_bit() do {					\
	if (!__builtin_constant_p(cpu_has_llsc) || !cpu_has_llsc)	\
		ll_bit = 0;						\
} while (0)

/*
 * Check the FCSR for any unmasked exceptions pending that were set with
 * `ptrace'; clear them and send a signal.
 */
#ifdef CONFIG_MIPS_FP_SUPPORT
# define __sanitize_fcr31(next)						\
do {									\
	unsigned long fcr31 = mask_fcr31_x(next->thread.fpu.fcr31);	\
	void __user *pc;						\
									\
	if (unlikely(fcr31)) {						\
		pc = (void __user *)task_pt_regs(next)->cp0_epc;	\
		next->thread.fpu.fcr31 &= ~fcr31;			\
		force_fcr31_sig(fcr31, pc, next);			\
	}								\
} while (0)
#else
# define __sanitize_fcr31(next)
#endif

/*
 * For newly created kernel threads switch_to() will return to
 * ret_from_kernel_thread, newly created user threads to ret_from_fork.
 * That is, everything following resume() will be skipped for new threads.
 * So everything that matters to new threads should be placed before resume().
 */
#define switch_to(prev, next, last)					\
do {									\
	__mips_mt_fpaff_switch_to(prev);				\
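	/* Save prev's FP/MSA context and give up the FPU before switching. */	\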
	lose_fpu_inatomic(1, prev);					\
	if (tsk_used_math(next))					\
		__sanitize_fcr31(next);					\
	if (cpu_has_dsp) {						\
		__save_dsp(prev);					\
		__restore_dsp(next);					\
	}								\
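	/* Switch Coprocessor 2 context, enabling CU2 around the accesses. */	\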
	if (cop2_present) {						\
		set_c0_status(ST0_CU2);					\
		if ((KSTK_STATUS(prev) & ST0_CU2)) {			\
			if (cop2_lazy_restore)				\
				KSTK_STATUS(prev) &= ~ST0_CU2;		\
			cop2_save(prev);				\
		}							\
		if (KSTK_STATUS(next) & ST0_CU2 &&			\
		    !cop2_lazy_restore) {				\
			cop2_restore(next);				\
		}							\
		clear_c0_status(ST0_CU2);				\
	}								\
	__clear_r6_hw_ll_bit();						\
	__clear_software_ll_bit();					\
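	/* Publish next's TLS pointer through the CP0 UserLocal register. */	\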
	if (cpu_has_userlocal)						\
		write_c0_userlocal(task_thread_info(next)->tp_value);	\
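	/* Reinstall next's hardware watchpoint registers, if any. */	\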
	__restore_watch(next);						\
	(last) = resume(prev, next, task_thread_info(next));		\
} while (0)

#endif /* _ASM_SWITCH_TO_H */