/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_IRQ_STACK_H
#define _ASM_X86_IRQ_STACK_H

#include <linux/ptrace.h>

#include <asm/processor.h>

#ifdef CONFIG_X86_64

/*
 * Macro to inline switching to an interrupt stack and invoking function
 * calls from there. The following rules apply:
 *
 * - Ordering:
 *
 *   1. Write the stack pointer into the topmost place of the irq
 *	stack. This ensures that the various unwinders can link back to the
 *	original stack.
 *
 *   2. Switch the stack pointer to the top of the irq stack.
 *
 *   3. Invoke whatever needs to be done (@asm_call argument)
 *
 *   4. Pop the original stack pointer from the top of the irq stack
 *	which brings it back to the original stack where it left off.
 *
 * - Function invocation:
 *
 *   To allow flexible usage of the macro, the actual function code including
 *   the store of the arguments in the call ABI registers is handed in via
 *   the @asm_call argument.
 *
 * - Local variables:
 *
 *   @tos:
 *	The @tos variable holds a pointer to the top of the irq stack and
 *	_must_ be allocated in a non-callee saved register as this is a
 *	restriction coming from objtool.
 *
 *	Note that @tos is in both the input and output constraints to
 *	ensure that the compiler does not assume that R11 is left untouched
 *	in case this macro is used in a place where the per-CPU interrupt
 *	stack pointer is used again afterwards.
 *
 * - Function arguments:
 *	The function argument(s), if any, have to be defined in register
 *	variables at the place where this is invoked. Storing the
 *	argument(s) in the proper register(s) is part of @asm_call.
 *
 * - Constraints:
 *
 *   The constraints have to be done very carefully because the compiler
 *   does not know about the assembly call.
 *
 *   output:
 *     As documented above, the @tos variable is required to be in the
 *     output constraints to make the compiler aware that R11 cannot be
 *     reused after the asm() statement.
 *
 *     For builds with CONFIG_UNWINDER_FRAME_POINTER, ASM_CALL_CONSTRAINT
 *     is required as well, as this prevents certain creative GCC variants
 *     from misplacing the ASM code.
 *
 *   input:
 *   - func:
 *	Immediate, which tells the compiler that the function is referenced.
 *
 *   - tos:
 *	Register. The actual register is defined by the variable declaration.
 *
 *   - function arguments:
 *	The constraints are handed in via the 'argconstr' argument list. They
 *	describe the register arguments which are used in @asm_call.
 *
 *   clobbers:
 *     Function calls can clobber anything except the callee-saved
 *     registers. Tell the compiler.
 */
#define call_on_irqstack(func, asm_call, argconstr...)			\
{									\
	register void *tos asm("r11");					\
									\
	tos = ((void *)__this_cpu_read(hardirq_stack_ptr));		\
									\
	asm_inline volatile(						\
	"movq	%%rsp, (%[tos])				\n"		\
	"movq	%[tos], %%rsp				\n"		\
									\
	asm_call							\
									\
	"popq	%%rsp					\n"		\
									\
	: "+r" (tos), ASM_CALL_CONSTRAINT				\
	: [__func] "i" (func), [tos] "r" (tos) argconstr		\
	: "cc", "rax", "rcx", "rdx", "rsi", "rdi", "r8", "r9", "r10",	\
	  "memory"							\
	);								\
}
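
/*
 * Illustrative usage sketch (not part of this file's API): invoking a
 * hypothetical handler my_handler(struct pt_regs *regs). The @asm_call
 * sequence loads the argument into RDI per the x86_64 call ABI, and the
 * constraint macro carries a leading comma because it is appended to the
 * macro's own input operand list:
 *
 *	#define MY_CONSTRAINTS	, [arg1] "r" (regs)
 *
 *	call_on_irqstack(my_handler,
 *			 "movq %[arg1], %%rdi \n"
 *			 "call %P[__func] \n",
 *			 MY_CONSTRAINTS);
 */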

/* Macros to assert type correctness for run_*_on_irqstack macros */
#define assert_function_type(func, proto)				\
	static_assert(__builtin_types_compatible_p(typeof(&func), proto))

#define assert_arg_type(arg, proto)					\
	static_assert(__builtin_types_compatible_p(typeof(arg), proto))

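/*
 * Example (illustrative, with a hypothetical handler): given
 * void my_handler(struct pt_regs *regs), the following compiles, while
 * any mismatch in the prototype or argument type fails the build:
 *
 *	assert_function_type(my_handler, void (*)(struct pt_regs *));
 *	assert_arg_type(regs, struct pt_regs *);
 */
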
/*
 * Macro to invoke system vector and device interrupt C handlers.
 */
#define call_on_irqstack_cond(func, regs, asm_call, constr, c_args...)	\
{									\
	/*								\
	 * User mode entry and interrupt on the irq stack do not	\
	 * switch stacks. If from user mode the task stack is empty.	\
	 */								\
	if (user_mode(regs) || __this_cpu_read(hardirq_stack_inuse)) {	\
		irq_enter_rcu();					\
		func(c_args);						\
		irq_exit_rcu();						\
	} else {							\
		/*							\
		 * Mark the irq stack inuse _before_ and unmark _after_	\
		 * switching stacks. Interrupts are disabled in both	\
		 * places. Invoke the stack switch macro with the call	\
		 * sequence which matches the above direct invocation.	\
		 */							\
		__this_cpu_write(hardirq_stack_inuse, true);		\
		call_on_irqstack(func, asm_call, constr);		\
		__this_cpu_write(hardirq_stack_inuse, false);		\
	}								\
}
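
/*
 * For illustration with a hypothetical my_handler(regs): the direct
 * branch above is plain C, i.e.
 *
 *	irq_enter_rcu(); my_handler(regs); irq_exit_rcu();
 *
 * while the else branch performs the same three calls from within the
 * @asm_call sequence after switching to the irq stack.
 */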

/*
 * Function call sequence for call_on_irqstack() for system vectors.
 *
 * Note that irq_enter_rcu() and irq_exit_rcu() do not use the input
 * mechanism because these functions are global and cannot be optimized out
 * when compiling a particular source file which uses one of these macros.
 *
 * The argument (regs) does not need to be pushed or stashed in a callee
 * saved register to be safe vs. the irq_enter_rcu() call because the
 * clobbers already prevent the compiler from storing it in a callee
 * clobbered register. As the compiler has to preserve @regs for the final
 * call to idtentry_exit() anyway, it's likely that it does not cause extra
 * effort for this asm magic.
 */
#define ASM_CALL_SYSVEC							\
	"call irq_enter_rcu				\n"		\
	"movq	%[arg1], %%rdi				\n"		\
	"call %P[__func]				\n"		\
	"call irq_exit_rcu				\n"

#define SYSVEC_CONSTRAINTS	, [arg1] "r" (regs)

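/*
 * For reference, a sketch of the instruction sequence which results from
 * combining call_on_irqstack() with ASM_CALL_SYSVEC (operand names
 * resolved by hand):
 *
 *	movq	%rsp, (%r11)	# save task stack pointer on the irq stack
 *	movq	%r11, %rsp	# switch to the irq stack
 *	call	irq_enter_rcu
 *	movq	<regs>, %rdi	# load @regs as the first argument
 *	call	<func>
 *	call	irq_exit_rcu
 *	popq	%rsp		# switch back to the task stack
 */
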
#define run_sysvec_on_irqstack_cond(func, regs)				\
{									\
	assert_function_type(func, void (*)(struct pt_regs *));	\
	assert_arg_type(regs, struct pt_regs *);			\
									\
	call_on_irqstack_cond(func, regs, ASM_CALL_SYSVEC,		\
			      SYSVEC_CONSTRAINTS, regs);		\
}

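/*
 * Typical caller, sketched from the DEFINE_IDTENTRY_SYSVEC expansion in
 * <asm/idtentry.h> (simplified; sysvec_example is a hypothetical name):
 *
 *	__visible noinstr void sysvec_example(struct pt_regs *regs)
 *	{
 *		irqentry_state_t state = irqentry_enter(regs);
 *
 *		instrumentation_begin();
 *		run_sysvec_on_irqstack_cond(__sysvec_example, regs);
 *		instrumentation_end();
 *		irqentry_exit(regs, state);
 *	}
 */
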
/*
 * As in ASM_CALL_SYSVEC above, the clobbers force the compiler to store
 * @regs and @vector in callee saved registers.
 */
#define ASM_CALL_IRQ							\
	"call irq_enter_rcu				\n"		\
	"movq	%[arg1], %%rdi				\n"		\
	"movl	%[arg2], %%esi				\n"		\
	"call %P[__func]				\n"		\
	"call irq_exit_rcu				\n"

#define IRQ_CONSTRAINTS	, [arg1] "r" (regs), [arg2] "r" (vector)

#define run_irq_on_irqstack_cond(func, regs, vector)			\
{									\
	assert_function_type(func, void (*)(struct pt_regs *, u32));	\
	assert_arg_type(regs, struct pt_regs *);			\
	assert_arg_type(vector, u32);					\
									\
	call_on_irqstack_cond(func, regs, ASM_CALL_IRQ,			\
			      IRQ_CONSTRAINTS, regs, vector);		\
}

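/*
 * The device interrupt variant is used the same way from the
 * DEFINE_IDTENTRY_IRQ expansion for the common_interrupt() entry path,
 * e.g. (sketch):
 *
 *	run_irq_on_irqstack_cond(__common_interrupt, regs, vector);
 */
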
#define ASM_CALL_SOFTIRQ						\
	"call %P[__func]				\n"

/*
 * Macro to invoke __do_softirq on the irq stack. This is only called from
 * task context when bottom halves are about to be reenabled and soft
 * interrupts are pending to be processed. The interrupt stack cannot be in
 * use here.
 */
#define do_softirq_own_stack()						\
{									\
	__this_cpu_write(hardirq_stack_inuse, true);			\
	call_on_irqstack(__do_softirq, ASM_CALL_SOFTIRQ);		\
	__this_cpu_write(hardirq_stack_inuse, false);			\
}

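/*
 * Sketch of the call site in kernel/softirq.c (simplified): the stack
 * switch only ever happens from task context with softirqs pending,
 * which is why the irq stack is guaranteed to be free here:
 *
 *	asmlinkage __visible void do_softirq(void)
 *	{
 *		unsigned long flags;
 *
 *		if (in_interrupt())
 *			return;
 *
 *		local_irq_save(flags);
 *		if (local_softirq_pending())
 *			do_softirq_own_stack();
 *		local_irq_restore(flags);
 *	}
 */
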
#else /* CONFIG_X86_64 */
/* System vector handlers always run on the stack they interrupted. */
#define run_sysvec_on_irqstack_cond(func, regs)				\
{									\
	irq_enter_rcu();						\
	func(regs);							\
	irq_exit_rcu();							\
}

/* Switches to the irq stack within func() */
#define run_irq_on_irqstack_cond(func, regs, vector)			\
{									\
	irq_enter_rcu();						\
	func(regs, vector);						\
	irq_exit_rcu();							\
}

#endif /* !CONFIG_X86_64 */

#endif