#ifndef _X86_IRQFLAGS_H_
#define _X86_IRQFLAGS_H_

#include <asm/processor-flags.h>

#ifndef __ASSEMBLY__
/*
 * Interrupt control:
 */

static inline unsigned long native_save_fl(void)
{
        unsigned long flags;

        /*
         * "=rm" is safe here, because "pop" adjusts the stack before
         * it evaluates its effective address -- this is part of the
         * documented behavior of the "pop" instruction.
         */
        asm volatile("# __raw_save_flags\n\t"
                     "pushf ; pop %0"
                     : "=rm" (flags)
                     : /* no input */
                     : "memory");

        return flags;
}
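
/*
 * A rough sketch of what the "=rm" constraint allows the compiler to emit
 * (illustrative only, not taken from any particular build):
 *
 *      pushf ; pop %rax                # register operand chosen
 *      pushf ; pop 0x8(%rsp)           # memory operand (a stack slot) chosen
 *
 * The memory form is only correct because "pop" bumps the stack pointer
 * before the destination's effective address is evaluated, as noted above.
 */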

static inline void native_restore_fl(unsigned long flags)
{
        asm volatile("push %0 ; popf"
                     : /* no output */
                     :"g" (flags)
                     :"memory", "cc");
}
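
/*
 * popf rewrites every flag, including the arithmetic flags, hence the
 * "cc" clobber; the "memory" clobber acts as a compiler barrier so memory
 * accesses are not moved across a change of interrupt state.
 */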

static inline void native_irq_disable(void)
{
        asm volatile("cli": : :"memory");
}

static inline void native_irq_enable(void)
{
        asm volatile("sti": : :"memory");
}

static inline void native_safe_halt(void)
{
        asm volatile("sti; hlt": : :"memory");
}
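
/*
 * The "sti; hlt" pair relies on the architectural one-instruction delay
 * after sti: interrupts are not recognized until the instruction following
 * sti has executed, so nothing can slip in between enabling interrupts and
 * halting, and a wakeup interrupt always finds the CPU already in hlt.
 */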

static inline void native_halt(void)
{
        asm volatile("hlt": : :"memory");
}

#endif

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else
#ifndef __ASSEMBLY__

static inline unsigned long __raw_local_save_flags(void)
{
        return native_save_fl();
}

static inline void raw_local_irq_restore(unsigned long flags)
{
        native_restore_fl(flags);
}

static inline void raw_local_irq_disable(void)
{
        native_irq_disable();
}

static inline void raw_local_irq_enable(void)
{
        native_irq_enable();
}

/*
 * Used in the idle loop; sti takes one instruction cycle
 * to complete:
 */
static inline void raw_safe_halt(void)
{
        native_safe_halt();
}

/*
 * Used when interrupts are already enabled or to
 * shut down the processor:
 */
static inline void halt(void)
{
        native_halt();
}

/*
 * For spinlocks, etc:
 */
static inline unsigned long __raw_local_irq_save(void)
{
        unsigned long flags = __raw_local_save_flags();

        raw_local_irq_disable();

        return flags;
}
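
/*
 * Typical caller-side shape, as wrapped by the raw_local_irq_save() macro
 * defined further down (a sketch only, not code from this header):
 *
 *      unsigned long flags;
 *
 *      raw_local_irq_save(flags);      // save the flags image, then cli
 *      ... critical section ...
 *      raw_local_irq_restore(flags);   // IF goes back to whatever it was
 */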
#else

#define ENABLE_INTERRUPTS(x)    sti
#define DISABLE_INTERRUPTS(x)   cli
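/*
 * The macro argument is only meaningful in the CONFIG_PARAVIRT build, where
 * (per asm/paravirt.h) it names the clobbers allowed at the patched call
 * site; the plain sti/cli versions here simply ignore it.
 */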

#ifdef CONFIG_X86_64
#define SWAPGS  swapgs
/*
 * Currently paravirt can't handle swapgs nicely when we
 * don't have a stack we can rely on (such as a user space
 * stack).  So we either find a way around those cases or just
 * fault and emulate if a guest tries to call swapgs directly.
 *
 * Either way, this is a good way to document that we don't
 * have a reliable stack. x86_64 only.
 */
#define SWAPGS_UNSAFE_STACK     swapgs

#define PARAVIRT_ADJUST_EXCEPTION_FRAME /*  */

#define INTERRUPT_RETURN        iretq
#define USERGS_SYSRET64                         \
        swapgs;                                 \
        sysretq;
#define USERGS_SYSRET32                         \
        swapgs;                                 \
        sysretl
#define ENABLE_INTERRUPTS_SYSEXIT32             \
        swapgs;                                 \
        sti;                                    \
        sysexit
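
/*
 * Natively these user-return macros are just "switch back to the user GS
 * base, then sysret/sysexit".  In a CONFIG_PARAVIRT build this whole block
 * is skipped and the definitions come from asm/paravirt.h instead, so a
 * hypervisor can take over the return-to-user path.
 */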

#else
#define INTERRUPT_RETURN                iret
#define ENABLE_INTERRUPTS_SYSEXIT       sti; sysexit
#define GET_CR0_INTO_EAX                movl %cr0, %eax
#endif


#endif /* __ASSEMBLY__ */
#endif /* CONFIG_PARAVIRT */

#ifndef __ASSEMBLY__
#define raw_local_save_flags(flags)                             \
        do { (flags) = __raw_local_save_flags(); } while (0)

#define raw_local_irq_save(flags)                               \
        do { (flags) = __raw_local_irq_save(); } while (0)

static inline int raw_irqs_disabled_flags(unsigned long flags)
{
        return !(flags & X86_EFLAGS_IF);
}
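
/*
 * X86_EFLAGS_IF comes from <asm/processor-flags.h> included above; it is
 * bit 9 (0x00000200) of the (E/R)FLAGS image, the interrupt-enable flag,
 * so "disabled" simply means that bit is clear in the saved flags value.
 */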

static inline int raw_irqs_disabled(void)
{
        unsigned long flags = __raw_local_save_flags();

        return raw_irqs_disabled_flags(flags);
}

#else

#ifdef CONFIG_X86_64
#define ARCH_LOCKDEP_SYS_EXIT           call lockdep_sys_exit_thunk
#define ARCH_LOCKDEP_SYS_EXIT_IRQ       \
        TRACE_IRQS_ON; \
        sti; \
        SAVE_REST; \
        LOCKDEP_SYS_EXIT; \
        RESTORE_REST; \
        cli; \
        TRACE_IRQS_OFF;
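
/*
 * The _IRQ variant is for exit paths reached with interrupts disabled: it
 * briefly enables interrupts around the lockdep call, keeping the
 * TRACE_IRQS_* annotations in sync with the actual sti/cli, and uses
 * SAVE_REST/RESTORE_REST from the x86_64 entry code to preserve registers
 * across that call.
 */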

#else
#define ARCH_LOCKDEP_SYS_EXIT                   \
        pushl %eax;                             \
        pushl %ecx;                             \
        pushl %edx;                             \
        call lockdep_sys_exit;                  \
        popl %edx;                              \
        popl %ecx;                              \
        popl %eax;
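
/*
 * %eax, %ecx and %edx are the caller-clobbered registers of the 32-bit C
 * calling convention, so they are the only ones lockdep_sys_exit() may
 * trash; saving and restoring just those three keeps the surrounding
 * assembly's register state intact.
 */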

#define ARCH_LOCKDEP_SYS_EXIT_IRQ
#endif

#ifdef CONFIG_TRACE_IRQFLAGS
# define TRACE_IRQS_ON          call trace_hardirqs_on_thunk;
# define TRACE_IRQS_OFF         call trace_hardirqs_off_thunk;
#else
# define TRACE_IRQS_ON
# define TRACE_IRQS_OFF
#endif
#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define LOCKDEP_SYS_EXIT       ARCH_LOCKDEP_SYS_EXIT
# define LOCKDEP_SYS_EXIT_IRQ   ARCH_LOCKDEP_SYS_EXIT_IRQ
# else
# define LOCKDEP_SYS_EXIT
# define LOCKDEP_SYS_EXIT_IRQ
# endif

#endif /* __ASSEMBLY__ */
#endif /* _X86_IRQFLAGS_H_ */