blob: 3a0db7b0b46efcd9780ed23a8ec7a9852ec18cdc [file] [log] [blame]
Greg Kroah-Hartmanb2441312017-11-01 15:07:57 +01001/* SPDX-License-Identifier: GPL-2.0 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002/*
3 * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
Linus Torvalds1da177e2005-04-16 15:20:36 -07004 */
Kumar Galab671ad22005-09-21 16:52:55 -05005#ifndef _ASM_POWERPC_HW_IRQ_H
6#define _ASM_POWERPC_HW_IRQ_H
7
Linus Torvalds1da177e2005-04-16 15:20:36 -07008#ifdef __KERNEL__
Linus Torvalds1da177e2005-04-16 15:20:36 -07009
Linus Torvalds1da177e2005-04-16 15:20:36 -070010#include <linux/errno.h>
Paul Mackerrasd04c56f2006-10-04 16:47:49 +100011#include <linux/compiler.h>
Kumar Galab671ad22005-09-21 16:52:55 -050012#include <asm/ptrace.h>
13#include <asm/processor.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070014
Benjamin Herrenschmidt7230c562012-03-06 18:27:59 +110015#ifdef CONFIG_PPC64
16
17/*
18 * PACA flags in paca->irq_happened.
19 *
20 * These bits are set when interrupts occur while soft-disabled
21 * and allow a proper replay. Additionally, PACA_IRQ_HARD_DIS
22 * is set whenever we manually hard disable.
23 */
24#define PACA_IRQ_HARD_DIS 0x01
25#define PACA_IRQ_DBELL 0x02
26#define PACA_IRQ_EE 0x04
27#define PACA_IRQ_DEC 0x08 /* Or FIT */
28#define PACA_IRQ_EE_EDGE 0x10 /* BookE only */
Mahesh Salgaonkar0869b6f2014-07-29 18:40:01 +053029#define PACA_IRQ_HMI 0x20
Madhavan Srinivasanf442d002017-12-20 09:25:53 +053030#define PACA_IRQ_PMI 0x40
Benjamin Herrenschmidt7230c562012-03-06 18:27:59 +110031
Madhavan Srinivasanc2e480b2017-12-20 09:25:42 +053032/*
Nicholas Piggin6cc3f912018-02-03 17:17:50 +100033 * Some soft-masked interrupts must be hard masked until they are replayed
34 * (e.g., because the soft-masked handler does not clear the exception).
35 */
36#ifdef CONFIG_PPC_BOOK3S
37#define PACA_IRQ_MUST_HARD_MASK (PACA_IRQ_EE|PACA_IRQ_PMI)
38#else
39#define PACA_IRQ_MUST_HARD_MASK (PACA_IRQ_EE)
40#endif
41
42/*
Madhavan Srinivasan4e26bc42017-12-20 09:25:50 +053043 * flags for paca->irq_soft_mask
Madhavan Srinivasanc2e480b2017-12-20 09:25:42 +053044 */
Madhavan Srinivasan01417c62017-12-20 09:25:49 +053045#define IRQS_ENABLED 0
Madhavan Srinivasanf442d002017-12-20 09:25:53 +053046#define IRQS_DISABLED 1 /* local_irq_disable() interrupts */
47#define IRQS_PMI_DISABLED 2
48#define IRQS_ALL_DISABLED (IRQS_DISABLED | IRQS_PMI_DISABLED)
Madhavan Srinivasanc2e480b2017-12-20 09:25:42 +053049
Benjamin Herrenschmidt7230c562012-03-06 18:27:59 +110050#endif /* CONFIG_PPC64 */
51
52#ifndef __ASSEMBLY__
53
Nicholas Piggin6de66382017-11-05 23:33:55 +110054extern void replay_system_reset(void);
Nicholas Piggin6cc0c162020-02-26 03:35:37 +100055extern void replay_soft_interrupts(void);
Benjamin Herrenschmidt7230c562012-03-06 18:27:59 +110056
Kumar Galac7aeffc2005-09-19 09:30:27 -050057extern void timer_interrupt(struct pt_regs *);
Nicholas Piggin3f984622018-05-05 03:19:31 +100058extern void timer_broadcast_interrupt(void);
Alexander Graf7cc1e8e2012-02-22 16:26:34 +010059extern void performance_monitor_exception(struct pt_regs *regs);
Bharat Bhushan6328e592012-06-20 05:56:53 +000060extern void WatchdogException(struct pt_regs *regs);
61extern void unknown_exception(struct pt_regs *regs);
Linus Torvalds1da177e2005-04-16 15:20:36 -070062
Paul Mackerrasd04c56f2006-10-04 16:47:49 +100063#ifdef CONFIG_PPC64
64#include <asm/paca.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070065
/*
 * Read the current soft-mask state from paca->irq_soft_mask.
 *
 * GPR13 holds the paca base pointer on 64-bit, so a single lbz from
 * 13+offset fetches the mask byte.  This is a pure read, hence no
 * "memory" clobber is needed here (contrast irq_soft_mask_set()).
 * notrace: this runs inside the irq-tracing machinery itself.
 */
static inline notrace unsigned long irq_soft_mask_return(void)
{
	unsigned long flags;

	asm volatile(
		"lbz %0,%1(13)"
		: "=r" (flags)
		: "i" (offsetof(struct paca_struct, irq_soft_mask)));

	return flags;
}
77
/*
 * Store a new value into paca->irq_soft_mask.
 *
 * The "memory" clobber acts as both a compiler barrier
 * for the critical section and as a clobber because
 * we changed paca->irq_soft_mask
 */
static inline notrace void irq_soft_mask_set(unsigned long mask)
{
#ifdef CONFIG_PPC_IRQ_SOFT_MASK_DEBUG
	/*
	 * The irq mask must always include the STD bit if any are set.
	 *
	 * and interrupts don't get replayed until the standard
	 * interrupt (local_irq_disable()) is unmasked.
	 *
	 * Other masks must only provide additional masking beyond
	 * the standard, and they are also not replayed until the
	 * standard interrupt becomes unmasked.
	 *
	 * This could be changed, but it will require partial
	 * unmasks to be replayed, among other things. For now, take
	 * the simple approach.
	 */
	WARN_ON(mask && !(mask & IRQS_DISABLED));
#endif

	/* stb writes the mask byte into the paca (GPR13 = paca base). */
	asm volatile(
		"stb %0,%1(13)"
		:
		: "r" (mask),
		  "i" (offsetof(struct paca_struct, irq_soft_mask))
		: "memory");
}
110
/*
 * Atomically (with respect to interrupts on this CPU) swap in a new
 * soft-mask value and return the previous one.  The lbz/stb pair runs
 * in one asm so the compiler cannot reorder anything between the read
 * and the write; "memory" makes it a compiler barrier as well.
 * Returns the old paca->irq_soft_mask.
 */
static inline notrace unsigned long irq_soft_mask_set_return(unsigned long mask)
{
	unsigned long flags;

#ifdef CONFIG_PPC_IRQ_SOFT_MASK_DEBUG
	/* Any non-zero mask must include the standard-disable bit. */
	WARN_ON(mask && !(mask & IRQS_DISABLED));
#endif

	asm volatile(
		"lbz %0,%1(13); stb %2,%1(13)"
		: "=&r" (flags)		/* early clobber: read before %2 is consumed */
		: "i" (offsetof(struct paca_struct, irq_soft_mask)),
		  "r" (mask)
		: "memory");

	return flags;
}
128
/*
 * OR additional bits into paca->irq_soft_mask and return the previous
 * value.  Used by the PMU-aware irq save path to add PMI masking on
 * top of whatever was already masked.  The read-modify-write is done
 * in a single asm block so it cannot be torn by the compiler.
 */
static inline notrace unsigned long irq_soft_mask_or_return(unsigned long mask)
{
	unsigned long flags, tmp;

	asm volatile(
		"lbz %0,%2(13); or %1,%0,%3; stb %1,%2(13)"
		: "=&r" (flags), "=r" (tmp)
		: "i" (offsetof(struct paca_struct, irq_soft_mask)),
		  "r" (mask)
		: "memory");

#ifdef CONFIG_PPC_IRQ_SOFT_MASK_DEBUG
	/* The combined old|new mask must still carry the STD bit if set. */
	WARN_ON((mask | flags) && !((mask | flags) & IRQS_DISABLED));
#endif

	return flags;
}
146
/* On 64-bit, the generic "irq flags" are the paca soft-mask byte. */
static inline unsigned long arch_local_save_flags(void)
{
	return irq_soft_mask_return();
}
151
/*
 * Soft-disable: only the mask byte is set; MSR[EE] is left alone and
 * any interrupt that fires while masked is latched for later replay.
 */
static inline void arch_local_irq_disable(void)
{
	irq_soft_mask_set(IRQS_DISABLED);
}
156
David Howellsdf9ee292010-10-07 14:08:55 +0100157extern void arch_local_irq_restore(unsigned long);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700158
/*
 * Enable by restoring the fully-enabled mask; arch_local_irq_restore()
 * (out of line) takes care of replaying interrupts that were latched
 * while we were soft-disabled.
 */
static inline void arch_local_irq_enable(void)
{
	arch_local_irq_restore(IRQS_ENABLED);
}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700163
/* Soft-disable and hand back the previous mask for a later restore. */
static inline unsigned long arch_local_irq_save(void)
{
	return irq_soft_mask_set_return(IRQS_DISABLED);
}
168
169static inline bool arch_irqs_disabled_flags(unsigned long flags)
170{
Madhavan Srinivasan01417c62017-12-20 09:25:49 +0530171 return flags & IRQS_DISABLED;
David Howellsdf9ee292010-10-07 14:08:55 +0100172}
173
/* Convenience wrapper: are interrupts soft-disabled right now? */
static inline bool arch_irqs_disabled(void)
{
	return arch_irqs_disabled_flags(arch_local_save_flags());
}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700178
Madhavan Srinivasanc6424382017-12-20 09:25:55 +0530179#ifdef CONFIG_PPC_BOOK3S
180/*
181 * To support disabling and enabling of irq with PMI, set of
182 * new powerpc_local_irq_pmu_save() and powerpc_local_irq_restore()
183 * functions are added. These macros are implemented using generic
184 * linux local_irq_* code from include/linux/irqflags.h.
185 */
186#define raw_local_irq_pmu_save(flags) \
187 do { \
188 typecheck(unsigned long, flags); \
189 flags = irq_soft_mask_or_return(IRQS_DISABLED | \
190 IRQS_PMI_DISABLED); \
191 } while(0)
192
193#define raw_local_irq_pmu_restore(flags) \
194 do { \
195 typecheck(unsigned long, flags); \
196 arch_local_irq_restore(flags); \
197 } while(0)
198
199#ifdef CONFIG_TRACE_IRQFLAGS
200#define powerpc_local_irq_pmu_save(flags) \
201 do { \
202 raw_local_irq_pmu_save(flags); \
203 trace_hardirqs_off(); \
204 } while(0)
205#define powerpc_local_irq_pmu_restore(flags) \
206 do { \
207 if (raw_irqs_disabled_flags(flags)) { \
208 raw_local_irq_pmu_restore(flags); \
209 trace_hardirqs_off(); \
210 } else { \
211 trace_hardirqs_on(); \
212 raw_local_irq_pmu_restore(flags); \
213 } \
214 } while(0)
215#else
216#define powerpc_local_irq_pmu_save(flags) \
217 do { \
218 raw_local_irq_pmu_save(flags); \
219 } while(0)
220#define powerpc_local_irq_pmu_restore(flags) \
221 do { \
222 raw_local_irq_pmu_restore(flags); \
223 } while (0)
224#endif /* CONFIG_TRACE_IRQFLAGS */
225
226#endif /* CONFIG_PPC_BOOK3S */
227
Benjamin Herrenschmidt2d27cfd2009-07-23 23:15:59 +0000228#ifdef CONFIG_PPC_BOOK3E
Christophe Leroyb020aa92019-08-29 08:45:13 +0000229#define __hard_irq_enable() wrtee(MSR_EE)
230#define __hard_irq_disable() wrtee(0)
Nicholas Piggin68b34582020-02-26 03:35:34 +1000231#define __hard_EE_RI_disable() wrtee(0)
232#define __hard_RI_enable() do { } while (0)
Benjamin Herrenschmidt2d27cfd2009-07-23 23:15:59 +0000233#else
Nicholas Piggin54071e42018-05-05 03:19:28 +1000234#define __hard_irq_enable() __mtmsrd(MSR_EE|MSR_RI, 1)
235#define __hard_irq_disable() __mtmsrd(MSR_RI, 1)
Nicholas Piggin68b34582020-02-26 03:35:34 +1000236#define __hard_EE_RI_disable() __mtmsrd(0, 1)
237#define __hard_RI_enable() __mtmsrd(MSR_RI, 1)
Benjamin Herrenschmidt2d27cfd2009-07-23 23:15:59 +0000238#endif
Benjamin Herrenschmidte1fa2e12007-05-10 22:22:45 -0700239
/*
 * Fully disable interrupts: clear MSR[EE] via __hard_irq_disable(),
 * soft-mask everything (standard irqs and PMIs), and record
 * PACA_IRQ_HARD_DIS so the lazy-enable machinery knows EE is really
 * off.  If we were soft-enabled beforehand, stash r1 in
 * paca->saved_r1 (NOTE(review): presumably for crash/debug stack
 * inspection — confirm against users of saved_r1) and tell lockdep
 * via trace_hardirqs_off().
 */
#define hard_irq_disable()	do {					\
	unsigned long flags;						\
	__hard_irq_disable();						\
	flags = irq_soft_mask_set_return(IRQS_ALL_DISABLED);		\
	local_paca->irq_happened |= PACA_IRQ_HARD_DIS;			\
	if (!arch_irqs_disabled_flags(flags)) {				\
		asm ("stdx %%r1, 0, %1 ;"				\
		     : "=m" (local_paca->saved_r1)			\
		     : "b" (&local_paca->saved_r1));			\
		trace_hardirqs_off();					\
	}								\
} while(0)
Paul Mackerrasf9485012012-06-15 14:51:39 +1000252
Michael Ellerman00943682020-05-03 00:33:16 +1000253static inline bool __lazy_irq_pending(u8 irq_happened)
254{
255 return !!(irq_happened & ~PACA_IRQ_HARD_DIS);
256}
257
/*
 * Check if a lazy IRQ is pending. Should be called with IRQs hard disabled.
 */
static inline bool lazy_irq_pending(void)
{
	/* get_paca() variant: goes through the normal (checked) accessor. */
	return __lazy_irq_pending(get_paca()->irq_happened);
}
265
/*
 * Check if a lazy IRQ is pending, with no debugging checks.
 * Should be called with IRQs hard disabled.
 * For use in RI disabled code or other constrained situations.
 */
static inline bool lazy_irq_pending_nocheck(void)
{
	/* local_paca bypasses whatever checking get_paca() performs. */
	return __lazy_irq_pending(local_paca->irq_happened);
}
275
/*
 * This is called by asynchronous interrupts to conditionally
 * re-enable hard interrupts after having cleared the source
 * of the interrupt. They are kept disabled if there is a different
 * soft-masked interrupt pending that requires hard masking.
 */
static inline void may_hard_irq_enable(void)
{
	if (!(get_paca()->irq_happened & PACA_IRQ_MUST_HARD_MASK)) {
		/*
		 * Nothing pending that must stay hard-masked: drop the
		 * hard-disable marker and turn EE back on.
		 */
		get_paca()->irq_happened &= ~PACA_IRQ_HARD_DIS;
		__hard_irq_enable();
	}
}
Paul Mackerrasd04c56f2006-10-04 16:47:49 +1000289
/*
 * Were irqs soft-disabled at the time these regs were captured?
 * regs->softe holds the soft-mask state saved at interrupt entry.
 */
static inline bool arch_irq_disabled_regs(struct pt_regs *regs)
{
	return (regs->softe & IRQS_DISABLED);
}
294
Benjamin Herrenschmidtbe2cf202012-07-10 18:36:40 +1000295extern bool prep_irq_for_idle(void);
Nicholas Piggin2201f992017-06-13 23:05:45 +1000296extern bool prep_irq_for_idle_irqsoff(void);
Nicholas Piggin771d4302017-06-13 23:05:47 +1000297extern void irq_set_pending_from_srr1(unsigned long srr1);
Nicholas Piggin2201f992017-06-13 23:05:45 +1000298
299#define fini_irq_for_idle_irqsoff() trace_hardirqs_off();
Benjamin Herrenschmidtbe2cf202012-07-10 18:36:40 +1000300
Benjamin Herrenschmidt1d607bb2016-07-08 16:37:07 +1000301extern void force_external_irq_replay(void);
302
David Howellsdf9ee292010-10-07 14:08:55 +0100303#else /* CONFIG_PPC64 */
Linus Torvalds1da177e2005-04-16 15:20:36 -0700304
/* 32-bit: no soft masking; the irq flags are simply the MSR. */
static inline unsigned long arch_local_save_flags(void)
{
	return mfmsr();
}
Paul Mackerras4c75f842009-06-12 02:00:50 +0000309
/*
 * Put the saved MSR (or at least its EE state) back.  BookE has the
 * dedicated wrtee instruction for flipping EE; everything else
 * rewrites the MSR.
 */
static inline void arch_local_irq_restore(unsigned long flags)
{
	if (IS_ENABLED(CONFIG_BOOKE))
		wrtee(flags);
	else
		mtmsr(flags);
}
317
/*
 * Save the MSR, then clear EE by whichever mechanism the core
 * provides: wrtee on BookE, the EID special register on 8xx,
 * a full MSR rewrite otherwise.  Returns the pre-disable MSR.
 */
static inline unsigned long arch_local_irq_save(void)
{
	unsigned long flags = arch_local_save_flags();

	if (IS_ENABLED(CONFIG_BOOKE))
		wrtee(0);
	else if (IS_ENABLED(CONFIG_PPC_8xx))
		wrtspr(SPRN_EID);
	else
		mtmsr(flags & ~MSR_EE);

	return flags;
}
331
/* Clear MSR[EE] using the cheapest mechanism the core offers. */
static inline void arch_local_irq_disable(void)
{
	if (IS_ENABLED(CONFIG_BOOKE))
		wrtee(0);
	else if (IS_ENABLED(CONFIG_PPC_8xx))
		wrtspr(SPRN_EID);
	else
		mtmsr(mfmsr() & ~MSR_EE);
}
Paul Mackerras4c75f842009-06-12 02:00:50 +0000341
David Howellsdf9ee292010-10-07 14:08:55 +0100342static inline void arch_local_irq_enable(void)
343{
Christophe Leroyb020aa92019-08-29 08:45:13 +0000344 if (IS_ENABLED(CONFIG_BOOKE))
345 wrtee(MSR_EE);
346 else if (IS_ENABLED(CONFIG_PPC_8xx))
347 wrtspr(SPRN_EIE);
348 else
349 mtmsr(mfmsr() | MSR_EE);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700350}
351
David Howellsdf9ee292010-10-07 14:08:55 +0100352static inline bool arch_irqs_disabled_flags(unsigned long flags)
Steven Rostedte0eca072008-05-14 23:49:43 -0400353{
354 return (flags & MSR_EE) == 0;
355}
356
David Howellsdf9ee292010-10-07 14:08:55 +0100357static inline bool arch_irqs_disabled(void)
358{
359 return arch_irqs_disabled_flags(arch_local_save_flags());
360}
361
362#define hard_irq_disable() arch_local_irq_disable()
363
/* Were irqs disabled (MSR[EE] clear) when these regs were captured? */
static inline bool arch_irq_disabled_regs(struct pt_regs *regs)
{
	return !(regs->msr & MSR_EE);
}
368
/* No lazy hard-enable on 32-bit: there is no separate soft mask. */
static inline void may_hard_irq_enable(void) { }
370
Paul Mackerrasd04c56f2006-10-04 16:47:49 +1000371#endif /* CONFIG_PPC64 */
Linus Torvalds1da177e2005-04-16 15:20:36 -0700372
Thomas Gleixner089fb442011-01-21 06:12:28 +0000373#define ARCH_IRQ_INIT_FLAGS IRQ_NOREQUEST
374
Ingo Molnarc0ad90a2006-06-29 02:24:44 -0700375/*
376 * interrupt-retrigger: should we handle this via lost interrupts and IPIs
377 * or should we not care like we do now ? --BenH.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700378 */
Thomas Gleixner353bca52009-03-10 14:46:30 +0000379struct irq_chip;
Kumar Galab671ad22005-09-21 16:52:55 -0500380
Benjamin Herrenschmidt7230c562012-03-06 18:27:59 +1100381#endif /* __ASSEMBLY__ */
Kumar Galab671ad22005-09-21 16:52:55 -0500382#endif /* __KERNEL__ */
383#endif /* _ASM_POWERPC_HW_IRQ_H */