blob: 0fbbcdf0c178ec5dc6ff5372fa248b121b65b49d [file] [log] [blame]
Greg Kroah-Hartmanb2441312017-11-01 15:07:57 +01001/* SPDX-License-Identifier: GPL-2.0 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002#ifndef LINUX_HARDIRQ_H
3#define LINUX_HARDIRQ_H
4
Frederic Weisbecker92cf2112015-05-12 16:41:46 +02005#include <linux/preempt.h>
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07006#include <linux/lockdep.h>
Steven Rostedt6a60dd12008-11-06 15:55:21 -05007#include <linux/ftrace_irq.h>
Frederic Weisbeckerdcbf8322012-10-05 23:07:19 +02008#include <linux/vtime.h>
Peter Zijlstra0bd3a172013-11-19 16:13:38 +01009#include <asm/hardirq.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070010
Linus Torvalds1da177e2005-04-16 15:20:36 -070011
Linus Torvalds1da177e2005-04-16 15:20:36 -070012extern void synchronize_irq(unsigned int irq);
Peter Zijlstra02cea392015-02-05 14:06:23 +010013extern bool synchronize_hardirq(unsigned int irq);
Linus Torvalds1da177e2005-04-16 15:20:36 -070014
#if defined(CONFIG_TINY_RCU)

/*
 * Tiny RCU has no extended-quiescent-state bookkeeping to update on
 * NMI entry/exit, so the hooks are empty stubs.
 */
static inline void rcu_nmi_enter(void)
{
}

static inline void rcu_nmi_exit(void)
{
}

#else
/* Tree RCU must be told when a CPU enters/leaves NMI context. */
extern void rcu_nmi_enter(void);
extern void rcu_nmi_exit(void);
#endif
Steven Rostedt2232c2d2008-02-29 18:46:50 +010029
Ingo Molnarde30a2b2006-07-03 00:24:42 -070030/*
31 * It is safe to do non-atomic ops on ->hardirq_context,
32 * because NMI handlers may not preempt and the ops are
33 * always balanced, so the interrupted value of ->hardirq_context
34 * will always be restored.
35 */
/*
 * Enter hardirq context without the jiffies/NO_HZ update that irq_enter()
 * performs: charge the interrupted task's time, mark hardirq context in
 * preempt_count, then notify lockdep/irq tracing.  The ordering matters:
 * tracing must see HARDIRQ_OFFSET already set.
 */
#define __irq_enter()					\
	do {						\
		account_irq_enter_time(current);	\
		preempt_count_add(HARDIRQ_OFFSET);	\
		trace_hardirq_enter();			\
	} while (0)
42
43/*
44 * Enter irq context (on NO_HZ, update jiffies):
45 */
Ingo Molnardde4b2b2007-02-16 01:27:45 -080046extern void irq_enter(void);
Linus Torvalds1da177e2005-04-16 15:20:36 -070047
Ingo Molnarde30a2b2006-07-03 00:24:42 -070048/*
49 * Exit irq context without processing softirqs:
50 */
/*
 * Exact inverse of __irq_enter(): leave hardirq context without running
 * pending softirqs.  Tracing is told first (while HARDIRQ_OFFSET is still
 * set), then time accounting, then the preempt_count is dropped.
 */
#define __irq_exit()					\
	do {						\
		trace_hardirq_exit();			\
		account_irq_exit_time(current);		\
		preempt_count_sub(HARDIRQ_OFFSET);	\
	} while (0)
57
58/*
59 * Exit irq context and process softirqs if needed:
60 */
Linus Torvalds1da177e2005-04-16 15:20:36 -070061extern void irq_exit(void);
62
/*
 * Enter NMI context.  The ordering is deliberate:
 *  - printk is switched to its NMI-safe per-CPU buffer first, so any
 *    later step may safely printk();
 *  - lockdep is disabled before ftrace, since neither may take locks
 *    from NMI context;
 *  - BUG_ON(in_nmi()) catches (unsupported) nested NMIs before the
 *    count is bumped;
 *  - NMI_OFFSET + HARDIRQ_OFFSET are added in one go so in_nmi() and
 *    in_irq() both become true atomically w.r.t. the preempt counter;
 *  - RCU and irq tracing are told last, once the context is fully set up.
 */
#define nmi_enter()						\
	do {							\
		printk_nmi_enter();				\
		lockdep_off();					\
		ftrace_nmi_enter();				\
		BUG_ON(in_nmi());				\
		preempt_count_add(NMI_OFFSET + HARDIRQ_OFFSET);	\
		rcu_nmi_enter();				\
		trace_hardirq_enter();				\
	} while (0)
Linus Torvalds5f34fe12008-12-30 16:10:19 -080073
/*
 * Leave NMI context.  This is the exact mirror of nmi_enter(): each
 * facility is torn down in the reverse order it was set up, and
 * BUG_ON(!in_nmi()) verifies (while the count is still set) that the
 * enter/exit calls are balanced.
 */
#define nmi_exit()						\
	do {							\
		trace_hardirq_exit();				\
		rcu_nmi_exit();					\
		BUG_ON(!in_nmi());				\
		preempt_count_sub(NMI_OFFSET + HARDIRQ_OFFSET);	\
		ftrace_nmi_exit();				\
		lockdep_on();					\
		printk_nmi_exit();				\
	} while (0)
Ingo Molnarde30a2b2006-07-03 00:24:42 -070084
Linus Torvalds1da177e2005-04-16 15:20:36 -070085#endif /* LINUX_HARDIRQ_H */