Greg Kroah-Hartman | b244131 | 2017-11-01 15:07:57 +0100 | [diff] [blame^] | 1 | /* SPDX-License-Identifier: GPL-2.0 */ |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2 | #ifndef LINUX_HARDIRQ_H |
| 3 | #define LINUX_HARDIRQ_H |
| 4 | |
Frederic Weisbecker | 92cf211 | 2015-05-12 16:41:46 +0200 | [diff] [blame] | 5 | #include <linux/preempt.h> |
Ingo Molnar | fbb9ce95 | 2006-07-03 00:24:50 -0700 | [diff] [blame] | 6 | #include <linux/lockdep.h> |
Steven Rostedt | 6a60dd1 | 2008-11-06 15:55:21 -0500 | [diff] [blame] | 7 | #include <linux/ftrace_irq.h> |
Frederic Weisbecker | dcbf832 | 2012-10-05 23:07:19 +0200 | [diff] [blame] | 8 | #include <linux/vtime.h> |
Peter Zijlstra | 0bd3a17 | 2013-11-19 16:13:38 +0100 | [diff] [blame] | 9 | #include <asm/hardirq.h> |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 10 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 11 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 12 | extern void synchronize_irq(unsigned int irq); |
Peter Zijlstra | 02cea39 | 2015-02-05 14:06:23 +0100 | [diff] [blame] | 13 | extern bool synchronize_hardirq(unsigned int irq); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 14 | |
Paul E. McKenney | 127781d | 2013-03-27 08:44:00 -0700 | [diff] [blame] | 15 | #if defined(CONFIG_TINY_RCU) |
Paul E. McKenney | 9b1d82f | 2009-10-25 19:03:50 -0700 | [diff] [blame] | 16 | |
| 17 | static inline void rcu_nmi_enter(void) |
| 18 | { |
| 19 | } |
| 20 | |
| 21 | static inline void rcu_nmi_exit(void) |
| 22 | { |
| 23 | } |
| 24 | |
| 25 | #else |
Paul E. McKenney | 64db4cf | 2008-12-18 21:55:32 +0100 | [diff] [blame] | 26 | extern void rcu_nmi_enter(void); |
| 27 | extern void rcu_nmi_exit(void); |
Paul E. McKenney | 9b1d82f | 2009-10-25 19:03:50 -0700 | [diff] [blame] | 28 | #endif |
Steven Rostedt | 2232c2d | 2008-02-29 18:46:50 +0100 | [diff] [blame] | 29 | |
Ingo Molnar | de30a2b | 2006-07-03 00:24:42 -0700 | [diff] [blame] | 30 | /* |
| 31 | * It is safe to do non-atomic ops on ->hardirq_context, |
| 32 | * because NMI handlers may not preempt and the ops are |
| 33 | * always balanced, so the interrupted value of ->hardirq_context |
| 34 | * will always be restored. |
| 35 | */ |
Thomas Gleixner | 79bf2bb | 2007-02-16 01:28:03 -0800 | [diff] [blame] | 36 | #define __irq_enter() \ |
| 37 | do { \ |
Frederic Weisbecker | 6a61671 | 2012-12-16 20:00:34 +0100 | [diff] [blame] | 38 | account_irq_enter_time(current); \ |
Peter Zijlstra | bdb4380 | 2013-09-10 12:15:23 +0200 | [diff] [blame] | 39 | preempt_count_add(HARDIRQ_OFFSET); \ |
Thomas Gleixner | 79bf2bb | 2007-02-16 01:28:03 -0800 | [diff] [blame] | 40 | trace_hardirq_enter(); \ |
| 41 | } while (0) |
| 42 | |
| 43 | /* |
| 44 | * Enter irq context (on NO_HZ, update jiffies): |
| 45 | */ |
Ingo Molnar | dde4b2b | 2007-02-16 01:27:45 -0800 | [diff] [blame] | 46 | extern void irq_enter(void); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 47 | |
Ingo Molnar | de30a2b | 2006-07-03 00:24:42 -0700 | [diff] [blame] | 48 | /* |
| 49 | * Exit irq context without processing softirqs: |
| 50 | */ |
| 51 | #define __irq_exit() \ |
| 52 | do { \ |
| 53 | trace_hardirq_exit(); \ |
Frederic Weisbecker | 6a61671 | 2012-12-16 20:00:34 +0100 | [diff] [blame] | 54 | account_irq_exit_time(current); \ |
Peter Zijlstra | bdb4380 | 2013-09-10 12:15:23 +0200 | [diff] [blame] | 55 | preempt_count_sub(HARDIRQ_OFFSET); \ |
Ingo Molnar | de30a2b | 2006-07-03 00:24:42 -0700 | [diff] [blame] | 56 | } while (0) |
| 57 | |
| 58 | /* |
| 59 | * Exit irq context and process softirqs if needed: |
| 60 | */ |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 61 | extern void irq_exit(void); |
| 62 | |
Steven Rostedt | 2a7b8df | 2009-02-12 14:16:46 -0500 | [diff] [blame] | 63 | #define nmi_enter() \ |
| 64 | do { \ |
Petr Mladek | 42a0bb3 | 2016-05-20 17:00:33 -0700 | [diff] [blame] | 65 | printk_nmi_enter(); \ |
Steven Rostedt | 0f1ac8f | 2013-01-15 22:11:19 -0500 | [diff] [blame] | 66 | lockdep_off(); \ |
Steven Rostedt | 2a7b8df | 2009-02-12 14:16:46 -0500 | [diff] [blame] | 67 | ftrace_nmi_enter(); \ |
| 68 | BUG_ON(in_nmi()); \ |
Peter Zijlstra | bdb4380 | 2013-09-10 12:15:23 +0200 | [diff] [blame] | 69 | preempt_count_add(NMI_OFFSET + HARDIRQ_OFFSET); \ |
Steven Rostedt | 2a7b8df | 2009-02-12 14:16:46 -0500 | [diff] [blame] | 70 | rcu_nmi_enter(); \ |
| 71 | trace_hardirq_enter(); \ |
Steven Rostedt | 17666f0 | 2008-10-30 16:08:32 -0400 | [diff] [blame] | 72 | } while (0) |
Linus Torvalds | 5f34fe1 | 2008-12-30 16:10:19 -0800 | [diff] [blame] | 73 | |
Steven Rostedt | 2a7b8df | 2009-02-12 14:16:46 -0500 | [diff] [blame] | 74 | #define nmi_exit() \ |
| 75 | do { \ |
| 76 | trace_hardirq_exit(); \ |
| 77 | rcu_nmi_exit(); \ |
Steven Rostedt | 2a7b8df | 2009-02-12 14:16:46 -0500 | [diff] [blame] | 78 | BUG_ON(!in_nmi()); \ |
Peter Zijlstra | bdb4380 | 2013-09-10 12:15:23 +0200 | [diff] [blame] | 79 | preempt_count_sub(NMI_OFFSET + HARDIRQ_OFFSET); \ |
Steven Rostedt | 2a7b8df | 2009-02-12 14:16:46 -0500 | [diff] [blame] | 80 | ftrace_nmi_exit(); \ |
Steven Rostedt | 0f1ac8f | 2013-01-15 22:11:19 -0500 | [diff] [blame] | 81 | lockdep_on(); \ |
Petr Mladek | 42a0bb3 | 2016-05-20 17:00:33 -0700 | [diff] [blame] | 82 | printk_nmi_exit(); \ |
Steven Rostedt | 17666f0 | 2008-10-30 16:08:32 -0400 | [diff] [blame] | 83 | } while (0) |
Ingo Molnar | de30a2b | 2006-07-03 00:24:42 -0700 | [diff] [blame] | 84 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 85 | #endif /* LINUX_HARDIRQ_H */ |