Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1 | #ifndef LINUX_HARDIRQ_H |
| 2 | #define LINUX_HARDIRQ_H |
| 3 | |
Frederic Weisbecker | 2d4b847 | 2013-07-29 20:29:43 +0200 | [diff] [blame] | 4 | #include <linux/preempt_mask.h> |
Ingo Molnar | fbb9ce95 | 2006-07-03 00:24:50 -0700 | [diff] [blame] | 5 | #include <linux/lockdep.h> |
Steven Rostedt | 6a60dd1 | 2008-11-06 15:55:21 -0500 | [diff] [blame] | 6 | #include <linux/ftrace_irq.h> |
Frederic Weisbecker | dcbf832 | 2012-10-05 23:07:19 +0200 | [diff] [blame] | 7 | #include <linux/vtime.h> |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 8 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 9 | |
/* Wait until all in-flight handlers of @irq have completed. */
extern void synchronize_irq(unsigned int irq);

#ifdef CONFIG_TINY_RCU

/*
 * Tiny RCU performs no NMI-nesting bookkeeping, so the entry/exit
 * hooks compile away to nothing.
 */
static inline void rcu_nmi_enter(void) { }
static inline void rcu_nmi_exit(void) { }

#else
/* Tree RCU tracks NMI nesting; implementations live in the RCU core. */
extern void rcu_nmi_enter(void);
extern void rcu_nmi_exit(void);
#endif
Steven Rostedt | 2232c2d | 2008-02-29 18:46:50 +0100 | [diff] [blame] | 26 | |
/*
 * Non-atomic updates of ->hardirq_context are safe here: NMI handlers
 * cannot be preempted, and every modification is balanced by a matching
 * undo, so the value seen by the interrupted context is always restored.
 */
#define __irq_enter()					\
	do {						\
		account_irq_enter_time(current);	\
		preempt_count_add(HARDIRQ_OFFSET);	\
		trace_hardirq_enter();			\
	} while (0)

/*
 * Enter irq context (on NO_HZ, update jiffies):
 */
extern void irq_enter(void);
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 44 | |
/*
 * Leave hard-irq context WITHOUT processing pending softirqs.
 * Statements undo __irq_enter() in reverse order.
 */
#define __irq_exit()					\
	do {						\
		trace_hardirq_exit();			\
		account_irq_exit_time(current);		\
		preempt_count_sub(HARDIRQ_OFFSET);	\
	} while (0)

/*
 * Exit irq context and process softirqs if needed:
 */
extern void irq_exit(void);
| 59 | |
/*
 * NMI entry: lockdep is switched off first (its state is unreliable in
 * NMI context), ftrace is notified, nesting is sanity-checked, the
 * preempt count gains both the NMI and hard-irq markers, and finally
 * RCU and irq tracing are informed.
 */
#define nmi_enter()						\
	do {							\
		lockdep_off();					\
		ftrace_nmi_enter();				\
		BUG_ON(in_nmi());				\
		preempt_count_add(NMI_OFFSET + HARDIRQ_OFFSET);	\
		rcu_nmi_enter();				\
		trace_hardirq_enter();				\
	} while (0)

/* NMI exit: undoes nmi_enter() step by step, in exact reverse order. */
#define nmi_exit()						\
	do {							\
		trace_hardirq_exit();				\
		rcu_nmi_exit();					\
		BUG_ON(!in_nmi());				\
		preempt_count_sub(NMI_OFFSET + HARDIRQ_OFFSET);	\
		ftrace_nmi_exit();				\
		lockdep_on();					\
	} while (0)
Ingo Molnar | de30a2b | 2006-07-03 00:24:42 -0700 | [diff] [blame] | 79 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 80 | #endif /* LINUX_HARDIRQ_H */ |