#ifndef __LINUX_PREEMPT_H
#define __LINUX_PREEMPT_H

/*
 * include/linux/preempt.h - macros for accessing and manipulating
 * preempt_count (used for kernel preemption, interrupt count, etc.)
 */

#include <linux/thread_info.h>
#include <linux/linkage.h>
#include <linux/list.h>

/*
 * We use the MSB mostly because it's available; see <linux/preempt_mask.h> for
 * the other bits -- we can't include that header due to inclusion hell.
 */
#define PREEMPT_NEED_RESCHED	0x80000000

/*
 * We mask the PREEMPT_NEED_RESCHED bit so as not to confuse all current users
 * that think a non-zero value indicates we cannot preempt.
 */
static __always_inline int preempt_count(void)
{
        return current_thread_info()->preempt_count & ~PREEMPT_NEED_RESCHED;
}
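
/*
 * A small illustration of the masking above (illustrative only, not part
 * of this header): a task that is fully preemptible but has no reschedule
 * pending carries a raw count of just the inverted bit (0x80000000), yet
 * preempt_count() still reads 0, so callers that treat any non-zero value
 * as "in atomic context" keep working unchanged:
 *
 *	WARN_ON_ONCE(preempt_count());	// fires only if preemption is off
 */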

static __always_inline int *preempt_count_ptr(void)
{
        return &current_thread_info()->preempt_count;
}

/*
 * We now lose PREEMPT_NEED_RESCHED and cause an extra reschedule; however the
 * alternative is losing a reschedule. Better to schedule too often -- also this
 * should be a very rare operation.
 */
static __always_inline void preempt_count_set(int pc)
{
        *preempt_count_ptr() = pc;
}

/*
 * We fold the NEED_RESCHED bit into the preempt count such that
 * preempt_enable() can decrement and test for needing to reschedule with a
 * single instruction.
 *
 * We invert the actual bit, so that when the decrement hits 0 we know we both
 * need to resched (the bit is cleared) and can resched (no preempt count).
 */

static __always_inline void set_preempt_need_resched(void)
{
        *preempt_count_ptr() &= ~PREEMPT_NEED_RESCHED;
}

static __always_inline void clear_preempt_need_resched(void)
{
        *preempt_count_ptr() |= PREEMPT_NEED_RESCHED;
}

static __always_inline bool test_preempt_need_resched(void)
{
        return !(*preempt_count_ptr() & PREEMPT_NEED_RESCHED);
}

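/*
 * Illustrative sketch of what the folding above buys us (this is not the
 * implementation used later in this header, just the idea; "raw" stands
 * for the raw, unmasked preempt_count word):
 *
 *	raw == PREEMPT_NEED_RESCHED | 2	: nested twice, no resched pending
 *	raw == 2			: nested twice, resched pending
 *	raw == PREEMPT_NEED_RESCHED	: preemptible, no resched pending
 *	raw == 0			: preemptible AND resched pending
 *
 * so a preempt_enable() built on this scheme could, in principle, fold the
 * count decrement and the need-resched test into one decrement-and-test:
 *
 *	if (!--(*preempt_count_ptr()))
 *		preempt_schedule();
 */
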
#if defined(CONFIG_DEBUG_PREEMPT) || defined(CONFIG_PREEMPT_TRACER)
extern void add_preempt_count(int val);
extern void sub_preempt_count(int val);
#else
# define add_preempt_count(val)	do { *preempt_count_ptr() += (val); } while (0)
# define sub_preempt_count(val)	do { *preempt_count_ptr() -= (val); } while (0)
#endif

#define inc_preempt_count() add_preempt_count(1)
#define dec_preempt_count() sub_preempt_count(1)

#ifdef CONFIG_PREEMPT

asmlinkage void preempt_schedule(void);

#define preempt_check_resched() \
do { \
        if (unlikely(!*preempt_count_ptr())) \
                preempt_schedule(); \
} while (0)

#ifdef CONFIG_CONTEXT_TRACKING

void preempt_schedule_context(void);

#define preempt_check_resched_context() \
do { \
        if (unlikely(!*preempt_count_ptr())) \
                preempt_schedule_context(); \
} while (0)
#else

#define preempt_check_resched_context() preempt_check_resched()

#endif /* CONFIG_CONTEXT_TRACKING */

#else /* !CONFIG_PREEMPT */

#define preempt_check_resched()		do { } while (0)
#define preempt_check_resched_context()	do { } while (0)

#endif /* CONFIG_PREEMPT */


#ifdef CONFIG_PREEMPT_COUNT

#define preempt_disable() \
do { \
        inc_preempt_count(); \
        barrier(); \
} while (0)

#define sched_preempt_enable_no_resched() \
do { \
        barrier(); \
        dec_preempt_count(); \
} while (0)

#define preempt_enable_no_resched()	sched_preempt_enable_no_resched()

#define preempt_enable() \
do { \
        preempt_enable_no_resched(); \
        preempt_check_resched(); \
} while (0)
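
/*
 * Typical usage sketch (illustrative only; "example_stat" is a made-up
 * per-CPU variable, not something defined here). Preemption is disabled
 * around the per-CPU access so the task cannot migrate off the CPU in the
 * middle of the update, and preempt_enable() reschedules right away if
 * preemption was requested while we were non-preemptible:
 *
 *	preempt_disable();
 *	__this_cpu_inc(example_stat);
 *	preempt_enable();
 */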

/* For debugging and tracer internals only! */
#define add_preempt_count_notrace(val) \
        do { *preempt_count_ptr() += (val); } while (0)
#define sub_preempt_count_notrace(val) \
        do { *preempt_count_ptr() -= (val); } while (0)
#define inc_preempt_count_notrace() add_preempt_count_notrace(1)
#define dec_preempt_count_notrace() sub_preempt_count_notrace(1)

#define preempt_disable_notrace() \
do { \
        inc_preempt_count_notrace(); \
        barrier(); \
} while (0)

#define preempt_enable_no_resched_notrace() \
do { \
        barrier(); \
        dec_preempt_count_notrace(); \
} while (0)

/* preempt_check_resched is OK to trace */
#define preempt_enable_notrace() \
do { \
        preempt_enable_no_resched_notrace(); \
        preempt_check_resched_context(); \
} while (0)

#else /* !CONFIG_PREEMPT_COUNT */

/*
 * Even if we don't have any preemption, we need preempt disable/enable
 * to be barriers, so that we don't have things like get_user/put_user
 * that can cause faults and scheduling migrate into our preempt-protected
 * region.
 */
#define preempt_disable()			barrier()
#define sched_preempt_enable_no_resched()	barrier()
#define preempt_enable_no_resched()		barrier()
#define preempt_enable()			barrier()

#define preempt_disable_notrace()		barrier()
#define preempt_enable_no_resched_notrace()	barrier()
#define preempt_enable_notrace()		barrier()
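
/*
 * A sketch of the hazard those barriers guard against (illustrative only;
 * "uptr" and "val" are hypothetical). get_user() may fault and schedule,
 * so the compiler barrier keeps it from being moved into the notionally
 * preempt-protected region even when preempt_disable()/preempt_enable()
 * expand to nothing more than barrier():
 *
 *	preempt_disable();
 *	// ... migration-sensitive per-CPU work here ...
 *	preempt_enable();
 *	if (get_user(val, uptr))	// may fault and sleep; must not be
 *		return -EFAULT;		// reordered into the region above
 */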

#endif /* CONFIG_PREEMPT_COUNT */

#ifdef CONFIG_PREEMPT_NOTIFIERS

struct preempt_notifier;

/**
 * preempt_ops - notifiers called when a task is preempted and rescheduled
 * @sched_in: we're about to be rescheduled:
 *    notifier: struct preempt_notifier for the task being scheduled
 *    cpu:  cpu we're scheduled on
 * @sched_out: we've just been preempted
 *    notifier: struct preempt_notifier for the task being preempted
 *    next: the task that's kicking us out
 *
 * Please note that sched_in and out are called under different
 * contexts.  sched_out is called with rq lock held and irq disabled
 * while sched_in is called without rq lock and irq enabled.  This
 * difference is intentional and depended upon by its users.
 */
struct preempt_ops {
        void (*sched_in)(struct preempt_notifier *notifier, int cpu);
        void (*sched_out)(struct preempt_notifier *notifier,
                          struct task_struct *next);
};

/**
 * preempt_notifier - key for installing preemption notifiers
 * @link: internal use
 * @ops: defines the notifier functions to be called
 *
 * Usually used in conjunction with container_of().
 */
struct preempt_notifier {
        struct hlist_node link;
        struct preempt_ops *ops;
};

void preempt_notifier_register(struct preempt_notifier *notifier);
void preempt_notifier_unregister(struct preempt_notifier *notifier);

static inline void preempt_notifier_init(struct preempt_notifier *notifier,
                                     struct preempt_ops *ops)
{
        INIT_HLIST_NODE(&notifier->link);
        notifier->ops = ops;
}
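
/*
 * Minimal usage sketch (illustrative only; "my_vcpu", its callbacks and
 * my_vcpu_preempt_ops are hypothetical, not part of this header). The
 * notifier is embedded in a larger object and recovered with
 * container_of(), as the description above suggests:
 *
 *	struct my_vcpu {
 *		struct preempt_notifier pn;
 *	};
 *
 *	static void my_vcpu_sched_in(struct preempt_notifier *pn, int cpu)
 *	{
 *		struct my_vcpu *vcpu = container_of(pn, struct my_vcpu, pn);
 *		// about to run again on @cpu: reload per-vcpu state here
 *	}
 *
 *	static void my_vcpu_sched_out(struct preempt_notifier *pn,
 *				      struct task_struct *next)
 *	{
 *		struct my_vcpu *vcpu = container_of(pn, struct my_vcpu, pn);
 *		// preempted in favour of @next: rq lock held, IRQs disabled
 *	}
 *
 *	static struct preempt_ops my_vcpu_preempt_ops = {
 *		.sched_in	= my_vcpu_sched_in,
 *		.sched_out	= my_vcpu_sched_out,
 *	};
 *
 *	// from the task that owns the vcpu:
 *	preempt_notifier_init(&vcpu->pn, &my_vcpu_preempt_ops);
 *	preempt_notifier_register(&vcpu->pn);
 */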

#endif

#endif /* __LINUX_PREEMPT_H */