#ifndef __LINUX_PREEMPT_H
#define __LINUX_PREEMPT_H

/*
 * include/linux/preempt.h - macros for accessing and manipulating
 * preempt_count (used for kernel preemption, interrupt count, etc.)
 */

#include <linux/thread_info.h>
#include <linux/linkage.h>
#include <linux/list.h>

/*
 * We use the MSB mostly because it's available; see <linux/preempt_mask.h> for
 * the other bits -- can't include that header due to inclusion hell.
 */
#define PREEMPT_NEED_RESCHED	0x80000000

/*
 * We mask the PREEMPT_NEED_RESCHED bit so as not to confuse all current users
 * that think a non-zero value indicates we cannot preempt.
 */
static __always_inline int preempt_count(void)
{
	return current_thread_info()->preempt_count & ~PREEMPT_NEED_RESCHED;
}
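
/*
 * Worked example (illustrative values, given the layout above): after a
 * single preempt_disable() with no reschedule pending, the raw count is
 * 0x80000001; preempt_count() masks off the MSB and returns 1, so the
 * "non-zero means we cannot preempt" convention still holds.
 */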

static __always_inline int *preempt_count_ptr(void)
{
	return &current_thread_info()->preempt_count;
}

/*
 * We now lose PREEMPT_NEED_RESCHED and cause an extra reschedule; however the
 * alternative is losing a reschedule. Better to schedule too often -- also,
 * this should be a very rare operation.
 */
static __always_inline void preempt_count_set(int pc)
{
	*preempt_count_ptr() = pc;
}
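
/*
 * Example of the trade-off above (a sketch): NEED_RESCHED is folded in
 * inverted, so preempt_count_set(0) writes the raw word with
 * PREEMPT_NEED_RESCHED clear, which reads back as "reschedule needed";
 * the worst case is one spurious pass through preempt_schedule().
 */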

/*
 * We fold the NEED_RESCHED bit into the preempt count such that
 * preempt_enable() can decrement and test for needing to reschedule with a
 * single instruction.
 *
 * We invert the actual bit, so that when the decrement hits 0 we know we both
 * need to resched (the bit is cleared) and can resched (no preempt count).
 */

static __always_inline void set_preempt_need_resched(void)
{
	*preempt_count_ptr() &= ~PREEMPT_NEED_RESCHED;
}

static __always_inline void clear_preempt_need_resched(void)
{
	*preempt_count_ptr() |= PREEMPT_NEED_RESCHED;
}

static __always_inline bool test_preempt_need_resched(void)
{
	return !(*preempt_count_ptr() & PREEMPT_NEED_RESCHED);
}
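
/*
 * Illustration of the inverted encoding (example values only):
 *
 *	raw count	state
 *	0x80000001	preemption disabled once, no reschedule pending
 *	0x00000001	preemption disabled once, reschedule pending
 *	0x00000000	preemptible and reschedule pending -- a single
 *			"decrement and test for zero" observes both
 *			conditions at once
 */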

#if defined(CONFIG_DEBUG_PREEMPT) || defined(CONFIG_PREEMPT_TRACER)
  extern void add_preempt_count(int val);
  extern void sub_preempt_count(int val);
#else
# define add_preempt_count(val)	do { *preempt_count_ptr() += (val); } while (0)
# define sub_preempt_count(val)	do { *preempt_count_ptr() -= (val); } while (0)
#endif

#define inc_preempt_count() add_preempt_count(1)
#define dec_preempt_count() sub_preempt_count(1)

#ifdef CONFIG_PREEMPT

asmlinkage void preempt_schedule(void);

#define preempt_check_resched() \
do { \
	if (unlikely(!*preempt_count_ptr())) \
		preempt_schedule(); \
} while (0)

#ifdef CONFIG_CONTEXT_TRACKING

void preempt_schedule_context(void);

#define preempt_check_resched_context() \
do { \
	if (unlikely(!*preempt_count_ptr())) \
		preempt_schedule_context(); \
} while (0)
#else

#define preempt_check_resched_context() preempt_check_resched()

#endif /* CONFIG_CONTEXT_TRACKING */

#else /* !CONFIG_PREEMPT */

#define preempt_check_resched() do { } while (0)
#define preempt_check_resched_context() do { } while (0)

#endif /* CONFIG_PREEMPT */


#ifdef CONFIG_PREEMPT_COUNT

#define preempt_disable() \
do { \
	inc_preempt_count(); \
	barrier(); \
} while (0)

#define sched_preempt_enable_no_resched() \
do { \
	barrier(); \
	dec_preempt_count(); \
} while (0)

#define preempt_enable_no_resched() sched_preempt_enable_no_resched()

#define preempt_enable() \
do { \
	preempt_enable_no_resched(); \
	preempt_check_resched(); \
} while (0)
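
/*
 * Typical usage (a minimal sketch, not taken from a real caller):
 *
 *	preempt_disable();
 *	cpu = smp_processor_id();	-- stable, we cannot be migrated
 *	...
 *	preempt_enable();		-- reschedules if needed and allowed
 */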

/* For debugging and tracer internals only! */
#define add_preempt_count_notrace(val)			\
	do { *preempt_count_ptr() += (val); } while (0)
#define sub_preempt_count_notrace(val)			\
	do { *preempt_count_ptr() -= (val); } while (0)
#define inc_preempt_count_notrace() add_preempt_count_notrace(1)
#define dec_preempt_count_notrace() sub_preempt_count_notrace(1)

#define preempt_disable_notrace() \
do { \
	inc_preempt_count_notrace(); \
	barrier(); \
} while (0)

#define preempt_enable_no_resched_notrace() \
do { \
	barrier(); \
	dec_preempt_count_notrace(); \
} while (0)

/* preempt_check_resched is OK to trace */
#define preempt_enable_notrace() \
do { \
	preempt_enable_no_resched_notrace(); \
	preempt_check_resched_context(); \
} while (0)

#else /* !CONFIG_PREEMPT_COUNT */

/*
 * Even if we don't have any preemption, we need preempt disable/enable
 * to be barriers, so that things like get_user/put_user, which can
 * cause faults and scheduling, do not migrate into our preempt-protected
 * region.
 */
#define preempt_disable()			barrier()
#define sched_preempt_enable_no_resched()	barrier()
#define preempt_enable_no_resched()		barrier()
#define preempt_enable()			barrier()
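
/*
 * Sketch of the hazard the comment above describes: with completely
 * empty definitions the compiler could move, e.g., a faulting
 * get_user() from before preempt_disable() to after it, i.e. into the
 * region the caller believes is non-sleeping; barrier() forbids such
 * compiler reordering even though no preempt count is maintained.
 */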
Linus Torvalds1da177e2005-04-16 15:20:36 -0700172
Linus Torvalds386afc92013-04-09 10:48:33 -0700173#define preempt_disable_notrace() barrier()
174#define preempt_enable_no_resched_notrace() barrier()
175#define preempt_enable_notrace() barrier()
Steven Rostedt50282522008-05-12 21:20:41 +0200176
Frederic Weisbeckerbdd4e852011-06-08 01:13:27 +0200177#endif /* CONFIG_PREEMPT_COUNT */
Linus Torvalds1da177e2005-04-16 15:20:36 -0700178
#ifdef CONFIG_PREEMPT_NOTIFIERS

struct preempt_notifier;

/**
 * preempt_ops - notifiers called when a task is preempted and rescheduled
 * @sched_in: we're about to be rescheduled:
 *            notifier: struct preempt_notifier for the task being scheduled
 *            cpu:  cpu we're scheduled on
 * @sched_out: we've just been preempted
 *    notifier: struct preempt_notifier for the task being preempted
 *    next: the task that's kicking us out
 *
 * Please note that sched_in and sched_out are called in different
 * contexts.  sched_out is called with the rq lock held and irqs disabled,
 * while sched_in is called without the rq lock and with irqs enabled.  This
 * difference is intentional and depended upon by its users.
 */
struct preempt_ops {
	void (*sched_in)(struct preempt_notifier *notifier, int cpu);
	void (*sched_out)(struct preempt_notifier *notifier,
			  struct task_struct *next);
};

/**
 * preempt_notifier - key for installing preemption notifiers
 * @link: internal use
 * @ops: defines the notifier functions to be called
 *
 * Usually used in conjunction with container_of().
 */
struct preempt_notifier {
	struct hlist_node link;
	struct preempt_ops *ops;
};

void preempt_notifier_register(struct preempt_notifier *notifier);
void preempt_notifier_unregister(struct preempt_notifier *notifier);

static inline void preempt_notifier_init(struct preempt_notifier *notifier,
				struct preempt_ops *ops)
{
	INIT_HLIST_NODE(&notifier->link);
	notifier->ops = ops;
}
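
/*
 * Example embedding (a sketch; "my_vcpu", "my_sched_in" and "my_ops"
 * are made-up names, not kernel symbols):
 *
 *	struct my_vcpu {
 *		struct preempt_notifier pn;
 *	};
 *
 *	static void my_sched_in(struct preempt_notifier *pn, int cpu)
 *	{
 *		struct my_vcpu *v = container_of(pn, struct my_vcpu, pn);
 *		...
 *	}
 *
 * After preempt_notifier_init(&v->pn, &my_ops), the current task opts
 * in with preempt_notifier_register(&v->pn).
 */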

#endif

#endif /* __LINUX_PREEMPT_H */