blob: 59749fc48328181452537d226f632559371a7147 [file] [log] [blame]
#ifndef __LINUX_PREEMPT_H
#define __LINUX_PREEMPT_H

/*
 * include/linux/preempt.h - macros for accessing and manipulating
 * preempt_count (used for kernel preemption, interrupt count, etc.)
 */

#include <linux/linkage.h>
#include <linux/list.h>

/*
 * We use the MSB mostly because it's available; see <linux/preempt_mask.h>
 * for the other bits -- can't include that header due to inclusion hell.
 */
#define PREEMPT_NEED_RESCHED	0x80000000

/*
 * NOTE: <asm/preempt.h> is deliberately included *after* the flag above so
 * the arch-provided __preempt_count_*() implementation can see it.
 */
#include <asm/preempt.h>
/*
 * With preempt debugging or the preempt-off tracer enabled, count
 * manipulations go through out-of-line functions (so the debug/trace code
 * can hook them); otherwise they map directly onto the arch-provided
 * __preempt_count_*() primitives from <asm/preempt.h>.
 */
#if defined(CONFIG_DEBUG_PREEMPT) || defined(CONFIG_PREEMPT_TRACER)
extern void preempt_count_add(int val);
extern void preempt_count_sub(int val);
/* Decrement and test are two separate steps here, unlike the arch version. */
#define preempt_count_dec_and_test() ({ preempt_count_sub(1); should_resched(); })
#else
#define preempt_count_add(val)	__preempt_count_add(val)
#define preempt_count_sub(val)	__preempt_count_sub(val)
#define preempt_count_dec_and_test() __preempt_count_dec_and_test()
#endif

/* Raw single-step helpers -- always bypass the debug/trace hooks. */
#define __preempt_count_inc() __preempt_count_add(1)
#define __preempt_count_dec() __preempt_count_sub(1)

/* Possibly hooked single-step helpers. */
#define preempt_count_inc() preempt_count_add(1)
#define preempt_count_dec() preempt_count_sub(1)
Frederic Weisbeckerbdd4e852011-06-08 01:13:27 +020035
#ifdef CONFIG_PREEMPT_COUNT

/*
 * Disable preemption: bump the count, then a compiler barrier so the
 * protected section cannot be reordered before the increment.
 */
#define preempt_disable() \
do { \
	preempt_count_inc(); \
	barrier(); \
} while (0)

/*
 * Drop the count without checking for a pending reschedule; the barrier
 * keeps the protected section from leaking past the decrement.
 * (The sched_ prefixed form survives the MODULE #undefs below.)
 */
#define sched_preempt_enable_no_resched() \
do { \
	barrier(); \
	preempt_count_dec(); \
} while (0)

#define preempt_enable_no_resched() sched_preempt_enable_no_resched()

#ifdef CONFIG_PREEMPT
/*
 * Re-enable preemption; if the count hits zero with a reschedule pending
 * (both folded into one word, see PREEMPT_NEED_RESCHED), reschedule now.
 */
#define preempt_enable() \
do { \
	barrier(); \
	if (unlikely(preempt_count_dec_and_test())) \
		__preempt_schedule(); \
} while (0)

/* Reschedule if needed without touching the count. */
#define preempt_check_resched() \
do { \
	if (should_resched()) \
		__preempt_schedule(); \
} while (0)

#else /* !CONFIG_PREEMPT */
#define preempt_enable() \
do { \
	barrier(); \
	preempt_count_dec(); \
} while (0)
#define preempt_check_resched() do { } while (0)
#endif /* CONFIG_PREEMPT */
Steven Rostedt50282522008-05-12 21:20:41 +020074
/*
 * _notrace variants: same semantics as the macros above, but built on the
 * raw __preempt_count_*() ops so they never go through the out-of-line
 * debug/trace hooks -- safe to use from the tracing code itself.
 */
#define preempt_disable_notrace() \
do { \
	__preempt_count_inc(); \
	barrier(); \
} while (0)

#define preempt_enable_no_resched_notrace() \
do { \
	barrier(); \
	__preempt_count_dec(); \
} while (0)

#ifdef CONFIG_PREEMPT

/* Without context tracking the context-aware reschedule is the plain one. */
#ifndef CONFIG_CONTEXT_TRACKING
#define __preempt_schedule_context() __preempt_schedule()
#endif

#define preempt_enable_notrace() \
do { \
	barrier(); \
	if (unlikely(__preempt_count_dec_and_test())) \
		__preempt_schedule_context(); \
} while (0)
#else /* !CONFIG_PREEMPT */
#define preempt_enable_notrace() \
do { \
	barrier(); \
	__preempt_count_dec(); \
} while (0)
#endif /* CONFIG_PREEMPT */
Steven Rostedt50282522008-05-12 21:20:41 +0200106
#else /* !CONFIG_PREEMPT_COUNT */

/*
 * Even if we don't have any preemption, we need preempt disable/enable
 * to be barriers, so that we don't have things like get_user/put_user
 * that can cause faults and scheduling migrate into our preempt-protected
 * region.
 */
#define preempt_disable() barrier()
#define sched_preempt_enable_no_resched() barrier()
#define preempt_enable_no_resched() barrier()
#define preempt_enable() barrier()
#define preempt_check_resched() do { } while (0)

#define preempt_disable_notrace() barrier()
#define preempt_enable_no_resched_notrace() barrier()
#define preempt_enable_notrace() barrier()

#endif /* CONFIG_PREEMPT_COUNT */
Linus Torvalds1da177e2005-04-16 15:20:36 -0700126
#ifdef MODULE
/*
 * Modules have no business playing preemption tricks.
 * Only the balanced preempt_disable()/preempt_enable() pairs (and their
 * _notrace enable form) remain visible to modular code.
 */
#undef sched_preempt_enable_no_resched
#undef preempt_enable_no_resched
#undef preempt_enable_no_resched_notrace
#undef preempt_check_resched
#endif
136
#ifdef CONFIG_PREEMPT
/* Set the need-resched flag folded into preempt_count (the MSB above). */
#define preempt_set_need_resched() \
do { \
	set_preempt_need_resched(); \
} while (0)
/* Fold TIF_NEED_RESCHED from the thread flags into preempt_count. */
#define preempt_fold_need_resched() \
do { \
	if (tif_need_resched()) \
		set_preempt_need_resched(); \
} while (0)
#else
#define preempt_set_need_resched() do { } while (0)
#define preempt_fold_need_resched() do { } while (0)
#endif
151
#ifdef CONFIG_PREEMPT_NOTIFIERS

struct preempt_notifier;

/**
 * preempt_ops - notifiers called when a task is preempted and rescheduled
 * @sched_in: we're about to be rescheduled:
 *    notifier: struct preempt_notifier for the task being scheduled
 *    cpu:  cpu we're scheduled on
 * @sched_out: we've just been preempted
 *    notifier: struct preempt_notifier for the task being preempted
 *    next: the task that's kicking us out
 *
 * Please note that sched_in and out are called under different
 * contexts.  sched_out is called with rq lock held and irq disabled
 * while sched_in is called without rq lock and irq enabled.  This
 * difference is intentional and depended upon by its users.
 */
struct preempt_ops {
	void (*sched_in)(struct preempt_notifier *notifier, int cpu);
	void (*sched_out)(struct preempt_notifier *notifier,
			  struct task_struct *next);
};
175
/**
 * preempt_notifier - key for installing preemption notifiers
 * @link: internal use (hlist linkage; initialized by preempt_notifier_init())
 * @ops: defines the notifier functions to be called
 *
 * Usually used in conjunction with container_of().
 */
struct preempt_notifier {
	struct hlist_node link;
	struct preempt_ops *ops;
};
187
/* Install/remove @notifier from the set of active preemption notifiers. */
void preempt_notifier_register(struct preempt_notifier *notifier);
void preempt_notifier_unregister(struct preempt_notifier *notifier);
191static inline void preempt_notifier_init(struct preempt_notifier *notifier,
192 struct preempt_ops *ops)
193{
194 INIT_HLIST_NODE(&notifier->link);
195 notifier->ops = ops;
196}
197
198#endif
199
Linus Torvalds1da177e2005-04-16 15:20:36 -0700200#endif /* __LINUX_PREEMPT_H */