/* SPDX-License-Identifier: GPL-2.0 */
/* interrupt.h */
#ifndef _LINUX_INTERRUPT_H
#define _LINUX_INTERRUPT_H

#include <linux/kernel.h>
#include <linux/bitops.h>
#include <linux/cpumask.h>
#include <linux/irqreturn.h>
#include <linux/irqnr.h>
#include <linux/hardirq.h>
#include <linux/irqflags.h>
#include <linux/hrtimer.h>
#include <linux/kref.h>
#include <linux/workqueue.h>

#include <linux/atomic.h>
#include <asm/ptrace.h>
#include <asm/irq.h>
#include <asm/sections.h>

/*
 * These correspond to the IORESOURCE_IRQ_* defines in
 * linux/ioport.h to select the interrupt line behaviour. When
 * requesting an interrupt without specifying an IRQF_TRIGGER, the
 * setting should be assumed to be "as already configured", which
 * may be as per machine or firmware initialisation.
 */
#define IRQF_TRIGGER_NONE	0x00000000
#define IRQF_TRIGGER_RISING	0x00000001
#define IRQF_TRIGGER_FALLING	0x00000002
#define IRQF_TRIGGER_HIGH	0x00000004
#define IRQF_TRIGGER_LOW	0x00000008
#define IRQF_TRIGGER_MASK	(IRQF_TRIGGER_HIGH | IRQF_TRIGGER_LOW | \
				 IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING)
#define IRQF_TRIGGER_PROBE	0x00000010

/*
 * These flags are used only by the kernel as part of the
 * irq handling routines.
 *
 * IRQF_SHARED - allow sharing the irq among several devices
 * IRQF_PROBE_SHARED - set by callers when they expect sharing mismatches to occur
 * IRQF_TIMER - Flag to mark this interrupt as timer interrupt
 * IRQF_PERCPU - Interrupt is per cpu
 * IRQF_NOBALANCING - Flag to exclude this interrupt from irq balancing
 * IRQF_IRQPOLL - Interrupt is used for polling (only the interrupt that is
 *                registered first in a shared interrupt is considered for
 *                performance reasons)
 * IRQF_ONESHOT - Interrupt is not re-enabled after the hardirq handler finished.
 *                Used by threaded interrupts which need to keep the
 *                irq line disabled until the threaded handler has been run.
 * IRQF_NO_SUSPEND - Do not disable this IRQ during suspend. Does not guarantee
 *                   that this interrupt will wake the system from a suspended
 *                   state. See Documentation/power/suspend-and-interrupts.txt
 * IRQF_FORCE_RESUME - Force enable it on resume even if IRQF_NO_SUSPEND is set
 * IRQF_NO_THREAD - Interrupt cannot be threaded
 * IRQF_EARLY_RESUME - Resume IRQ early during syscore instead of at device
 *                     resume time.
 * IRQF_COND_SUSPEND - If the IRQ is shared with a NO_SUSPEND user, execute this
 *                     interrupt handler after suspending interrupts. For system
 *                     wakeup devices users need to implement wakeup detection in
 *                     their interrupt handlers.
 */
#define IRQF_SHARED		0x00000080
#define IRQF_PROBE_SHARED	0x00000100
#define __IRQF_TIMER		0x00000200
#define IRQF_PERCPU		0x00000400
#define IRQF_NOBALANCING	0x00000800
#define IRQF_IRQPOLL		0x00001000
#define IRQF_ONESHOT		0x00002000
#define IRQF_NO_SUSPEND		0x00004000
#define IRQF_FORCE_RESUME	0x00008000
#define IRQF_NO_THREAD		0x00010000
#define IRQF_EARLY_RESUME	0x00020000
#define IRQF_COND_SUSPEND	0x00040000

#define IRQF_TIMER		(__IRQF_TIMER | IRQF_NO_SUSPEND | IRQF_NO_THREAD)

/*
 * These values can be returned by request_any_context_irq() and
 * describe the context the interrupt will be run in.
 *
 * IRQC_IS_HARDIRQ - interrupt runs in hardirq context
 * IRQC_IS_NESTED - interrupt runs in a nested threaded context
 */
enum {
	IRQC_IS_HARDIRQ	= 0,
	IRQC_IS_NESTED,
};

typedef irqreturn_t (*irq_handler_t)(int, void *);

/**
 * struct irqaction - per interrupt action descriptor
 * @handler:	interrupt handler function
 * @name:	name of the device
 * @dev_id:	cookie to identify the device
 * @percpu_dev_id:	cookie to identify the device
 * @next:	pointer to the next irqaction for shared interrupts
 * @irq:	interrupt number
 * @flags:	flags (see IRQF_* above)
 * @thread_fn:	interrupt handler function for threaded interrupts
 * @thread:	thread pointer for threaded interrupts
 * @secondary:	pointer to secondary irqaction (force threading)
 * @thread_flags:	flags related to @thread
 * @thread_mask:	bitmask for keeping track of @thread activity
 * @dir:	pointer to the proc/irq/NN/name entry
 */
struct irqaction {
	irq_handler_t		handler;
	void			*dev_id;
	void __percpu		*percpu_dev_id;
	struct irqaction	*next;
	irq_handler_t		thread_fn;
	struct task_struct	*thread;
	struct irqaction	*secondary;
	unsigned int		irq;
	unsigned int		flags;
	unsigned long		thread_flags;
	unsigned long		thread_mask;
	const char		*name;
	struct proc_dir_entry	*dir;
} ____cacheline_internodealigned_in_smp;

extern irqreturn_t no_action(int cpl, void *dev_id);

/*
 * If a (PCI) device interrupt is not connected we set dev->irq to
 * IRQ_NOTCONNECTED. This causes request_irq() to fail with -ENOTCONN, so we
 * can distinguish that case from other error returns.
 *
 * 0x80000000 is guaranteed to be outside the available range of interrupts
 * and easy to distinguish from other possible incorrect values.
 */
#define IRQ_NOTCONNECTED	(1U << 31)

extern int __must_check
request_threaded_irq(unsigned int irq, irq_handler_t handler,
		     irq_handler_t thread_fn,
		     unsigned long flags, const char *name, void *dev);

static inline int __must_check
request_irq(unsigned int irq, irq_handler_t handler, unsigned long flags,
	    const char *name, void *dev)
{
	return request_threaded_irq(irq, handler, NULL, flags, name, dev);
}

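/*
 * Usage sketch (illustrative only, not part of this header): requesting a
 * threaded interrupt. foo_ack_irq(), foo_do_work() and struct foo_dev are
 * hypothetical names.
 *
 *	static irqreturn_t foo_hardirq(int irq, void *dev_id)
 *	{
 *		struct foo_dev *foo = dev_id;
 *
 *		foo_ack_irq(foo);		// quiet the device quickly
 *		return IRQ_WAKE_THREAD;		// defer to foo_thread_fn()
 *	}
 *
 *	static irqreturn_t foo_thread_fn(int irq, void *dev_id)
 *	{
 *		foo_do_work(dev_id);		// runs in sleepable context
 *		return IRQ_HANDLED;
 *	}
 *
 *	err = request_threaded_irq(irq, foo_hardirq, foo_thread_fn,
 *				   IRQF_ONESHOT, "foo", foo);
 *	...
 *	free_irq(irq, foo);
 */
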
extern int __must_check
request_any_context_irq(unsigned int irq, irq_handler_t handler,
			unsigned long flags, const char *name, void *dev_id);

extern int __must_check
__request_percpu_irq(unsigned int irq, irq_handler_t handler,
		     unsigned long flags, const char *devname,
		     void __percpu *percpu_dev_id);

extern int __must_check
request_nmi(unsigned int irq, irq_handler_t handler, unsigned long flags,
	    const char *name, void *dev);

static inline int __must_check
request_percpu_irq(unsigned int irq, irq_handler_t handler,
		   const char *devname, void __percpu *percpu_dev_id)
{
	return __request_percpu_irq(irq, handler, 0,
				    devname, percpu_dev_id);
}

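/*
 * Usage sketch (illustrative only): per-CPU interrupts, as used by per-CPU
 * timers and the like. The per-cpu variable foo_evt and the handler are
 * hypothetical; each CPU enables its own copy of the line.
 *
 *	static DEFINE_PER_CPU(struct foo_evt, foo_evt);
 *
 *	err = request_percpu_irq(irq, foo_percpu_handler, "foo", &foo_evt);
 *
 *	// later, from each CPU (e.g. a hotplug "starting" callback):
 *	enable_percpu_irq(irq, IRQ_TYPE_NONE);
 *	// and on the way down:
 *	disable_percpu_irq(irq);
 */
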
extern const void *free_irq(unsigned int, void *);
extern void free_percpu_irq(unsigned int, void __percpu *);

extern const void *free_nmi(unsigned int irq, void *dev_id);

struct device;

extern int __must_check
devm_request_threaded_irq(struct device *dev, unsigned int irq,
			  irq_handler_t handler, irq_handler_t thread_fn,
			  unsigned long irqflags, const char *devname,
			  void *dev_id);

static inline int __must_check
devm_request_irq(struct device *dev, unsigned int irq, irq_handler_t handler,
		 unsigned long irqflags, const char *devname, void *dev_id)
{
	return devm_request_threaded_irq(dev, irq, handler, NULL, irqflags,
					 devname, dev_id);
}

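/*
 * Usage sketch (illustrative only): the devm_ variants tie the interrupt's
 * lifetime to the device, so no explicit free_irq() is needed on the error
 * or removal paths. foo_isr() and struct foo_dev are hypothetical.
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		struct foo_dev *foo = ...;
 *		int irq = platform_get_irq(pdev, 0);
 *
 *		if (irq < 0)
 *			return irq;
 *		return devm_request_irq(&pdev->dev, irq, foo_isr, 0,
 *					"foo", foo);
 *	}
 */
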
extern int __must_check
devm_request_any_context_irq(struct device *dev, unsigned int irq,
			     irq_handler_t handler, unsigned long irqflags,
			     const char *devname, void *dev_id);

extern void devm_free_irq(struct device *dev, unsigned int irq, void *dev_id);

/*
 * On lockdep we don't want to enable hardirqs in hardirq
 * context. Use local_irq_enable_in_hardirq() to annotate
 * kernel code that has to do this nevertheless (pretty much
 * the only valid case is for old/broken hardware that is
 * insanely slow).
 *
 * NOTE: in theory this might break fragile code that relies
 * on hardirq delivery - in practice we don't seem to have such
 * places left. So the only effect should be slightly increased
 * irqs-off latencies.
 */
#ifdef CONFIG_LOCKDEP
# define local_irq_enable_in_hardirq()	do { } while (0)
#else
# define local_irq_enable_in_hardirq()	local_irq_enable()
#endif

extern void disable_irq_nosync(unsigned int irq);
extern bool disable_hardirq(unsigned int irq);
extern void disable_irq(unsigned int irq);
extern void disable_percpu_irq(unsigned int irq);
extern void enable_irq(unsigned int irq);
extern void enable_percpu_irq(unsigned int irq, unsigned int type);
extern bool irq_percpu_is_enabled(unsigned int irq);
extern void irq_wake_thread(unsigned int irq, void *dev_id);

extern void disable_nmi_nosync(unsigned int irq);
extern void enable_nmi(unsigned int irq);

/* The following two functions are for core kernel use only. */
extern void suspend_device_irqs(void);
extern void resume_device_irqs(void);

/**
 * struct irq_affinity_notify - context for notification of IRQ affinity changes
 * @irq:	Interrupt to which notification applies
 * @kref:	Reference count, for internal use
 * @work:	Work item, for internal use
 * @notify:	Function to be called on change. This will be
 *		called in process context.
 * @release:	Function to be called on release. This will be
 *		called in process context. Once registered, the
 *		structure must only be freed when this function is
 *		called or later.
 */
struct irq_affinity_notify {
	unsigned int irq;
	struct kref kref;
	struct work_struct work;
	void (*notify)(struct irq_affinity_notify *, const cpumask_t *mask);
	void (*release)(struct kref *ref);
};

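/*
 * Usage sketch (illustrative only): receiving affinity-change callbacks.
 * The structure is typically embedded in a driver object; foo_notify()
 * and foo_release() are hypothetical.
 *
 *	static void foo_notify(struct irq_affinity_notify *notify,
 *			       const cpumask_t *mask)
 *	{
 *		// re-steer per-queue resources to the new CPU set
 *	}
 *
 *	static void foo_release(struct kref *ref)
 *	{
 *		// last reference dropped; safe to free the container
 *	}
 *
 *	foo->affinity_notify.notify = foo_notify;
 *	foo->affinity_notify.release = foo_release;
 *	irq_set_affinity_notifier(irq, &foo->affinity_notify);
 *	...
 *	irq_set_affinity_notifier(irq, NULL);	// unregister
 */
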
/**
 * struct irq_affinity - Description for automatic irq affinity assignments
 * @pre_vectors:	Don't apply affinity to @pre_vectors at beginning of
 *			the MSI(-X) vector space
 * @post_vectors:	Don't apply affinity to @post_vectors at end of
 *			the MSI(-X) vector space
 * @nr_sets:		Number of entries in the @sets array
 * @sets:		Array holding the size of each affinitized set
 */
struct irq_affinity {
	int	pre_vectors;
	int	post_vectors;
	int	nr_sets;
	int	*sets;
};

/**
 * struct irq_affinity_desc - Interrupt affinity descriptor
 * @mask:	cpumask to hold the affinity assignment
 * @is_managed:	1 if the interrupt is managed internally
 */
struct irq_affinity_desc {
	struct cpumask	mask;
	unsigned int	is_managed : 1;
};

#if defined(CONFIG_SMP)

extern cpumask_var_t irq_default_affinity;

/* Internal implementation. Use the helpers below */
extern int __irq_set_affinity(unsigned int irq, const struct cpumask *cpumask,
			      bool force);

/**
 * irq_set_affinity - Set the irq affinity of a given irq
 * @irq:	Interrupt to set affinity
 * @cpumask:	cpumask
 *
 * Fails if cpumask does not contain an online CPU
 */
static inline int
irq_set_affinity(unsigned int irq, const struct cpumask *cpumask)
{
	return __irq_set_affinity(irq, cpumask, false);
}

/**
 * irq_force_affinity - Force the irq affinity of a given irq
 * @irq:	Interrupt to set affinity
 * @cpumask:	cpumask
 *
 * Same as irq_set_affinity, but without checking the mask against
 * online cpus.
 *
 * Solely for low level cpu hotplug code, where we need to make per
 * cpu interrupts affine before the cpu becomes online.
 */
static inline int
irq_force_affinity(unsigned int irq, const struct cpumask *cpumask)
{
	return __irq_set_affinity(irq, cpumask, true);
}

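/*
 * Usage sketch (illustrative only): pinning an interrupt to one CPU,
 * e.g. to keep a NIC queue's interrupt local to the core processing it.
 * The CPU number is an arbitrary example.
 *
 *	int err = irq_set_affinity(irq, cpumask_of(2));	// CPU 2 only
 *
 *	if (err)
 *		pr_warn("could not set affinity for irq %u\n", irq);
 */
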
extern int irq_can_set_affinity(unsigned int irq);
extern int irq_select_affinity(unsigned int irq);

extern int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m);

extern int
irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify);

struct irq_affinity_desc *
irq_create_affinity_masks(int nvec, const struct irq_affinity *affd);

int irq_calc_affinity_vectors(int minvec, int maxvec, const struct irq_affinity *affd);

#else /* CONFIG_SMP */

static inline int irq_set_affinity(unsigned int irq, const struct cpumask *m)
{
	return -EINVAL;
}

static inline int irq_force_affinity(unsigned int irq, const struct cpumask *cpumask)
{
	return 0;
}

static inline int irq_can_set_affinity(unsigned int irq)
{
	return 0;
}

static inline int irq_select_affinity(unsigned int irq)  { return 0; }

static inline int irq_set_affinity_hint(unsigned int irq,
					const struct cpumask *m)
{
	return -EINVAL;
}

static inline int
irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify)
{
	return 0;
}

static inline struct irq_affinity_desc *
irq_create_affinity_masks(int nvec, const struct irq_affinity *affd)
{
	return NULL;
}

static inline int
irq_calc_affinity_vectors(int minvec, int maxvec, const struct irq_affinity *affd)
{
	return maxvec;
}

#endif /* CONFIG_SMP */

/*
 * Special lockdep variants of irq disabling/enabling.
 * These should be used for locking constructs that
 * know that a particular irq context is disabled,
 * and is the only irq-context user of a lock,
 * so that it's safe to take the lock in the irq-disabled
 * section without disabling hardirqs.
 *
 * On !CONFIG_LOCKDEP they are equivalent to the normal
 * irq disable/enable methods.
 */
static inline void disable_irq_nosync_lockdep(unsigned int irq)
{
	disable_irq_nosync(irq);
#ifdef CONFIG_LOCKDEP
	local_irq_disable();
#endif
}

static inline void disable_irq_nosync_lockdep_irqsave(unsigned int irq, unsigned long *flags)
{
	disable_irq_nosync(irq);
#ifdef CONFIG_LOCKDEP
	local_irq_save(*flags);
#endif
}

static inline void disable_irq_lockdep(unsigned int irq)
{
	disable_irq(irq);
#ifdef CONFIG_LOCKDEP
	local_irq_disable();
#endif
}

static inline void enable_irq_lockdep(unsigned int irq)
{
#ifdef CONFIG_LOCKDEP
	local_irq_enable();
#endif
	enable_irq(irq);
}

static inline void enable_irq_lockdep_irqrestore(unsigned int irq, unsigned long *flags)
{
#ifdef CONFIG_LOCKDEP
	local_irq_restore(*flags);
#endif
	enable_irq(irq);
}

/* IRQ wakeup (PM) control: */
extern int irq_set_irq_wake(unsigned int irq, unsigned int on);

static inline int enable_irq_wake(unsigned int irq)
{
	return irq_set_irq_wake(irq, 1);
}

static inline int disable_irq_wake(unsigned int irq)
{
	return irq_set_irq_wake(irq, 0);
}

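/*
 * Usage sketch (illustrative only): a driver whose interrupt should wake
 * the system marks the line as a wakeup source across suspend/resume.
 * struct foo_dev, foo->irq and the callbacks are hypothetical.
 *
 *	static int foo_suspend(struct device *dev)
 *	{
 *		struct foo_dev *foo = dev_get_drvdata(dev);
 *
 *		if (device_may_wakeup(dev))
 *			enable_irq_wake(foo->irq);
 *		return 0;
 *	}
 *
 *	static int foo_resume(struct device *dev)
 *	{
 *		struct foo_dev *foo = dev_get_drvdata(dev);
 *
 *		if (device_may_wakeup(dev))
 *			disable_irq_wake(foo->irq);
 *		return 0;
 *	}
 */
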
/*
 * irq_get_irqchip_state/irq_set_irqchip_state specific flags
 */
enum irqchip_irq_state {
	IRQCHIP_STATE_PENDING,		/* Is interrupt pending? */
	IRQCHIP_STATE_ACTIVE,		/* Is interrupt in progress? */
	IRQCHIP_STATE_MASKED,		/* Is interrupt masked? */
	IRQCHIP_STATE_LINE_LEVEL,	/* Is IRQ line high? */
};

extern int irq_get_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
				 bool *state);
extern int irq_set_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
				 bool state);

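/*
 * Usage sketch (illustrative only): querying and clearing a pending
 * interrupt at the irqchip level, e.g. before handing a line over or
 * after quiescing a device.
 *
 *	bool pending;
 *
 *	if (!irq_get_irqchip_state(irq, IRQCHIP_STATE_PENDING, &pending) &&
 *	    pending)
 *		irq_set_irqchip_state(irq, IRQCHIP_STATE_PENDING, false);
 */
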
#ifdef CONFIG_IRQ_FORCED_THREADING
extern bool force_irqthreads;
#else
#define force_irqthreads	(0)
#endif

#ifndef local_softirq_pending

#ifndef local_softirq_pending_ref
#define local_softirq_pending_ref irq_stat.__softirq_pending
#endif

#define local_softirq_pending()	(__this_cpu_read(local_softirq_pending_ref))
#define set_softirq_pending(x)	(__this_cpu_write(local_softirq_pending_ref, (x)))
#define or_softirq_pending(x)	(__this_cpu_or(local_softirq_pending_ref, (x)))

#endif /* local_softirq_pending */

/* Some architectures might implement lazy enabling/disabling of
 * interrupts. In some cases, such as stop_machine, we might want
 * to ensure that after a local_irq_disable(), interrupts have
 * really been disabled in hardware. Such architectures need to
 * implement the following hook.
 */
#ifndef hard_irq_disable
#define hard_irq_disable()	do { } while (0)
#endif

/* PLEASE, avoid allocating new softirqs unless you really need
   high frequency threaded job scheduling. For almost all purposes
   tasklets are more than enough. F.e. all serial device BHs et
   al. should be converted to tasklets, not to softirqs.
 */

enum
{
	HI_SOFTIRQ=0,
	TIMER_SOFTIRQ,
	NET_TX_SOFTIRQ,
	NET_RX_SOFTIRQ,
	BLOCK_SOFTIRQ,
	IRQ_POLL_SOFTIRQ,
	TASKLET_SOFTIRQ,
	SCHED_SOFTIRQ,
	HRTIMER_SOFTIRQ,	/* Unused, but kept as tools rely on the
				   numbering. Sigh! */
	RCU_SOFTIRQ,		/* Preferably RCU should always be the last softirq */

	NR_SOFTIRQS
};

#define SOFTIRQ_STOP_IDLE_MASK (~(1 << RCU_SOFTIRQ))

/* map softirq index to softirq name. update 'softirq_to_name' in
 * kernel/softirq.c when adding a new softirq.
 */
extern const char * const softirq_to_name[NR_SOFTIRQS];

/* softirq mask and active fields moved to irq_cpustat_t in
 * asm/hardirq.h to get better cache usage. KAO
 */

struct softirq_action
{
	void	(*action)(struct softirq_action *);
};

asmlinkage void do_softirq(void);
asmlinkage void __do_softirq(void);

#ifdef __ARCH_HAS_DO_SOFTIRQ
void do_softirq_own_stack(void);
#else
static inline void do_softirq_own_stack(void)
{
	__do_softirq();
}
#endif

extern void open_softirq(int nr, void (*action)(struct softirq_action *));
extern void softirq_init(void);
extern void __raise_softirq_irqoff(unsigned int nr);

extern void raise_softirq_irqoff(unsigned int nr);
extern void raise_softirq(unsigned int nr);

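/*
 * Usage sketch (illustrative only): core subsystems register their action
 * at init time and raise the softirq when work is pending; e.g. the
 * networking core does, in essence:
 *
 *	open_softirq(NET_TX_SOFTIRQ, net_tx_action);	// boot-time setup
 *	...
 *	raise_softirq_irqoff(NET_TX_SOFTIRQ);	// with interrupts disabled
 *
 * Drivers should not allocate new softirq numbers; use tasklets (below)
 * instead, as noted above.
 */
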
DECLARE_PER_CPU(struct task_struct *, ksoftirqd);

static inline struct task_struct *this_cpu_ksoftirqd(void)
{
	return this_cpu_read(ksoftirqd);
}

/* Tasklets --- multithreaded analogue of BHs.

   The main feature distinguishing them from generic softirqs: one
   tasklet runs on only one CPU at a time.

   The main feature distinguishing them from BHs: different tasklets
   may be run simultaneously on different CPUs.

   Properties:
   * If tasklet_schedule() is called, then the tasklet is guaranteed
     to be executed on some cpu at least once after this.
   * If the tasklet is already scheduled, but its execution is still not
     started, it will be executed only once.
   * If this tasklet is already running on another CPU (or schedule is called
     from the tasklet itself), it is rescheduled for later.
   * A tasklet is strictly serialized wrt itself, but not
     wrt other tasklets. If a client needs some inter-task synchronization,
     it must provide it with spinlocks.
 */

struct tasklet_struct
{
	struct tasklet_struct *next;
	unsigned long state;
	atomic_t count;
	void (*func)(unsigned long);
	unsigned long data;
};

#define DECLARE_TASKLET(name, func, data) \
struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(0), func, data }

#define DECLARE_TASKLET_DISABLED(name, func, data) \
struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(1), func, data }

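/*
 * Usage sketch (illustrative only): declaring, scheduling and killing a
 * tasklet. foo_tasklet_fn() and struct foo_dev are hypothetical.
 *
 *	static struct foo_dev foo_dev;
 *
 *	static void foo_tasklet_fn(unsigned long data)
 *	{
 *		struct foo_dev *foo = (struct foo_dev *)data;
 *		// bottom-half work; runs in atomic (softirq) context
 *	}
 *
 *	DECLARE_TASKLET(foo_tasklet, foo_tasklet_fn, (unsigned long)&foo_dev);
 *
 *	// typically from the hardirq handler:
 *	tasklet_schedule(&foo_tasklet);
 *
 *	// on teardown, wait for a running instance and unschedule:
 *	tasklet_kill(&foo_tasklet);
 */
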
enum
{
	TASKLET_STATE_SCHED,	/* Tasklet is scheduled for execution */
	TASKLET_STATE_RUN	/* Tasklet is running (SMP only) */
};

#ifdef CONFIG_SMP
static inline int tasklet_trylock(struct tasklet_struct *t)
{
	return !test_and_set_bit(TASKLET_STATE_RUN, &(t)->state);
}

static inline void tasklet_unlock(struct tasklet_struct *t)
{
	smp_mb__before_atomic();
	clear_bit(TASKLET_STATE_RUN, &(t)->state);
}

static inline void tasklet_unlock_wait(struct tasklet_struct *t)
{
	while (test_bit(TASKLET_STATE_RUN, &(t)->state)) { barrier(); }
}
#else
#define tasklet_trylock(t) 1
#define tasklet_unlock_wait(t) do { } while (0)
#define tasklet_unlock(t) do { } while (0)
#endif

extern void __tasklet_schedule(struct tasklet_struct *t);

static inline void tasklet_schedule(struct tasklet_struct *t)
{
	if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
		__tasklet_schedule(t);
}

extern void __tasklet_hi_schedule(struct tasklet_struct *t);

static inline void tasklet_hi_schedule(struct tasklet_struct *t)
{
	if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
		__tasklet_hi_schedule(t);
}

static inline void tasklet_disable_nosync(struct tasklet_struct *t)
{
	atomic_inc(&t->count);
	smp_mb__after_atomic();
}

static inline void tasklet_disable(struct tasklet_struct *t)
{
	tasklet_disable_nosync(t);
	tasklet_unlock_wait(t);
	smp_mb();
}

static inline void tasklet_enable(struct tasklet_struct *t)
{
	smp_mb__before_atomic();
	atomic_dec(&t->count);
}

extern void tasklet_kill(struct tasklet_struct *t);
extern void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu);
extern void tasklet_init(struct tasklet_struct *t,
			 void (*func)(unsigned long), unsigned long data);

struct tasklet_hrtimer {
	struct hrtimer		timer;
	struct tasklet_struct	tasklet;
	enum hrtimer_restart	(*function)(struct hrtimer *);
};

extern void
tasklet_hrtimer_init(struct tasklet_hrtimer *ttimer,
		     enum hrtimer_restart (*function)(struct hrtimer *),
		     clockid_t which_clock, enum hrtimer_mode mode);

static inline
void tasklet_hrtimer_start(struct tasklet_hrtimer *ttimer, ktime_t time,
			   const enum hrtimer_mode mode)
{
	hrtimer_start(&ttimer->timer, time, mode);
}

static inline
void tasklet_hrtimer_cancel(struct tasklet_hrtimer *ttimer)
{
	hrtimer_cancel(&ttimer->timer);
	tasklet_kill(&ttimer->tasklet);
}

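/*
 * Usage sketch (illustrative only): a tasklet_hrtimer fires its handler in
 * softirq context after a timeout. foo_timer_fn() and foo->ttimer are
 * hypothetical.
 *
 *	static enum hrtimer_restart foo_timer_fn(struct hrtimer *t)
 *	{
 *		// runs from the tasklet, i.e. softirq context
 *		return HRTIMER_NORESTART;
 *	}
 *
 *	tasklet_hrtimer_init(&foo->ttimer, foo_timer_fn,
 *			     CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 *	tasklet_hrtimer_start(&foo->ttimer, ms_to_ktime(10),
 *			      HRTIMER_MODE_REL);
 *	...
 *	tasklet_hrtimer_cancel(&foo->ttimer);
 */
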
/*
 * Autoprobing for irqs:
 *
 * probe_irq_on() and probe_irq_off() provide robust primitives
 * for accurate IRQ probing during kernel initialization. They are
 * reasonably simple to use, are not "fooled" by spurious interrupts,
 * and, unlike other attempts at IRQ probing, they do not get hung on
 * stuck interrupts (such as unused PS2 mouse interfaces on ASUS boards).
 *
 * For reasonably foolproof probing, use them as follows:
 *
 * 1. clear and/or mask the device's internal interrupt.
 * 2. sti();
 * 3. irqs = probe_irq_on();	// "take over" all unassigned idle IRQs
 * 4. enable the device and cause it to trigger an interrupt.
 * 5. wait for the device to interrupt, using non-intrusive polling or a delay.
 * 6. irq = probe_irq_off(irqs);	// get IRQ number, 0=none, negative=multiple
 * 7. service the device to clear its pending interrupt.
 * 8. loop again if paranoia is required.
 *
 * probe_irq_on() returns a mask of allocated irqs.
 *
 * probe_irq_off() takes the mask as a parameter,
 * and returns the irq number which occurred,
 * or zero if none occurred, or a negative irq number
 * if more than one irq occurred.
 */

#if !defined(CONFIG_GENERIC_IRQ_PROBE)
static inline unsigned long probe_irq_on(void)
{
	return 0;
}
static inline int probe_irq_off(unsigned long val)
{
	return 0;
}
static inline unsigned int probe_irq_mask(unsigned long val)
{
	return 0;
}
#else
extern unsigned long probe_irq_on(void);	/* returns 0 on failure */
extern int probe_irq_off(unsigned long);	/* returns 0 or negative on failure */
extern unsigned int probe_irq_mask(unsigned long);	/* returns mask of ISA interrupts */
#endif

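/*
 * Sketch of the recipe above (illustrative only; foo_trigger_irq() is a
 * hypothetical helper that makes the device raise its interrupt, and the
 * delay length is arbitrary):
 *
 *	unsigned long mask;
 *	int irq;
 *
 *	local_irq_enable();		// step 2
 *	mask = probe_irq_on();		// step 3
 *	foo_trigger_irq();		// step 4
 *	mdelay(20);			// step 5
 *	irq = probe_irq_off(mask);	// step 6: 0 = none, <0 = multiple
 */
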
#ifdef CONFIG_PROC_FS
/* Initialize /proc/irq/ */
extern void init_irq_proc(void);
#else
static inline void init_irq_proc(void)
{
}
#endif

#ifdef CONFIG_IRQ_TIMINGS
void irq_timings_enable(void);
void irq_timings_disable(void);
u64 irq_timings_next_event(u64 now);
#endif

struct seq_file;
int show_interrupts(struct seq_file *p, void *v);
int arch_show_interrupts(struct seq_file *p, int prec);

extern int early_irq_init(void);
extern int arch_probe_nr_irqs(void);
extern int arch_early_irq_init(void);

/*
 * We want to know which function is an entrypoint of a hardirq or a softirq.
 */
#define __irq_entry		 __attribute__((__section__(".irqentry.text")))
#define __softirq_entry  \
	__attribute__((__section__(".softirqentry.text")))

#endif