/* SPDX-License-Identifier: GPL-2.0 */
/*
 * IRQ subsystem internal functions and variables:
 *
 * Never include this file from anything other than kernel/irq/.
 * Do not even think about using any information from this file
 * in your non-core code.
 */
#include <linux/irqdesc.h>
#include <linux/kernel_stat.h>
#include <linux/pm_runtime.h>
#include <linux/sched/clock.h>

#ifdef CONFIG_SPARSE_IRQ
/* Extra headroom for dynamically allocated sparse interrupts above NR_IRQS */
# define IRQ_BITMAP_BITS	(NR_IRQS + 8196)
#else
# define IRQ_BITMAP_BITS	NR_IRQS
#endif
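
/*
 * Illustrative sketch (not part of this header): the descriptor
 * allocator in kernel/irq/irqdesc.c sizes its allocation bitmap with
 * IRQ_BITMAP_BITS and searches it roughly like this:
 *
 *	static DECLARE_BITMAP(allocated_irqs, IRQ_BITMAP_BITS);
 *	...
 *	start = bitmap_find_next_zero_area(allocated_irqs, IRQ_BITMAP_BITS,
 *					   from, cnt, 0);
 */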

#define istate core_internal_state__do_not_mess_with_it
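
/*
 * For illustration: with the define above, core code can write
 *
 *	desc->istate |= IRQS_PENDING;
 *
 * which expands to the deliberately unwieldy field name
 * desc->core_internal_state__do_not_mess_with_it, keeping accidental
 * use outside of kernel/irq/ both hard and obvious.
 */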

extern bool noirqdebug;

extern struct irqaction chained_action;

/*
 * Bits used by threaded handlers:
 * IRQTF_RUNTHREAD     - signals that the interrupt handler thread should run
 * IRQTF_WARNED        - warning "IRQ_WAKE_THREAD w/o thread_fn" has been printed
 * IRQTF_AFFINITY      - irq thread is requested to adjust affinity
 * IRQTF_FORCED_THREAD - irq action is force threaded
 */
enum {
	IRQTF_RUNTHREAD,
	IRQTF_WARNED,
	IRQTF_AFFINITY,
	IRQTF_FORCED_THREAD,
};
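
/*
 * Usage sketch (illustrative only): these bits live in
 * action->thread_flags and are manipulated atomically; the irq
 * thread's wait loop, for instance, checks roughly:
 *
 *	if (test_and_clear_bit(IRQTF_RUNTHREAD, &action->thread_flags))
 *		return 0;	// there is work: run the thread_fn
 */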

/*
 * Bit masks for desc->core_internal_state__do_not_mess_with_it
 *
 * IRQS_AUTODETECT		- autodetection in progress
 * IRQS_SPURIOUS_DISABLED	- was disabled due to spurious interrupt
 *				  detection
 * IRQS_POLL_INPROGRESS		- polling in progress
 * IRQS_ONESHOT			- irq is not unmasked in primary handler
 * IRQS_REPLAY			- irq is replayed (resend in progress)
 * IRQS_WAITING			- irq is waiting (used by autoprobing)
 * IRQS_PENDING			- irq is pending and will be replayed later
 * IRQS_SUSPENDED		- irq is suspended
 * IRQS_TIMINGS			- irq timings are recorded
 */
enum {
	IRQS_AUTODETECT		= 0x00000001,
	IRQS_SPURIOUS_DISABLED	= 0x00000002,
	IRQS_POLL_INPROGRESS	= 0x00000008,
	IRQS_ONESHOT		= 0x00000020,
	IRQS_REPLAY		= 0x00000040,
	IRQS_WAITING		= 0x00000080,
	IRQS_PENDING		= 0x00000200,
	IRQS_SUSPENDED		= 0x00000800,
	IRQS_TIMINGS		= 0x00001000,
};
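
/*
 * Illustrative sketch: these state bits are tested and modified under
 * desc->lock; the resend logic, for example, does roughly:
 *
 *	if ((desc->istate & IRQS_PENDING) &&
 *	    !irqd_irq_disabled(&desc->irq_data)) {
 *		desc->istate &= ~IRQS_PENDING;
 *		... retrigger the interrupt ...
 *	}
 */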

#include "debug.h"
#include "settings.h"

extern int __irq_set_trigger(struct irq_desc *desc, unsigned long flags);
extern void __disable_irq(struct irq_desc *desc);
extern void __enable_irq(struct irq_desc *desc);

#define IRQ_RESEND	true
#define IRQ_NORESEND	false

#define IRQ_START_FORCE	true
#define IRQ_START_COND	false

extern int irq_startup(struct irq_desc *desc, bool resend, bool force);
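
/*
 * For illustration, callers pass the named booleans above rather than
 * bare true/false, e.g. the setup path does something like:
 *
 *	irq_startup(desc, IRQ_RESEND, IRQ_START_COND);
 */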

extern void irq_shutdown(struct irq_desc *desc);
extern void irq_enable(struct irq_desc *desc);
extern void irq_disable(struct irq_desc *desc);
extern void irq_percpu_enable(struct irq_desc *desc, unsigned int cpu);
extern void irq_percpu_disable(struct irq_desc *desc, unsigned int cpu);
extern void mask_irq(struct irq_desc *desc);
extern void unmask_irq(struct irq_desc *desc);
extern void unmask_threaded_irq(struct irq_desc *desc);

#ifdef CONFIG_SPARSE_IRQ
static inline void irq_mark_irq(unsigned int irq) { }
#else
extern void irq_mark_irq(unsigned int irq);
#endif

extern void init_kstat_irqs(struct irq_desc *desc, int node, int nr);

irqreturn_t __handle_irq_event_percpu(struct irq_desc *desc, unsigned int *flags);
irqreturn_t handle_irq_event_percpu(struct irq_desc *desc);
irqreturn_t handle_irq_event(struct irq_desc *desc);

/* Resending of interrupts: */
void check_irq_resend(struct irq_desc *desc);
bool irq_wait_for_poll(struct irq_desc *desc);
void __irq_wake_thread(struct irq_desc *desc, struct irqaction *action);

#ifdef CONFIG_PROC_FS
extern void register_irq_proc(unsigned int irq, struct irq_desc *desc);
extern void unregister_irq_proc(unsigned int irq, struct irq_desc *desc);
extern void register_handler_proc(unsigned int irq, struct irqaction *action);
extern void unregister_handler_proc(unsigned int irq, struct irqaction *action);
#else
static inline void register_irq_proc(unsigned int irq, struct irq_desc *desc) { }
static inline void unregister_irq_proc(unsigned int irq, struct irq_desc *desc) { }
static inline void register_handler_proc(unsigned int irq,
					 struct irqaction *action) { }
static inline void unregister_handler_proc(unsigned int irq,
					   struct irqaction *action) { }
#endif

extern bool irq_can_set_affinity_usr(unsigned int irq);

extern int irq_select_affinity_usr(unsigned int irq);

extern void irq_set_thread_affinity(struct irq_desc *desc);

extern int irq_do_set_affinity(struct irq_data *data,
			       const struct cpumask *dest, bool force);

#ifdef CONFIG_SMP
extern int irq_setup_affinity(struct irq_desc *desc);
#else
static inline int irq_setup_affinity(struct irq_desc *desc) { return 0; }
#endif

/* Inline functions for support of irq chips on slow buses */
static inline void chip_bus_lock(struct irq_desc *desc)
{
	if (unlikely(desc->irq_data.chip->irq_bus_lock))
		desc->irq_data.chip->irq_bus_lock(&desc->irq_data);
}

static inline void chip_bus_sync_unlock(struct irq_desc *desc)
{
	if (unlikely(desc->irq_data.chip->irq_bus_sync_unlock))
		desc->irq_data.chip->irq_bus_sync_unlock(&desc->irq_data);
}
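
/*
 * Usage sketch (illustrative only): for an irq chip behind a slow bus
 * (e.g. I2C/SPI), chip accesses are bracketed so that the actual bus
 * transfer can happen in the sync_unlock step, outside of desc->lock:
 *
 *	chip_bus_lock(desc);
 *	raw_spin_lock_irqsave(&desc->lock, flags);
 *	... manipulate chip state ...
 *	raw_spin_unlock_irqrestore(&desc->lock, flags);
 *	chip_bus_sync_unlock(desc);
 */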

#define _IRQ_DESC_CHECK		(1 << 0)
#define _IRQ_DESC_PERCPU	(1 << 1)

#define IRQ_GET_DESC_CHECK_GLOBAL	(_IRQ_DESC_CHECK)
#define IRQ_GET_DESC_CHECK_PERCPU	(_IRQ_DESC_CHECK | _IRQ_DESC_PERCPU)

#define for_each_action_of_desc(desc, act)			\
	for (act = desc->action; act; act = act->next)
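
/*
 * For illustration: walking all actions sharing one descriptor
 * (callers hold desc->lock):
 *
 *	struct irqaction *action;
 *
 *	for_each_action_of_desc(desc, action)
 *		set_bit(IRQTF_AFFINITY, &action->thread_flags);
 */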

struct irq_desc *
__irq_get_desc_lock(unsigned int irq, unsigned long *flags, bool bus,
		    unsigned int check);
void __irq_put_desc_unlock(struct irq_desc *desc, unsigned long flags, bool bus);

static inline struct irq_desc *
irq_get_desc_buslock(unsigned int irq, unsigned long *flags, unsigned int check)
{
	return __irq_get_desc_lock(irq, flags, true, check);
}

static inline void
irq_put_desc_busunlock(struct irq_desc *desc, unsigned long flags)
{
	__irq_put_desc_unlock(desc, flags, true);
}

static inline struct irq_desc *
irq_get_desc_lock(unsigned int irq, unsigned long *flags, unsigned int check)
{
	return __irq_get_desc_lock(irq, flags, false, check);
}

static inline void
irq_put_desc_unlock(struct irq_desc *desc, unsigned long flags)
{
	__irq_put_desc_unlock(desc, flags, false);
}
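
/*
 * Typical usage (sketch): look up and lock the descriptor, bail out if
 * the number is invalid or fails the percpu/global check, and unlock
 * via the matching put function:
 *
 *	unsigned long flags;
 *	struct irq_desc *desc;
 *
 *	desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
 *	if (!desc)
 *		return -EINVAL;
 *	... operate on desc ...
 *	irq_put_desc_busunlock(desc, flags);
 */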

#define __irqd_to_state(d) ACCESS_PRIVATE((d)->common, state_use_accessors)

static inline unsigned int irqd_get(struct irq_data *d)
{
	return __irqd_to_state(d);
}

/*
 * Manipulation functions for irq_data.state
 */
static inline void irqd_set_move_pending(struct irq_data *d)
{
	__irqd_to_state(d) |= IRQD_SETAFFINITY_PENDING;
}

static inline void irqd_clr_move_pending(struct irq_data *d)
{
	__irqd_to_state(d) &= ~IRQD_SETAFFINITY_PENDING;
}

static inline void irqd_set_managed_shutdown(struct irq_data *d)
{
	__irqd_to_state(d) |= IRQD_MANAGED_SHUTDOWN;
}

static inline void irqd_clr_managed_shutdown(struct irq_data *d)
{
	__irqd_to_state(d) &= ~IRQD_MANAGED_SHUTDOWN;
}

static inline void irqd_clear(struct irq_data *d, unsigned int mask)
{
	__irqd_to_state(d) &= ~mask;
}

static inline void irqd_set(struct irq_data *d, unsigned int mask)
{
	__irqd_to_state(d) |= mask;
}

static inline bool irqd_has_set(struct irq_data *d, unsigned int mask)
{
	return __irqd_to_state(d) & mask;
}
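
/*
 * For illustration: these accessors are plain, non-atomic
 * read-modify-write operations on the irq_data state word, typically
 * done under desc->lock, e.g.:
 *
 *	if (!irqd_has_set(&desc->irq_data, IRQD_IRQ_MASKED))
 *		irqd_set(&desc->irq_data, IRQD_IRQ_MASKED);
 */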

static inline void irq_state_set_disabled(struct irq_desc *desc)
{
	irqd_set(&desc->irq_data, IRQD_IRQ_DISABLED);
}

static inline void irq_state_set_masked(struct irq_desc *desc)
{
	irqd_set(&desc->irq_data, IRQD_IRQ_MASKED);
}

#undef __irqd_to_state

static inline void kstat_incr_irqs_this_cpu(struct irq_desc *desc)
{
	__this_cpu_inc(*desc->kstat_irqs);
	__this_cpu_inc(kstat.irqs_sum);
}

static inline int irq_desc_get_node(struct irq_desc *desc)
{
	return irq_common_data_get_node(&desc->irq_common_data);
}

static inline int irq_desc_is_chained(struct irq_desc *desc)
{
	return (desc->action && desc->action == &chained_action);
}

#ifdef CONFIG_PM_SLEEP
bool irq_pm_check_wakeup(struct irq_desc *desc);
void irq_pm_install_action(struct irq_desc *desc, struct irqaction *action);
void irq_pm_remove_action(struct irq_desc *desc, struct irqaction *action);
#else
static inline bool irq_pm_check_wakeup(struct irq_desc *desc) { return false; }
static inline void
irq_pm_install_action(struct irq_desc *desc, struct irqaction *action) { }
static inline void
irq_pm_remove_action(struct irq_desc *desc, struct irqaction *action) { }
#endif

#ifdef CONFIG_IRQ_TIMINGS

#define IRQ_TIMINGS_SHIFT	5
#define IRQ_TIMINGS_SIZE	(1 << IRQ_TIMINGS_SHIFT)
#define IRQ_TIMINGS_MASK	(IRQ_TIMINGS_SIZE - 1)

/**
 * struct irq_timings - irq timings storing structure
 * @values: a circular buffer of u64 encoded <timestamp,irq> values
 * @count: the number of values stored; only the last IRQ_TIMINGS_SIZE
 *	   entries are kept, the write index is (count & IRQ_TIMINGS_MASK)
 */
struct irq_timings {
	u64	values[IRQ_TIMINGS_SIZE];
	int	count;
};

DECLARE_PER_CPU(struct irq_timings, irq_timings);

extern void irq_timings_free(int irq);
extern int irq_timings_alloc(int irq);

static inline void irq_remove_timings(struct irq_desc *desc)
{
	desc->istate &= ~IRQS_TIMINGS;

	irq_timings_free(irq_desc_get_irq(desc));
}

static inline void irq_setup_timings(struct irq_desc *desc, struct irqaction *act)
{
	int irq = irq_desc_get_irq(desc);
	int ret;

	/*
	 * We don't need the measurement because the idle code already
	 * knows the next expiry event.
	 */
	if (act->flags & __IRQF_TIMER)
		return;

	/*
	 * If the timing allocation fails, we just want to warn, not
	 * fail, so let the system boot anyway.
	 */
	ret = irq_timings_alloc(irq);
	if (ret) {
		pr_warn("Failed to allocate irq timing stats for irq%d (%d)\n",
			irq, ret);
		return;
	}

	desc->istate |= IRQS_TIMINGS;
}

extern void irq_timings_enable(void);
extern void irq_timings_disable(void);

DECLARE_STATIC_KEY_FALSE(irq_timing_enabled);

/*
 * The interrupt number and the timestamp are encoded into a single
 * u64 variable to optimize the size.
 * A 48-bit timestamp and a 16-bit IRQ number are more than sufficient:
 * who cares about an IRQ after 78 hours of idle time?
 */
static inline u64 irq_timing_encode(u64 timestamp, int irq)
{
	return (timestamp << 16) | irq;
}

static inline int irq_timing_decode(u64 value, u64 *timestamp)
{
	*timestamp = value >> 16;
	return value & U16_MAX;
}
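
/*
 * Encode/decode round trip for illustration (local_clock() returns
 * nanoseconds, and 2^48 ns is roughly 78 hours):
 *
 *	u64 ts, value = irq_timing_encode(local_clock(), 30);
 *	int irq = irq_timing_decode(value, &ts);
 *	// irq == 30, ts == the original timestamp truncated to 48 bits
 */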

/*
 * The function record_irq_time is only called in one place in the
 * interrupt handler. We want this function always inlined so that the
 * code is embedded in the caller and the static key branch can act at
 * the highest level. Without the explicit __always_inline we could end
 * up with a function call and a small overhead in the hot path for
 * nothing.
 */
static __always_inline void record_irq_time(struct irq_desc *desc)
{
	if (!static_branch_likely(&irq_timing_enabled))
		return;

	if (desc->istate & IRQS_TIMINGS) {
		struct irq_timings *timings = this_cpu_ptr(&irq_timings);

		timings->values[timings->count & IRQ_TIMINGS_MASK] =
			irq_timing_encode(local_clock(),
					  irq_desc_get_irq(desc));

		timings->count++;
	}
}
#else
static inline void irq_remove_timings(struct irq_desc *desc) {}
static inline void irq_setup_timings(struct irq_desc *desc,
				     struct irqaction *act) {}
static inline void record_irq_time(struct irq_desc *desc) {}
#endif /* CONFIG_IRQ_TIMINGS */


#ifdef CONFIG_GENERIC_IRQ_CHIP
void irq_init_generic_chip(struct irq_chip_generic *gc, const char *name,
			   int num_ct, unsigned int irq_base,
			   void __iomem *reg_base, irq_flow_handler_t handler);
#else
static inline void
irq_init_generic_chip(struct irq_chip_generic *gc, const char *name,
		      int num_ct, unsigned int irq_base,
		      void __iomem *reg_base, irq_flow_handler_t handler) { }
#endif /* CONFIG_GENERIC_IRQ_CHIP */

#ifdef CONFIG_GENERIC_PENDING_IRQ
static inline bool irq_can_move_pcntxt(struct irq_data *data)
{
	return irqd_can_move_in_process_context(data);
}
static inline bool irq_move_pending(struct irq_data *data)
{
	return irqd_is_setaffinity_pending(data);
}
static inline void
irq_copy_pending(struct irq_desc *desc, const struct cpumask *mask)
{
	cpumask_copy(desc->pending_mask, mask);
}
static inline void
irq_get_pending(struct cpumask *mask, struct irq_desc *desc)
{
	cpumask_copy(mask, desc->pending_mask);
}
static inline struct cpumask *irq_desc_get_pending_mask(struct irq_desc *desc)
{
	return desc->pending_mask;
}
bool irq_fixup_move_pending(struct irq_desc *desc, bool force_clear);
#else /* CONFIG_GENERIC_PENDING_IRQ */
static inline bool irq_can_move_pcntxt(struct irq_data *data)
{
	return true;
}
static inline bool irq_move_pending(struct irq_data *data)
{
	return false;
}
static inline void
irq_copy_pending(struct irq_desc *desc, const struct cpumask *mask)
{
}
static inline void
irq_get_pending(struct cpumask *mask, struct irq_desc *desc)
{
}
static inline struct cpumask *irq_desc_get_pending_mask(struct irq_desc *desc)
{
	return NULL;
}
static inline bool irq_fixup_move_pending(struct irq_desc *desc, bool fclear)
{
	return false;
}
#endif /* !CONFIG_GENERIC_PENDING_IRQ */
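
/*
 * Affinity deferral sketch (illustrative only): when an affinity
 * change cannot be performed in process context, the new mask is
 * parked in pending_mask and applied later from interrupt context,
 * roughly:
 *
 *	if (irq_can_move_pcntxt(data)) {
 *		ret = irq_do_set_affinity(data, mask, force);
 *	} else {
 *		irqd_set_move_pending(data);
 *		irq_copy_pending(desc, mask);
 *	}
 */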

#ifdef CONFIG_GENERIC_IRQ_DEBUGFS
#include <linux/debugfs.h>

void irq_add_debugfs_entry(unsigned int irq, struct irq_desc *desc);
static inline void irq_remove_debugfs_entry(struct irq_desc *desc)
{
	debugfs_remove(desc->debugfs_file);
}
# ifdef CONFIG_IRQ_DOMAIN
void irq_domain_debugfs_init(struct dentry *root);
# else
static inline void irq_domain_debugfs_init(struct dentry *root)
{
}
# endif
#else /* CONFIG_GENERIC_IRQ_DEBUGFS */
static inline void irq_add_debugfs_entry(unsigned int irq, struct irq_desc *d)
{
}
static inline void irq_remove_debugfs_entry(struct irq_desc *d)
{
}
#endif /* CONFIG_GENERIC_IRQ_DEBUGFS */