/*
 * linux/kernel/irq/chip.c
 *
 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
 * Copyright (C) 2005-2006, Thomas Gleixner, Russell King
 *
 * This file contains the core interrupt handling code, for irq-chip
 * based architectures.
 *
 * Detailed information is available in Documentation/DocBook/genericirq
 */

#include <linux/irq.h>
#include <linux/msi.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/irqdomain.h>

#include <trace/events/irq.h>

#include "internals.h"

static irqreturn_t bad_chained_irq(int irq, void *dev_id)
{
	WARN_ONCE(1, "Chained irq %d should not call an action\n", irq);
	return IRQ_NONE;
}

/*
 * Chained handlers should never call an action on their IRQ. This default
 * action will emit a warning if that happens.
 */
struct irqaction chained_action = {
	.handler = bad_chained_irq,
};

/**
 *	irq_set_chip - set the irq chip for an irq
 *	@irq:	irq number
 *	@chip:	pointer to irq chip description structure
 */
int irq_set_chip(unsigned int irq, struct irq_chip *chip)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);

	if (!desc)
		return -EINVAL;

	if (!chip)
		chip = &no_irq_chip;

	desc->irq_data.chip = chip;
	irq_put_desc_unlock(desc, flags);
	/*
	 * For !CONFIG_SPARSE_IRQ make the irq show up in
	 * allocated_irqs.
	 */
	irq_mark_irq(irq);
	return 0;
}
EXPORT_SYMBOL(irq_set_chip);
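
/*
 * Example (illustrative sketch, not part of this file): an irqchip
 * driver would typically pair irq_set_chip() with a flow handler for
 * each of its interrupt lines. The foo_* names are hypothetical.
 *
 *	static struct irq_chip foo_chip = {
 *		.name		= "FOO",
 *		.irq_mask	= foo_mask,
 *		.irq_unmask	= foo_unmask,
 *		.irq_ack	= foo_ack,
 *	};
 *
 *	for (i = 0; i < FOO_NR_IRQS; i++) {
 *		irq_set_chip(irq_base + i, &foo_chip);
 *		irq_set_handler(irq_base + i, handle_level_irq);
 *	}
 */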

/**
 *	irq_set_irq_type - set the irq trigger type for an irq
 *	@irq:	irq number
 *	@type:	IRQ_TYPE_{LEVEL,EDGE}_* value - see include/linux/irq.h
 */
int irq_set_irq_type(unsigned int irq, unsigned int type)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
	int ret = 0;

	if (!desc)
		return -EINVAL;

	ret = __irq_set_trigger(desc, type);
	irq_put_desc_busunlock(desc, flags);
	return ret;
}
EXPORT_SYMBOL(irq_set_irq_type);
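
/*
 * Example (sketch): a board file or driver requesting a rising-edge
 * trigger before requesting the interrupt:
 *
 *	ret = irq_set_irq_type(irq, IRQ_TYPE_EDGE_RISING);
 *	if (ret)
 *		return ret;
 *
 * Most drivers do this implicitly instead, by passing trigger flags
 * such as IRQF_TRIGGER_RISING to request_irq().
 */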

/**
 *	irq_set_handler_data - set irq handler data for an irq
 *	@irq:	Interrupt number
 *	@data:	Pointer to interrupt specific data
 *
 *	Set the handler data for an irq
 */
int irq_set_handler_data(unsigned int irq, void *data)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);

	if (!desc)
		return -EINVAL;
	desc->irq_common_data.handler_data = data;
	irq_put_desc_unlock(desc, flags);
	return 0;
}
EXPORT_SYMBOL(irq_set_handler_data);
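
/*
 * Example (sketch, foo_* names hypothetical): stash a per-irq cookie
 * here and retrieve it later in a flow handler via the descriptor:
 *
 *	irq_set_handler_data(irq, foo);
 *	...
 *	static void foo_demux_handler(struct irq_desc *desc)
 *	{
 *		struct foo *foo = irq_desc_get_handler_data(desc);
 *		...
 *	}
 */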

/**
 *	irq_set_msi_desc_off - set MSI descriptor data for an irq at offset
 *	@irq_base:	Interrupt number base
 *	@irq_offset:	Interrupt number offset
 *	@entry:		Pointer to MSI descriptor data
 *
 *	Set the MSI descriptor entry for an irq at offset
 */
int irq_set_msi_desc_off(unsigned int irq_base, unsigned int irq_offset,
			 struct msi_desc *entry)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq_base + irq_offset, &flags, IRQ_GET_DESC_CHECK_GLOBAL);

	if (!desc)
		return -EINVAL;
	desc->irq_common_data.msi_desc = entry;
	if (entry && !irq_offset)
		entry->irq = irq_base;
	irq_put_desc_unlock(desc, flags);
	return 0;
}

/**
 *	irq_set_msi_desc - set MSI descriptor data for an irq
 *	@irq:	Interrupt number
 *	@entry:	Pointer to MSI descriptor data
 *
 *	Set the MSI descriptor entry for an irq
 */
int irq_set_msi_desc(unsigned int irq, struct msi_desc *entry)
{
	return irq_set_msi_desc_off(irq, 0, entry);
}

/**
 *	irq_set_chip_data - set irq chip data for an irq
 *	@irq:	Interrupt number
 *	@data:	Pointer to chip specific data
 *
 *	Set the hardware irq chip data for an irq
 */
int irq_set_chip_data(unsigned int irq, void *data)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);

	if (!desc)
		return -EINVAL;
	desc->irq_data.chip_data = data;
	irq_put_desc_unlock(desc, flags);
	return 0;
}
EXPORT_SYMBOL(irq_set_chip_data);

struct irq_data *irq_get_irq_data(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	return desc ? &desc->irq_data : NULL;
}
EXPORT_SYMBOL_GPL(irq_get_irq_data);

static void irq_state_clr_disabled(struct irq_desc *desc)
{
	irqd_clear(&desc->irq_data, IRQD_IRQ_DISABLED);
}

static void irq_state_set_disabled(struct irq_desc *desc)
{
	irqd_set(&desc->irq_data, IRQD_IRQ_DISABLED);
}

static void irq_state_clr_masked(struct irq_desc *desc)
{
	irqd_clear(&desc->irq_data, IRQD_IRQ_MASKED);
}

static void irq_state_set_masked(struct irq_desc *desc)
{
	irqd_set(&desc->irq_data, IRQD_IRQ_MASKED);
}

static void irq_state_clr_started(struct irq_desc *desc)
{
	irqd_clear(&desc->irq_data, IRQD_IRQ_STARTED);
}

static void irq_state_set_started(struct irq_desc *desc)
{
	irqd_set(&desc->irq_data, IRQD_IRQ_STARTED);
}

int irq_startup(struct irq_desc *desc, bool resend)
{
	int ret = 0;

	desc->depth = 0;

	if (irqd_is_started(&desc->irq_data)) {
		irq_enable(desc);
	} else {
		irq_domain_activate_irq(&desc->irq_data);
		if (desc->irq_data.chip->irq_startup) {
			ret = desc->irq_data.chip->irq_startup(&desc->irq_data);
			irq_state_clr_disabled(desc);
			irq_state_clr_masked(desc);
		} else {
			irq_enable(desc);
		}
		irq_state_set_started(desc);
	}

	if (resend)
		check_irq_resend(desc);

	return ret;
}

static void __irq_disable(struct irq_desc *desc, bool mask);

void irq_shutdown(struct irq_desc *desc)
{
	if (irqd_is_started(&desc->irq_data)) {
		desc->depth = 1;
		if (desc->irq_data.chip->irq_shutdown) {
			desc->irq_data.chip->irq_shutdown(&desc->irq_data);
			irq_state_set_disabled(desc);
			irq_state_set_masked(desc);
		} else {
			__irq_disable(desc, true);
		}
		irq_state_clr_started(desc);
	}
	/*
	 * This must be called even if the interrupt was never started up,
	 * because the activation can happen before the interrupt is
	 * available for request/startup. It has its own state tracking so
	 * it's safe to call it unconditionally.
	 */
	irq_domain_deactivate_irq(&desc->irq_data);
}

void irq_enable(struct irq_desc *desc)
{
	irq_state_clr_disabled(desc);
	if (desc->irq_data.chip->irq_enable)
		desc->irq_data.chip->irq_enable(&desc->irq_data);
	else
		desc->irq_data.chip->irq_unmask(&desc->irq_data);
	irq_state_clr_masked(desc);
}

static void __irq_disable(struct irq_desc *desc, bool mask)
{
	irq_state_set_disabled(desc);
	if (desc->irq_data.chip->irq_disable) {
		desc->irq_data.chip->irq_disable(&desc->irq_data);
		irq_state_set_masked(desc);
	} else if (mask) {
		mask_irq(desc);
	}
}

/**
 * irq_disable - Mark interrupt disabled
 * @desc:	irq descriptor which should be disabled
 *
 * If the chip does not implement the irq_disable callback, we
 * use a lazy disable approach. That means we mark the interrupt
 * disabled, but leave the hardware unmasked. That's an
 * optimization because we avoid the hardware access for the
 * common case where no interrupt happens after we marked it
 * disabled. If an interrupt happens, then the interrupt flow
 * handler masks the line at the hardware level and marks it
 * pending.
 *
 * If the interrupt chip does not implement the irq_disable callback,
 * a driver can disable the lazy approach for a particular irq line by
 * calling 'irq_set_status_flags(irq, IRQ_DISABLE_UNLAZY)'. This can
 * be used for devices which cannot disable the interrupt at the
 * device level under certain circumstances and have to use
 * disable_irq[_nosync] instead.
 */
void irq_disable(struct irq_desc *desc)
{
	__irq_disable(desc, irq_settings_disable_unlazy(desc));
}
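
/*
 * Example (sketch): a driver whose device cannot gate the interrupt
 * at the source can opt out of the lazy disable logic described
 * above, so that disable_irq() masks at the chip immediately:
 *
 *	irq_set_status_flags(irq, IRQ_DISABLE_UNLAZY);
 *	...
 *	disable_irq(irq);
 */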

void irq_percpu_enable(struct irq_desc *desc, unsigned int cpu)
{
	if (desc->irq_data.chip->irq_enable)
		desc->irq_data.chip->irq_enable(&desc->irq_data);
	else
		desc->irq_data.chip->irq_unmask(&desc->irq_data);
	cpumask_set_cpu(cpu, desc->percpu_enabled);
}

void irq_percpu_disable(struct irq_desc *desc, unsigned int cpu)
{
	if (desc->irq_data.chip->irq_disable)
		desc->irq_data.chip->irq_disable(&desc->irq_data);
	else
		desc->irq_data.chip->irq_mask(&desc->irq_data);
	cpumask_clear_cpu(cpu, desc->percpu_enabled);
}

static inline void mask_ack_irq(struct irq_desc *desc)
{
	if (desc->irq_data.chip->irq_mask_ack)
		desc->irq_data.chip->irq_mask_ack(&desc->irq_data);
	else {
		desc->irq_data.chip->irq_mask(&desc->irq_data);
		if (desc->irq_data.chip->irq_ack)
			desc->irq_data.chip->irq_ack(&desc->irq_data);
	}
	irq_state_set_masked(desc);
}

void mask_irq(struct irq_desc *desc)
{
	if (desc->irq_data.chip->irq_mask) {
		desc->irq_data.chip->irq_mask(&desc->irq_data);
		irq_state_set_masked(desc);
	}
}

void unmask_irq(struct irq_desc *desc)
{
	if (desc->irq_data.chip->irq_unmask) {
		desc->irq_data.chip->irq_unmask(&desc->irq_data);
		irq_state_clr_masked(desc);
	}
}

void unmask_threaded_irq(struct irq_desc *desc)
{
	struct irq_chip *chip = desc->irq_data.chip;

	if (chip->flags & IRQCHIP_EOI_THREADED)
		chip->irq_eoi(&desc->irq_data);

	if (chip->irq_unmask) {
		chip->irq_unmask(&desc->irq_data);
		irq_state_clr_masked(desc);
	}
}
/*
 *	handle_nested_irq - Handle a nested irq from an irq thread
 *	@irq:	the interrupt number
 *
 *	Handle interrupts which are nested into a threaded interrupt
 *	handler. The handler function is called inside the calling
 *	thread's context.
 */
void handle_nested_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irqaction *action;
	irqreturn_t action_ret;

	might_sleep();

	raw_spin_lock_irq(&desc->lock);

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);

	action = desc->action;
	if (unlikely(!action || irqd_irq_disabled(&desc->irq_data))) {
		desc->istate |= IRQS_PENDING;
		goto out_unlock;
	}

	kstat_incr_irqs_this_cpu(desc);
	irqd_set(&desc->irq_data, IRQD_IRQ_INPROGRESS);
	raw_spin_unlock_irq(&desc->lock);

	action_ret = IRQ_NONE;
	for_each_action_of_desc(desc, action)
		action_ret |= action->thread_fn(action->irq, action->dev_id);

	if (!noirqdebug)
		note_interrupt(desc, action_ret);

	raw_spin_lock_irq(&desc->lock);
	irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);

out_unlock:
	raw_spin_unlock_irq(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_nested_irq);
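
/*
 * Example (illustrative sketch): a threaded handler for an I2C irq
 * expander demultiplexing into nested per-pin interrupts. The foo_*
 * names are hypothetical.
 *
 *	static irqreturn_t foo_irq_thread(int irq, void *data)
 *	{
 *		struct foo *foo = data;
 *		unsigned long pending = foo_read_pending(foo);
 *		int bit;
 *
 *		for_each_set_bit(bit, &pending, FOO_NR_PINS)
 *			handle_nested_irq(irq_find_mapping(foo->domain, bit));
 *		return IRQ_HANDLED;
 *	}
 *
 * The nested child interrupts would have been marked with
 * irq_set_nested_thread(child_irq, 1) at mapping time.
 */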

static bool irq_check_poll(struct irq_desc *desc)
{
	if (!(desc->istate & IRQS_POLL_INPROGRESS))
		return false;
	return irq_wait_for_poll(desc);
}

static bool irq_may_run(struct irq_desc *desc)
{
	unsigned int mask = IRQD_IRQ_INPROGRESS | IRQD_WAKEUP_ARMED;

	/*
	 * If the interrupt is not in progress and is not an armed
	 * wakeup interrupt, proceed.
	 */
	if (!irqd_has_set(&desc->irq_data, mask))
		return true;

	/*
	 * If the interrupt is an armed wakeup source, mark it pending
	 * and suspended, disable it and notify the pm core about the
	 * event.
	 */
	if (irq_pm_check_wakeup(desc))
		return false;

	/*
	 * Handle a potential concurrent poll on a different core.
	 */
	return irq_check_poll(desc);
}

/**
 *	handle_simple_irq - Simple and software-decoded IRQs.
 *	@desc:	the interrupt description structure for this irq
 *
 *	Simple interrupts are either sent from a demultiplexing interrupt
 *	handler or come from hardware, where no interrupt hardware control
 *	is necessary.
 *
 *	Note: The caller is expected to handle the ack, clear, mask and
 *	unmask issues if necessary.
 */
void handle_simple_irq(struct irq_desc *desc)
{
	raw_spin_lock(&desc->lock);

	if (!irq_may_run(desc))
		goto out_unlock;

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);

	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
		desc->istate |= IRQS_PENDING;
		goto out_unlock;
	}

	kstat_incr_irqs_this_cpu(desc);
	handle_irq_event(desc);

out_unlock:
	raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_simple_irq);
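
/*
 * Example (sketch): children of a demultiplexer which need no hardware
 * control can use the kernel's dummy_irq_chip together with this
 * handler:
 *
 *	irq_set_chip_and_handler(child_irq, &dummy_irq_chip,
 *				 handle_simple_irq);
 *
 * The demultiplexer then delivers them with generic_handle_irq().
 */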

/**
 *	handle_untracked_irq - Simple and software-decoded IRQs.
 *	@desc:	the interrupt description structure for this irq
 *
 *	Untracked interrupts are sent from a demultiplexing interrupt
 *	handler when the demultiplexer does not know which device in its
 *	multiplexed irq domain generated the interrupt. IRQs handled
 *	through here are not subjected to stats tracking, randomness, or
 *	spurious interrupt detection.
 *
 *	Note: Like handle_simple_irq, the caller is expected to handle
 *	the ack, clear, mask and unmask issues if necessary.
 */
void handle_untracked_irq(struct irq_desc *desc)
{
	unsigned int flags = 0;

	raw_spin_lock(&desc->lock);

	if (!irq_may_run(desc))
		goto out_unlock;

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);

	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
		desc->istate |= IRQS_PENDING;
		goto out_unlock;
	}

	desc->istate &= ~IRQS_PENDING;
	irqd_set(&desc->irq_data, IRQD_IRQ_INPROGRESS);
	raw_spin_unlock(&desc->lock);

	__handle_irq_event_percpu(desc, &flags);

	raw_spin_lock(&desc->lock);
	irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);

out_unlock:
	raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_untracked_irq);

/*
 * Called unconditionally from handle_level_irq() and only for oneshot
 * interrupts from handle_fasteoi_irq()
 */
static void cond_unmask_irq(struct irq_desc *desc)
{
	/*
	 * We need to unmask in the following cases:
	 * - Standard level irq (IRQF_ONESHOT is not set)
	 * - Oneshot irq which did not wake the thread (caused by a
	 *   spurious interrupt or a primary handler handling it
	 *   completely).
	 */
	if (!irqd_irq_disabled(&desc->irq_data) &&
	    irqd_irq_masked(&desc->irq_data) && !desc->threads_oneshot)
		unmask_irq(desc);
}

/**
 *	handle_level_irq - Level type irq handler
 *	@desc:	the interrupt description structure for this irq
 *
 *	Level type interrupts are active as long as the hardware line has
 *	the active level. This may require to mask the interrupt and unmask
 *	it after the associated handler has acknowledged the device, so the
 *	interrupt line is back to inactive.
 */
void handle_level_irq(struct irq_desc *desc)
{
	raw_spin_lock(&desc->lock);
	mask_ack_irq(desc);

	if (!irq_may_run(desc))
		goto out_unlock;

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);

	/*
	 * If it is disabled or no action is available,
	 * keep it masked and get out of here
	 */
	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
		desc->istate |= IRQS_PENDING;
		goto out_unlock;
	}

	kstat_incr_irqs_this_cpu(desc);
	handle_irq_event(desc);

	cond_unmask_irq(desc);

out_unlock:
	raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_level_irq);

#ifdef CONFIG_IRQ_PREFLOW_FASTEOI
static inline void preflow_handler(struct irq_desc *desc)
{
	if (desc->preflow_handler)
		desc->preflow_handler(&desc->irq_data);
}
#else
static inline void preflow_handler(struct irq_desc *desc) { }
#endif

static void cond_unmask_eoi_irq(struct irq_desc *desc, struct irq_chip *chip)
{
	if (!(desc->istate & IRQS_ONESHOT)) {
		chip->irq_eoi(&desc->irq_data);
		return;
	}
	/*
	 * We need to unmask in the following cases:
	 * - Oneshot irq which did not wake the thread (caused by a
	 *   spurious interrupt or a primary handler handling it
	 *   completely).
	 */
	if (!irqd_irq_disabled(&desc->irq_data) &&
	    irqd_irq_masked(&desc->irq_data) && !desc->threads_oneshot) {
		chip->irq_eoi(&desc->irq_data);
		unmask_irq(desc);
	} else if (!(chip->flags & IRQCHIP_EOI_THREADED)) {
		chip->irq_eoi(&desc->irq_data);
	}
}

/**
 *	handle_fasteoi_irq - irq handler for transparent controllers
 *	@desc:	the interrupt description structure for this irq
 *
 *	Only a single callback will be issued to the chip: an ->eoi()
 *	call when the interrupt has been serviced. This enables support
 *	for modern forms of interrupt handlers, which handle the flow
 *	details in hardware, transparently.
 */
void handle_fasteoi_irq(struct irq_desc *desc)
{
	struct irq_chip *chip = desc->irq_data.chip;

	raw_spin_lock(&desc->lock);

	if (!irq_may_run(desc))
		goto out;

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);

	/*
	 * If it is disabled or no action is available,
	 * then mask it and get out of here:
	 */
	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
		desc->istate |= IRQS_PENDING;
		mask_irq(desc);
		goto out;
	}

	kstat_incr_irqs_this_cpu(desc);
	if (desc->istate & IRQS_ONESHOT)
		mask_irq(desc);

	preflow_handler(desc);
	handle_irq_event(desc);

	cond_unmask_eoi_irq(desc, chip);

	raw_spin_unlock(&desc->lock);
	return;
out:
	if (!(chip->flags & IRQCHIP_EOI_IF_HANDLED))
		chip->irq_eoi(&desc->irq_data);
	raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_fasteoi_irq);

/**
 *	handle_edge_irq - edge type IRQ handler
 *	@desc:	the interrupt description structure for this irq
 *
 *	Interrupts occur on the falling and/or rising edge of a hardware
 *	signal. The occurrence is latched into the irq controller hardware
 *	and must be acked in order to be reenabled. After the ack another
 *	interrupt can happen on the same source even before the first one
 *	is handled by the associated event handler. If this happens it
 *	might be necessary to disable (mask) the interrupt depending on the
 *	controller hardware. This requires re-enabling the interrupt inside
 *	of the loop which handles the interrupts which have arrived while
 *	the handler was running. If all pending interrupts are handled, the
 *	loop is left.
 */
void handle_edge_irq(struct irq_desc *desc)
{
	raw_spin_lock(&desc->lock);

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);

	if (!irq_may_run(desc)) {
		desc->istate |= IRQS_PENDING;
		mask_ack_irq(desc);
		goto out_unlock;
	}

	/*
	 * If it is disabled or no action is available then mask it and
	 * get out of here.
	 */
	if (irqd_irq_disabled(&desc->irq_data) || !desc->action) {
		desc->istate |= IRQS_PENDING;
		mask_ack_irq(desc);
		goto out_unlock;
	}

	kstat_incr_irqs_this_cpu(desc);

	/* Start handling the irq */
	desc->irq_data.chip->irq_ack(&desc->irq_data);

	do {
		if (unlikely(!desc->action)) {
			mask_irq(desc);
			goto out_unlock;
		}

		/*
		 * When another irq arrived while we were handling
		 * one, we could have masked the irq.
		 * Re-enable it, if it was not disabled in the meantime.
		 */
		if (unlikely(desc->istate & IRQS_PENDING)) {
			if (!irqd_irq_disabled(&desc->irq_data) &&
			    irqd_irq_masked(&desc->irq_data))
				unmask_irq(desc);
		}

		handle_irq_event(desc);

	} while ((desc->istate & IRQS_PENDING) &&
		 !irqd_irq_disabled(&desc->irq_data));

out_unlock:
	raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL(handle_edge_irq);
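
/*
 * Example (sketch): installing this handler requires a chip with a
 * working irq_ack callback, since the flow above acks the latched
 * edge before running the action (foo_chip is hypothetical):
 *
 *	irq_set_chip_and_handler(irq, &foo_chip, handle_edge_irq);
 */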

#ifdef CONFIG_IRQ_EDGE_EOI_HANDLER
/**
 *	handle_edge_eoi_irq - edge eoi type IRQ handler
 *	@desc:	the interrupt description structure for this irq
 *
 *	Similar to the above handle_edge_irq, but using eoi and w/o the
 *	mask/unmask logic.
 */
void handle_edge_eoi_irq(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);

	raw_spin_lock(&desc->lock);

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);

	if (!irq_may_run(desc)) {
		desc->istate |= IRQS_PENDING;
		goto out_eoi;
	}

	/*
	 * If it is disabled or no action is available then mark it
	 * pending and get out of here.
	 */
	if (irqd_irq_disabled(&desc->irq_data) || !desc->action) {
		desc->istate |= IRQS_PENDING;
		goto out_eoi;
	}

	kstat_incr_irqs_this_cpu(desc);

	do {
		if (unlikely(!desc->action))
			goto out_eoi;

		handle_irq_event(desc);

	} while ((desc->istate & IRQS_PENDING) &&
		 !irqd_irq_disabled(&desc->irq_data));

out_eoi:
	chip->irq_eoi(&desc->irq_data);
	raw_spin_unlock(&desc->lock);
}
#endif

/**
 *	handle_percpu_irq - Per CPU local irq handler
 *	@desc:	the interrupt description structure for this irq
 *
 *	Per CPU interrupts on SMP machines without locking requirements
 */
void handle_percpu_irq(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);

	kstat_incr_irqs_this_cpu(desc);

	if (chip->irq_ack)
		chip->irq_ack(&desc->irq_data);

	handle_irq_event_percpu(desc);

	if (chip->irq_eoi)
		chip->irq_eoi(&desc->irq_data);
}

/**
 *	handle_percpu_devid_irq - Per CPU local irq handler with per cpu dev ids
 *	@desc:	the interrupt description structure for this irq
 *
 *	Per CPU interrupts on SMP machines without locking requirements. Same as
 *	handle_percpu_irq() above but with the following extras:
 *
 *	action->percpu_dev_id is a pointer to percpu variables which
 *	contain the real device id for the cpu on which this handler is
 *	called
 */
void handle_percpu_devid_irq(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct irqaction *action = desc->action;
	unsigned int irq = irq_desc_get_irq(desc);
	irqreturn_t res;

	kstat_incr_irqs_this_cpu(desc);

	if (chip->irq_ack)
		chip->irq_ack(&desc->irq_data);

	if (likely(action)) {
		trace_irq_handler_entry(irq, action);
		res = action->handler(irq, raw_cpu_ptr(action->percpu_dev_id));
		trace_irq_handler_exit(irq, action, res);
	} else {
		unsigned int cpu = smp_processor_id();
		bool enabled = cpumask_test_cpu(cpu, desc->percpu_enabled);

		if (enabled)
			irq_percpu_disable(desc, cpu);

		pr_err_once("Spurious%s percpu IRQ%u on CPU%u\n",
			    enabled ? " and unmasked" : "", irq, cpu);
	}

	if (chip->irq_eoi)
		chip->irq_eoi(&desc->irq_data);
}
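
/*
 * Example (sketch): per cpu dev id interrupts are marked with
 * irq_set_percpu_devid() and requested with a percpu dev_id, e.g.
 * for a per-CPU timer (foo_* names hypothetical):
 *
 *	static DEFINE_PER_CPU(struct foo_timer, foo_timer);
 *
 *	irq_set_percpu_devid(irq);
 *	err = request_percpu_irq(irq, foo_timer_handler, "foo_timer",
 *				 &foo_timer);
 */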

static void
__irq_do_set_handler(struct irq_desc *desc, irq_flow_handler_t handle,
		     int is_chained, const char *name)
{
	if (!handle) {
		handle = handle_bad_irq;
	} else {
		struct irq_data *irq_data = &desc->irq_data;
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
		/*
		 * With hierarchical domains we might run into a
		 * situation where the outermost chip is not yet set
		 * up, but the inner chips are there. Instead of
		 * bailing we install the handler, but obviously we
		 * cannot enable/startup the interrupt at this point.
		 */
		while (irq_data) {
			if (irq_data->chip != &no_irq_chip)
				break;
			/*
			 * Bail out if the outer chip is not set up
			 * and the interrupt is supposed to be started
			 * right away.
			 */
			if (WARN_ON(is_chained))
				return;
			/* Try the parent */
			irq_data = irq_data->parent_data;
		}
#endif
		if (WARN_ON(!irq_data || irq_data->chip == &no_irq_chip))
			return;
	}

	/* Uninstall? */
	if (handle == handle_bad_irq) {
		if (desc->irq_data.chip != &no_irq_chip)
			mask_ack_irq(desc);
		irq_state_set_disabled(desc);
		if (is_chained)
			desc->action = NULL;
		desc->depth = 1;
	}
	desc->handle_irq = handle;
	desc->name = name;

	if (handle != handle_bad_irq && is_chained) {
		unsigned int type = irqd_get_trigger_type(&desc->irq_data);

		/*
		 * We're about to start this interrupt immediately,
		 * hence the need to set the trigger configuration.
		 * But the .set_type callback may have overridden the
		 * flow handler, ignoring that we're dealing with a
		 * chained interrupt. Reset it immediately because we
		 * do know better.
		 */
		if (type != IRQ_TYPE_NONE) {
			__irq_set_trigger(desc, type);
			desc->handle_irq = handle;
		}

		irq_settings_set_noprobe(desc);
		irq_settings_set_norequest(desc);
		irq_settings_set_nothread(desc);
		desc->action = &chained_action;
		irq_startup(desc, true);
	}
}

void
__irq_set_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
		  const char *name)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, 0);

	if (!desc)
		return;

	__irq_do_set_handler(desc, handle, is_chained, name);
	irq_put_desc_busunlock(desc, flags);
}
EXPORT_SYMBOL_GPL(__irq_set_handler);

void
irq_set_chained_handler_and_data(unsigned int irq, irq_flow_handler_t handle,
				 void *data)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, 0);

	if (!desc)
		return;

	desc->irq_common_data.handler_data = data;
	__irq_do_set_handler(desc, handle, 1, NULL);

	irq_put_desc_busunlock(desc, flags);
}
EXPORT_SYMBOL_GPL(irq_set_chained_handler_and_data);
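
/*
 * Example (sketch): a GPIO bank driver hooking its parent interrupt
 * with a chained flow handler and a private pointer (foo_* names are
 * hypothetical). The chained handler runs in hard interrupt context:
 *
 *	static void foo_gpio_demux(struct irq_desc *desc)
 *	{
 *		struct foo_bank *bank = irq_desc_get_handler_data(desc);
 *		struct irq_chip *chip = irq_desc_get_chip(desc);
 *
 *		chained_irq_enter(chip, desc);
 *		// read the pending bits and generic_handle_irq() each child
 *		chained_irq_exit(chip, desc);
 *	}
 *
 *	irq_set_chained_handler_and_data(parent_irq, foo_gpio_demux, bank);
 */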

void
irq_set_chip_and_handler_name(unsigned int irq, struct irq_chip *chip,
			      irq_flow_handler_t handle, const char *name)
{
	irq_set_chip(irq, chip);
	__irq_set_handler(irq, handle, 0, name);
}
EXPORT_SYMBOL_GPL(irq_set_chip_and_handler_name);

void irq_modify_status(unsigned int irq, unsigned long clr, unsigned long set)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);

	if (!desc)
		return;

	/*
	 * Warn when a driver sets the no autoenable flag on an already
	 * active interrupt.
	 */
	WARN_ON_ONCE(!desc->depth && (set & _IRQ_NOAUTOEN));

	irq_settings_clr_and_set(desc, clr, set);

	irqd_clear(&desc->irq_data, IRQD_NO_BALANCING | IRQD_PER_CPU |
		   IRQD_TRIGGER_MASK | IRQD_LEVEL | IRQD_MOVE_PCNTXT);
	if (irq_settings_has_no_balance_set(desc))
		irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
	if (irq_settings_is_per_cpu(desc))
		irqd_set(&desc->irq_data, IRQD_PER_CPU);
	if (irq_settings_can_move_pcntxt(desc))
		irqd_set(&desc->irq_data, IRQD_MOVE_PCNTXT);
	if (irq_settings_is_level(desc))
		irqd_set(&desc->irq_data, IRQD_LEVEL);

	irqd_set(&desc->irq_data, irq_settings_get_trigger_mask(desc));

	irq_put_desc_unlock(desc, flags);
}
EXPORT_SYMBOL_GPL(irq_modify_status);
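
/*
 * Example (sketch): mark an irq line per-cpu, non-probeable and
 * excluded from autoenable in one call:
 *
 *	irq_modify_status(irq, 0, IRQ_PER_CPU | IRQ_NOPROBE | IRQ_NOAUTOEN);
 *
 * The convenience wrappers irq_set_status_flags() and
 * irq_clear_status_flags() are built on top of this function.
 */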

/**
 *	irq_cpu_online - Invoke all irq_cpu_online functions.
 *
 *	Iterate through all irqs and invoke the chip.irq_cpu_online()
 *	for each.
 */
void irq_cpu_online(void)
{
	struct irq_desc *desc;
	struct irq_chip *chip;
	unsigned long flags;
	unsigned int irq;

	for_each_active_irq(irq) {
		desc = irq_to_desc(irq);
		if (!desc)
			continue;

		raw_spin_lock_irqsave(&desc->lock, flags);

		chip = irq_data_get_irq_chip(&desc->irq_data);
		if (chip && chip->irq_cpu_online &&
		    (!(chip->flags & IRQCHIP_ONOFFLINE_ENABLED) ||
		     !irqd_irq_disabled(&desc->irq_data)))
			chip->irq_cpu_online(&desc->irq_data);

		raw_spin_unlock_irqrestore(&desc->lock, flags);
	}
}

/**
 *	irq_cpu_offline - Invoke all irq_cpu_offline functions.
 *
 *	Iterate through all irqs and invoke the chip.irq_cpu_offline()
 *	for each.
 */
void irq_cpu_offline(void)
{
	struct irq_desc *desc;
	struct irq_chip *chip;
	unsigned long flags;
	unsigned int irq;

	for_each_active_irq(irq) {
		desc = irq_to_desc(irq);
		if (!desc)
			continue;

		raw_spin_lock_irqsave(&desc->lock, flags);

		chip = irq_data_get_irq_chip(&desc->irq_data);
		if (chip && chip->irq_cpu_offline &&
		    (!(chip->flags & IRQCHIP_ONOFFLINE_ENABLED) ||
		     !irqd_irq_disabled(&desc->irq_data)))
			chip->irq_cpu_offline(&desc->irq_data);

		raw_spin_unlock_irqrestore(&desc->lock, flags);
	}
}

#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
/**
 * irq_chip_enable_parent - Enable the parent interrupt (defaults to unmask
 * if irq_enable is NULL)
 * @data:	Pointer to interrupt specific data
 */
void irq_chip_enable_parent(struct irq_data *data)
{
	data = data->parent_data;
	if (data->chip->irq_enable)
		data->chip->irq_enable(data);
	else
		data->chip->irq_unmask(data);
}

/**
 * irq_chip_disable_parent - Disable the parent interrupt (defaults to mask
 * if irq_disable is NULL)
 * @data:	Pointer to interrupt specific data
 */
void irq_chip_disable_parent(struct irq_data *data)
{
	data = data->parent_data;
	if (data->chip->irq_disable)
		data->chip->irq_disable(data);
	else
		data->chip->irq_mask(data);
}

/**
 * irq_chip_ack_parent - Acknowledge the parent interrupt
 * @data:	Pointer to interrupt specific data
 */
void irq_chip_ack_parent(struct irq_data *data)
{
	data = data->parent_data;
	data->chip->irq_ack(data);
}
EXPORT_SYMBOL_GPL(irq_chip_ack_parent);

/**
 * irq_chip_mask_parent - Mask the parent interrupt
 * @data:	Pointer to interrupt specific data
 */
void irq_chip_mask_parent(struct irq_data *data)
{
	data = data->parent_data;
	data->chip->irq_mask(data);
}
EXPORT_SYMBOL_GPL(irq_chip_mask_parent);

/**
 * irq_chip_unmask_parent - Unmask the parent interrupt
 * @data:	Pointer to interrupt specific data
 */
void irq_chip_unmask_parent(struct irq_data *data)
{
	data = data->parent_data;
	data->chip->irq_unmask(data);
}
EXPORT_SYMBOL_GPL(irq_chip_unmask_parent);

/**
 * irq_chip_eoi_parent - Invoke EOI on the parent interrupt
 * @data:	Pointer to interrupt specific data
 */
void irq_chip_eoi_parent(struct irq_data *data)
{
	data = data->parent_data;
	data->chip->irq_eoi(data);
}
EXPORT_SYMBOL_GPL(irq_chip_eoi_parent);

/**
 * irq_chip_set_affinity_parent - Set affinity on the parent interrupt
 * @data:	Pointer to interrupt specific data
 * @dest:	The affinity mask to set
 * @force:	Flag to enforce setting (disable online checks)
 *
 * Conditional, as the underlying parent chip might not implement it.
 */
int irq_chip_set_affinity_parent(struct irq_data *data,
				 const struct cpumask *dest, bool force)
{
	data = data->parent_data;
	if (data->chip->irq_set_affinity)
		return data->chip->irq_set_affinity(data, dest, force);

	return -ENOSYS;
}
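
/*
 * Example (sketch): a hierarchical irqchip with no flow control of its
 * own can simply delegate to its parent with these helpers (the foo
 * names are hypothetical):
 *
 *	static struct irq_chip foo_msi_chip = {
 *		.name			= "FOO-MSI",
 *		.irq_mask		= irq_chip_mask_parent,
 *		.irq_unmask		= irq_chip_unmask_parent,
 *		.irq_eoi		= irq_chip_eoi_parent,
 *		.irq_set_affinity	= irq_chip_set_affinity_parent,
 *	};
 */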

/**
 * irq_chip_set_type_parent - Set IRQ type on the parent interrupt
 * @data:	Pointer to interrupt specific data
 * @type:	IRQ_TYPE_{LEVEL,EDGE}_* value - see include/linux/irq.h
 *
 * Conditional, as the underlying parent chip might not implement it.
 */
int irq_chip_set_type_parent(struct irq_data *data, unsigned int type)
{
	data = data->parent_data;

	if (data->chip->irq_set_type)
		return data->chip->irq_set_type(data, type);

	return -ENOSYS;
}
EXPORT_SYMBOL_GPL(irq_chip_set_type_parent);

/**
 * irq_chip_retrigger_hierarchy - Retrigger an interrupt in hardware
 * @data:	Pointer to interrupt specific data
 *
 * Iterate through the domain hierarchy of the interrupt and check
 * whether a hw retrigger function exists. If yes, invoke it.
 */
int irq_chip_retrigger_hierarchy(struct irq_data *data)
{
	for (data = data->parent_data; data; data = data->parent_data)
		if (data->chip && data->chip->irq_retrigger)
			return data->chip->irq_retrigger(data);

	return 0;
}

/**
 * irq_chip_set_vcpu_affinity_parent - Set vcpu affinity on the parent interrupt
 * @data:	Pointer to interrupt specific data
 * @vcpu_info:	The vcpu affinity information
 */
int irq_chip_set_vcpu_affinity_parent(struct irq_data *data, void *vcpu_info)
{
	data = data->parent_data;
	if (data->chip->irq_set_vcpu_affinity)
		return data->chip->irq_set_vcpu_affinity(data, vcpu_info);

	return -ENOSYS;
}

/**
 * irq_chip_set_wake_parent - Set/reset wake-up on the parent interrupt
 * @data:	Pointer to interrupt specific data
 * @on:		Whether to set or reset the wake-up capability of this irq
 *
 * Conditional, as the underlying parent chip might not implement it.
 */
int irq_chip_set_wake_parent(struct irq_data *data, unsigned int on)
{
	data = data->parent_data;
	if (data->chip->irq_set_wake)
		return data->chip->irq_set_wake(data, on);

	return -ENOSYS;
}
#endif

/**
 * irq_chip_compose_msi_msg - Compose an MSI message for an irq chip
 * @data:	Pointer to interrupt specific data
 * @msg:	Pointer to the MSI message
 *
 * For hierarchical domains we find the first chip in the hierarchy
 * which implements the irq_compose_msi_msg callback. For
 * non-hierarchical domains we use the top level chip.
 */
int irq_chip_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
{
	struct irq_data *pos = NULL;

#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
	for (; data; data = data->parent_data)
#endif
		if (data->chip && data->chip->irq_compose_msi_msg)
			pos = data;
	if (!pos)
		return -ENOSYS;

	pos->chip->irq_compose_msi_msg(pos, msg);

	return 0;
}

/**
 * irq_chip_pm_get - Enable power for an IRQ chip
 * @data:	Pointer to interrupt specific data
 *
 * Enable the power to the IRQ chip referenced by the interrupt data
 * structure.
 */
int irq_chip_pm_get(struct irq_data *data)
{
	int retval;

	if (IS_ENABLED(CONFIG_PM) && data->chip->parent_device) {
		retval = pm_runtime_get_sync(data->chip->parent_device);
		if (retval < 0) {
			pm_runtime_put_noidle(data->chip->parent_device);
			return retval;
		}
	}

	return 0;
}

/**
 * irq_chip_pm_put - Disable power for an IRQ chip
 * @data:	Pointer to interrupt specific data
 *
 * Disable the power to the IRQ chip referenced by the interrupt data
 * structure. Note that power will only be disabled once this function
 * has been called for all IRQs that have called irq_chip_pm_get().
 */
int irq_chip_pm_put(struct irq_data *data)
{
	int retval = 0;

	if (IS_ENABLED(CONFIG_PM) && data->chip->parent_device)
		retval = pm_runtime_put(data->chip->parent_device);

	return (retval < 0) ? retval : 0;
}