// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
 * Copyright (C) 2005-2006 Thomas Gleixner
 *
 * This file contains driver APIs to the irq subsystem.
 */

#define pr_fmt(fmt) "genirq: " fmt

#include <linux/irq.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/sched/rt.h>
#include <linux/sched/task.h>
#include <uapi/linux/sched/types.h>
#include <linux/task_work.h>

#include "internals.h"

#ifdef CONFIG_IRQ_FORCED_THREADING
__read_mostly bool force_irqthreads;
EXPORT_SYMBOL_GPL(force_irqthreads);

static int __init setup_forced_irqthreads(char *arg)
{
	force_irqthreads = true;
	return 0;
}
early_param("threadirqs", setup_forced_irqthreads);
#endif

static void __synchronize_hardirq(struct irq_desc *desc)
{
	bool inprogress;

	do {
		unsigned long flags;

		/*
		 * Wait until we're out of the critical section. This might
		 * give the wrong answer due to the lack of memory barriers.
		 */
		while (irqd_irq_inprogress(&desc->irq_data))
			cpu_relax();

		/* Ok, that indicated we're done: double-check carefully. */
		raw_spin_lock_irqsave(&desc->lock, flags);
		inprogress = irqd_irq_inprogress(&desc->irq_data);
		raw_spin_unlock_irqrestore(&desc->lock, flags);

		/* Oops, that failed? */
	} while (inprogress);
}

/**
 * synchronize_hardirq - wait for pending hard IRQ handlers (on other CPUs)
 * @irq: interrupt number to wait for
 *
 * This function waits for any pending hard IRQ handlers for this
 * interrupt to complete before returning. If you use this
 * function while holding a resource the IRQ handler may need, you
 * will deadlock. It does not take associated threaded handlers
 * into account.
 *
 * Do not use this for shutdown scenarios where you must be sure
 * that all parts (hardirq and threaded handler) have completed.
 *
 * Returns: false if a threaded handler is active.
 *
 * This function may be called - with care - from IRQ context.
 */
bool synchronize_hardirq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (desc) {
		__synchronize_hardirq(desc);
		return !atomic_read(&desc->threads_active);
	}

	return true;
}
EXPORT_SYMBOL(synchronize_hardirq);

/**
 * synchronize_irq - wait for pending IRQ handlers (on other CPUs)
 * @irq: interrupt number to wait for
 *
 * This function waits for any pending IRQ handlers for this interrupt
 * to complete before returning. If you use this function while
 * holding a resource the IRQ handler may need, you will deadlock.
 *
 * This function may be called - with care - from IRQ context.
 */
void synchronize_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (desc) {
		__synchronize_hardirq(desc);
		/*
		 * We made sure that no hardirq handler is
		 * running. Now verify that no threaded handlers are
		 * active.
		 */
		wait_event(desc->wait_for_threads,
			   !atomic_read(&desc->threads_active));
	}
}
EXPORT_SYMBOL(synchronize_irq);

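/*
 * Illustrative sketch (not part of this file): a typical teardown path in a
 * hypothetical driver that quiesces its interrupt before touching state the
 * handler uses. The example_dev structure, its fields and the helpers are
 * assumptions made up for this example.
 *
 *	static void example_shutdown(struct example_dev *dev)
 *	{
 *		disable_irq_nosync(dev->irq);
 *		synchronize_irq(dev->irq);	// waits for hardirq and threaded handlers
 *		// handler can no longer run here; safe to tear down shared state
 *		free_irq(dev->irq, dev);
 *	}
 */
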
#ifdef CONFIG_SMP
cpumask_var_t irq_default_affinity;

static bool __irq_can_set_affinity(struct irq_desc *desc)
{
	if (!desc || !irqd_can_balance(&desc->irq_data) ||
	    !desc->irq_data.chip || !desc->irq_data.chip->irq_set_affinity)
		return false;
	return true;
}

/**
 * irq_can_set_affinity - Check if the affinity of a given irq can be set
 * @irq: Interrupt to check
 *
 */
int irq_can_set_affinity(unsigned int irq)
{
	return __irq_can_set_affinity(irq_to_desc(irq));
}

/**
 * irq_can_set_affinity_usr - Check if the affinity of an irq can be set from user space
 * @irq: Interrupt to check
 *
 * Like irq_can_set_affinity() above, but additionally checks for the
 * AFFINITY_MANAGED flag.
 */
bool irq_can_set_affinity_usr(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	return __irq_can_set_affinity(desc) &&
		!irqd_affinity_is_managed(&desc->irq_data);
}

/**
 * irq_set_thread_affinity - Notify irq threads to adjust affinity
 * @desc: irq descriptor which has affinity changed
 *
 * We just set IRQTF_AFFINITY and delegate the affinity setting
 * to the interrupt thread itself. We can not call
 * set_cpus_allowed_ptr() here as we hold desc->lock and this
 * code can be called from hard interrupt context.
 */
void irq_set_thread_affinity(struct irq_desc *desc)
{
	struct irqaction *action;

	for_each_action_of_desc(desc, action)
		if (action->thread)
			set_bit(IRQTF_AFFINITY, &action->thread_flags);
}

static void irq_validate_effective_affinity(struct irq_data *data)
{
#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
	const struct cpumask *m = irq_data_get_effective_affinity_mask(data);
	struct irq_chip *chip = irq_data_get_irq_chip(data);

	if (!cpumask_empty(m))
		return;
	pr_warn_once("irq_chip %s did not update eff. affinity mask of irq %u\n",
		     chip->name, data->irq);
#endif
}

int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask,
			bool force)
{
	struct irq_desc *desc = irq_data_to_desc(data);
	struct irq_chip *chip = irq_data_get_irq_chip(data);
	int ret;

	if (!chip || !chip->irq_set_affinity)
		return -EINVAL;

	ret = chip->irq_set_affinity(data, mask, force);
	switch (ret) {
	case IRQ_SET_MASK_OK:
	case IRQ_SET_MASK_OK_DONE:
		cpumask_copy(desc->irq_common_data.affinity, mask);
	case IRQ_SET_MASK_OK_NOCOPY:
		irq_validate_effective_affinity(data);
		irq_set_thread_affinity(desc);
		ret = 0;
	}

	return ret;
}

#ifdef CONFIG_GENERIC_PENDING_IRQ
static inline int irq_set_affinity_pending(struct irq_data *data,
					   const struct cpumask *dest)
{
	struct irq_desc *desc = irq_data_to_desc(data);

	irqd_set_move_pending(data);
	irq_copy_pending(desc, dest);
	return 0;
}
#else
static inline int irq_set_affinity_pending(struct irq_data *data,
					   const struct cpumask *dest)
{
	return -EBUSY;
}
#endif

static int irq_try_set_affinity(struct irq_data *data,
				const struct cpumask *dest, bool force)
{
	int ret = irq_do_set_affinity(data, dest, force);

	/*
	 * In case that the underlying vector management is busy and the
	 * architecture supports the generic pending mechanism then utilize
	 * this to avoid returning an error to user space.
	 */
	if (ret == -EBUSY && !force)
		ret = irq_set_affinity_pending(data, dest);
	return ret;
}

int irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask,
			    bool force)
{
	struct irq_chip *chip = irq_data_get_irq_chip(data);
	struct irq_desc *desc = irq_data_to_desc(data);
	int ret = 0;

	if (!chip || !chip->irq_set_affinity)
		return -EINVAL;

	if (irq_can_move_pcntxt(data) && !irqd_is_setaffinity_pending(data)) {
		ret = irq_try_set_affinity(data, mask, force);
	} else {
		irqd_set_move_pending(data);
		irq_copy_pending(desc, mask);
	}

	if (desc->affinity_notify) {
		kref_get(&desc->affinity_notify->kref);
		schedule_work(&desc->affinity_notify->work);
	}
	irqd_set(data, IRQD_AFFINITY_SET);

	return ret;
}

int __irq_set_affinity(unsigned int irq, const struct cpumask *mask, bool force)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;
	int ret;

	if (!desc)
		return -EINVAL;

	raw_spin_lock_irqsave(&desc->lock, flags);
	ret = irq_set_affinity_locked(irq_desc_get_irq_data(desc), mask, force);
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	return ret;
}

int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);

	if (!desc)
		return -EINVAL;
	desc->affinity_hint = m;
	irq_put_desc_unlock(desc, flags);
	/* set the initial affinity to prevent every interrupt being on CPU0 */
	if (m)
		__irq_set_affinity(irq, m, false);
	return 0;
}
EXPORT_SYMBOL_GPL(irq_set_affinity_hint);

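/*
 * Illustrative sketch (not part of this file): a hypothetical multiqueue
 * network driver spreading its per-queue interrupts across CPUs with
 * irq_set_affinity_hint(). The example_nic structure and its fields are
 * assumptions made up for this example.
 *
 *	static void example_spread_queue_irqs(struct example_nic *nic)
 *	{
 *		int q;
 *
 *		for (q = 0; q < nic->num_queues; q++) {
 *			int cpu = cpumask_local_spread(q, dev_to_node(nic->dev));
 *
 *			irq_set_affinity_hint(nic->queue_irq[q], cpumask_of(cpu));
 *		}
 *	}
 *
 * The hint shows up in /proc/irq/<n>/affinity_hint and, because the call
 * above also applies it as the initial affinity, it keeps all queues from
 * piling up on CPU0 until userspace (e.g. irqbalance) takes over. The hint
 * should be cleared with irq_set_affinity_hint(irq, NULL) before free_irq().
 */
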
static void irq_affinity_notify(struct work_struct *work)
{
	struct irq_affinity_notify *notify =
		container_of(work, struct irq_affinity_notify, work);
	struct irq_desc *desc = irq_to_desc(notify->irq);
	cpumask_var_t cpumask;
	unsigned long flags;

	if (!desc || !alloc_cpumask_var(&cpumask, GFP_KERNEL))
		goto out;

	raw_spin_lock_irqsave(&desc->lock, flags);
	if (irq_move_pending(&desc->irq_data))
		irq_get_pending(cpumask, desc);
	else
		cpumask_copy(cpumask, desc->irq_common_data.affinity);
	raw_spin_unlock_irqrestore(&desc->lock, flags);

	notify->notify(notify, cpumask);

	free_cpumask_var(cpumask);
out:
	kref_put(&notify->kref, notify->release);
}

/**
 * irq_set_affinity_notifier - control notification of IRQ affinity changes
 * @irq: Interrupt for which to enable/disable notification
 * @notify: Context for notification, or %NULL to disable
 *          notification. Function pointers must be initialised;
 *          the other fields will be initialised by this function.
 *
 * Must be called in process context. Notification may only be enabled
 * after the IRQ is allocated and must be disabled before the IRQ is
 * freed using free_irq().
 */
int
irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irq_affinity_notify *old_notify;
	unsigned long flags;

	/* The release function is promised process context */
	might_sleep();

	if (!desc || desc->istate & IRQS_NMI)
		return -EINVAL;

	/* Complete initialisation of *notify */
	if (notify) {
		notify->irq = irq;
		kref_init(&notify->kref);
		INIT_WORK(&notify->work, irq_affinity_notify);
	}

	raw_spin_lock_irqsave(&desc->lock, flags);
	old_notify = desc->affinity_notify;
	desc->affinity_notify = notify;
	raw_spin_unlock_irqrestore(&desc->lock, flags);

	if (old_notify)
		kref_put(&old_notify->kref, old_notify->release);

	return 0;
}
EXPORT_SYMBOL_GPL(irq_set_affinity_notifier);

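/*
 * Illustrative sketch (not part of this file): how a hypothetical driver
 * might register an affinity notifier so it can re-balance internal per-CPU
 * state when its interrupt is moved. All example_ names are assumptions.
 *
 *	struct example_ctx {
 *		struct irq_affinity_notify affinity_notify;
 *		unsigned int irq;
 *	};
 *
 *	static void example_affinity_changed(struct irq_affinity_notify *notify,
 *					     const cpumask_t *mask)
 *	{
 *		struct example_ctx *ctx =
 *			container_of(notify, struct example_ctx, affinity_notify);
 *
 *		// runs from a workqueue: may sleep, re-steer DMA, move buffers, etc.
 *	}
 *
 *	static void example_affinity_release(struct kref *ref)
 *	{
 *		// last reference dropped; nothing dynamic to free in this sketch
 *	}
 *
 *	static int example_register_notifier(struct example_ctx *ctx)
 *	{
 *		ctx->affinity_notify.notify = example_affinity_changed;
 *		ctx->affinity_notify.release = example_affinity_release;
 *		return irq_set_affinity_notifier(ctx->irq, &ctx->affinity_notify);
 *	}
 *
 * The notifier must be torn down with irq_set_affinity_notifier(irq, NULL)
 * before free_irq(), as the kernel-doc above requires.
 */
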
#ifndef CONFIG_AUTO_IRQ_AFFINITY
/*
 * Generic version of the affinity autoselector.
 */
int irq_setup_affinity(struct irq_desc *desc)
{
	struct cpumask *set = irq_default_affinity;
	int ret, node = irq_desc_get_node(desc);
	static DEFINE_RAW_SPINLOCK(mask_lock);
	static struct cpumask mask;

	/* Excludes PER_CPU and NO_BALANCE interrupts */
	if (!__irq_can_set_affinity(desc))
		return 0;

	raw_spin_lock(&mask_lock);
	/*
	 * Preserve the managed affinity setting and a userspace affinity
	 * setup, but make sure that one of the targets is online.
	 */
	if (irqd_affinity_is_managed(&desc->irq_data) ||
	    irqd_has_set(&desc->irq_data, IRQD_AFFINITY_SET)) {
		if (cpumask_intersects(desc->irq_common_data.affinity,
				       cpu_online_mask))
			set = desc->irq_common_data.affinity;
		else
			irqd_clear(&desc->irq_data, IRQD_AFFINITY_SET);
	}

	cpumask_and(&mask, cpu_online_mask, set);
	if (node != NUMA_NO_NODE) {
		const struct cpumask *nodemask = cpumask_of_node(node);

		/* make sure at least one of the cpus in nodemask is online */
		if (cpumask_intersects(&mask, nodemask))
			cpumask_and(&mask, &mask, nodemask);
	}
	ret = irq_do_set_affinity(&desc->irq_data, &mask, false);
	raw_spin_unlock(&mask_lock);
	return ret;
}
#else
/* Wrapper for ALPHA specific affinity selector magic */
int irq_setup_affinity(struct irq_desc *desc)
{
	return irq_select_affinity(irq_desc_get_irq(desc));
}
#endif

/*
 * Called when a bogus affinity is set via /proc/irq
 */
int irq_select_affinity_usr(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;
	int ret;

	raw_spin_lock_irqsave(&desc->lock, flags);
	ret = irq_setup_affinity(desc);
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	return ret;
}
#endif

/**
 * irq_set_vcpu_affinity - Set vcpu affinity for the interrupt
 * @irq: interrupt number to set affinity
 * @vcpu_info: vCPU specific data or pointer to a percpu array of vCPU
 *             specific data for percpu_devid interrupts
 *
 * This function uses the vCPU specific data to set the vCPU
 * affinity for an irq. The vCPU specific data is passed from
 * outside, such as KVM. One example code path is as below:
 * KVM -> IOMMU -> irq_set_vcpu_affinity().
 */
int irq_set_vcpu_affinity(unsigned int irq, void *vcpu_info)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
	struct irq_data *data;
	struct irq_chip *chip;
	int ret = -ENOSYS;

	if (!desc)
		return -EINVAL;

	data = irq_desc_get_irq_data(desc);
	do {
		chip = irq_data_get_irq_chip(data);
		if (chip && chip->irq_set_vcpu_affinity)
			break;
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
		data = data->parent_data;
#else
		data = NULL;
#endif
	} while (data);

	if (data)
		ret = chip->irq_set_vcpu_affinity(data, vcpu_info);
	irq_put_desc_unlock(desc, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(irq_set_vcpu_affinity);

void __disable_irq(struct irq_desc *desc)
{
	if (!desc->depth++)
		irq_disable(desc);
}

static int __disable_irq_nosync(unsigned int irq)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);

	if (!desc)
		return -EINVAL;
	__disable_irq(desc);
	irq_put_desc_busunlock(desc, flags);
	return 0;
}

/**
 * disable_irq_nosync - disable an irq without waiting
 * @irq: Interrupt to disable
 *
 * Disable the selected interrupt line. Disables and enables are
 * nested.
 * Unlike disable_irq(), this function does not ensure existing
 * instances of the IRQ handler have completed before returning.
 *
 * This function may be called from IRQ context.
 */
void disable_irq_nosync(unsigned int irq)
{
	__disable_irq_nosync(irq);
}
EXPORT_SYMBOL(disable_irq_nosync);

/**
 * disable_irq - disable an irq and wait for completion
 * @irq: Interrupt to disable
 *
 * Disable the selected interrupt line. Enables and disables are
 * nested.
 * This function waits for any pending IRQ handlers for this interrupt
 * to complete before returning. If you use this function while
 * holding a resource the IRQ handler may need, you will deadlock.
 *
 * This function may be called - with care - from IRQ context.
 */
void disable_irq(unsigned int irq)
{
	if (!__disable_irq_nosync(irq))
		synchronize_irq(irq);
}
EXPORT_SYMBOL(disable_irq);

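/*
 * Illustrative sketch (not part of this file): the usual nesting of
 * disable_irq()/enable_irq() around a section that must not race with the
 * handler. The example_dev structure, its fields and the EXAMPLE_CTRL
 * register offset are assumptions made up for this example.
 *
 *	static void example_reprogram(struct example_dev *dev)
 *	{
 *		disable_irq(dev->irq);			// waits for running handlers
 *		writel(0, dev->regs + EXAMPLE_CTRL);	// handler cannot run here
 *		writel(dev->new_mode, dev->regs + EXAMPLE_CTRL);
 *		enable_irq(dev->irq);			// unmasks on the matching enable
 *	}
 *
 * Because disables nest, a second disable_irq() from another path keeps the
 * line off until its own enable_irq(); only the last enable actually unmasks
 * the interrupt. Do not call the sleeping disable_irq() while holding a lock
 * the handler also takes - use disable_irq_nosync() plus synchronize_irq()
 * outside the lock instead.
 */
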
/**
 * disable_hardirq - disables an irq and waits for hardirq completion
 * @irq: Interrupt to disable
 *
 * Disable the selected interrupt line. Enables and disables are
 * nested.
 * This function waits for any pending hard IRQ handlers for this
 * interrupt to complete before returning. If you use this function while
 * holding a resource the hard IRQ handler may need, you will deadlock.
 *
 * When used to optimistically disable an interrupt from atomic context
 * the return value must be checked.
 *
 * Returns: false if a threaded handler is active.
 *
 * This function may be called - with care - from IRQ context.
 */
bool disable_hardirq(unsigned int irq)
{
	if (!__disable_irq_nosync(irq))
		return synchronize_hardirq(irq);

	return false;
}
EXPORT_SYMBOL_GPL(disable_hardirq);

/**
 * disable_nmi_nosync - disable an nmi without waiting
 * @irq: Interrupt to disable
 *
 * Disable the selected interrupt line. Disables and enables are
 * nested.
 * The interrupt to disable must have been requested through request_nmi.
 * Unlike disable_nmi(), this function does not ensure existing
 * instances of the IRQ handler have completed before returning.
 */
void disable_nmi_nosync(unsigned int irq)
{
	disable_irq_nosync(irq);
}

void __enable_irq(struct irq_desc *desc)
{
	switch (desc->depth) {
	case 0:
 err_out:
		WARN(1, KERN_WARNING "Unbalanced enable for IRQ %d\n",
		     irq_desc_get_irq(desc));
		break;
	case 1: {
		if (desc->istate & IRQS_SUSPENDED)
			goto err_out;
		/* Prevent probing on this irq: */
		irq_settings_set_noprobe(desc);
		/*
		 * Call irq_startup() not irq_enable() here because the
		 * interrupt might be marked NOAUTOEN. So irq_startup()
		 * needs to be invoked when it gets enabled the first
		 * time. If it was already started up, then irq_startup()
		 * will invoke irq_enable() under the hood.
		 */
		irq_startup(desc, IRQ_RESEND, IRQ_START_FORCE);
		break;
	}
	default:
		desc->depth--;
	}
}

/**
 * enable_irq - enable handling of an irq
 * @irq: Interrupt to enable
 *
 * Undoes the effect of one call to disable_irq(). If this
 * matches the last disable, processing of interrupts on this
 * IRQ line is re-enabled.
 *
 * This function may be called from IRQ context only when
 * desc->irq_data.chip->irq_bus_lock and
 * desc->irq_data.chip->irq_bus_sync_unlock are NULL!
 */
void enable_irq(unsigned int irq)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);

	if (!desc)
		return;
	if (WARN(!desc->irq_data.chip,
		 KERN_ERR "enable_irq before setup/request_irq: irq %u\n", irq))
		goto out;

	__enable_irq(desc);
out:
	irq_put_desc_busunlock(desc, flags);
}
EXPORT_SYMBOL(enable_irq);

/**
 * enable_nmi - enable handling of an nmi
 * @irq: Interrupt to enable
 *
 * The interrupt to enable must have been requested through request_nmi.
 * Undoes the effect of one call to disable_nmi(). If this
 * matches the last disable, processing of interrupts on this
 * IRQ line is re-enabled.
 */
void enable_nmi(unsigned int irq)
{
	enable_irq(irq);
}

static int set_irq_wake_real(unsigned int irq, unsigned int on)
{
	struct irq_desc *desc = irq_to_desc(irq);
	int ret = -ENXIO;

	if (irq_desc_get_chip(desc)->flags & IRQCHIP_SKIP_SET_WAKE)
		return 0;

	if (desc->irq_data.chip->irq_set_wake)
		ret = desc->irq_data.chip->irq_set_wake(&desc->irq_data, on);

	return ret;
}

/**
 * irq_set_irq_wake - control irq power management wakeup
 * @irq: interrupt to control
 * @on: enable/disable power management wakeup
 *
 * Enable/disable power management wakeup mode, which is
 * disabled by default. Enables and disables must match,
 * just as they match for non-wakeup mode support.
 *
 * Wakeup mode lets this IRQ wake the system from sleep
 * states like "suspend to RAM".
 */
int irq_set_irq_wake(unsigned int irq, unsigned int on)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
	int ret = 0;

	if (!desc)
		return -EINVAL;

	/* Don't use NMIs as wake up interrupts please */
	if (desc->istate & IRQS_NMI) {
		ret = -EINVAL;
		goto out_unlock;
	}

	/* wakeup-capable irqs can be shared between drivers that
	 * don't need to have the same sleep mode behaviors.
	 */
	if (on) {
		if (desc->wake_depth++ == 0) {
			ret = set_irq_wake_real(irq, on);
			if (ret)
				desc->wake_depth = 0;
			else
				irqd_set(&desc->irq_data, IRQD_WAKEUP_STATE);
		}
	} else {
		if (desc->wake_depth == 0) {
			WARN(1, "Unbalanced IRQ %d wake disable\n", irq);
		} else if (--desc->wake_depth == 0) {
			ret = set_irq_wake_real(irq, on);
			if (ret)
				desc->wake_depth = 1;
			else
				irqd_clear(&desc->irq_data, IRQD_WAKEUP_STATE);
		}
	}

out_unlock:
	irq_put_desc_busunlock(desc, flags);
	return ret;
}
EXPORT_SYMBOL(irq_set_irq_wake);

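/*
 * Illustrative sketch (not part of this file): a hypothetical suspend/resume
 * pair arming a device interrupt as a wakeup source. The example_dev fields
 * are assumptions; drivers commonly reach this code via the
 * enable_irq_wake()/disable_irq_wake() wrappers, which call
 * irq_set_irq_wake() underneath.
 *
 *	static int example_suspend(struct device *dev)
 *	{
 *		struct example_dev *ed = dev_get_drvdata(dev);
 *
 *		if (device_may_wakeup(dev))
 *			ed->wake_armed = !irq_set_irq_wake(ed->irq, 1);
 *		return 0;
 *	}
 *
 *	static int example_resume(struct device *dev)
 *	{
 *		struct example_dev *ed = dev_get_drvdata(dev);
 *
 *		if (ed->wake_armed) {
 *			irq_set_irq_wake(ed->irq, 0);
 *			ed->wake_armed = false;
 *		}
 *		return 0;
 *	}
 *
 * Enables and disables must balance, mirroring the wake_depth accounting
 * above.
 */
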
/*
 * Internal function that tells the architecture code whether a
 * particular irq has been exclusively allocated or is available
 * for driver use.
 */
int can_request_irq(unsigned int irq, unsigned long irqflags)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
	int canrequest = 0;

	if (!desc)
		return 0;

	if (irq_settings_can_request(desc)) {
		if (!desc->action ||
		    irqflags & desc->action->flags & IRQF_SHARED)
			canrequest = 1;
	}
	irq_put_desc_unlock(desc, flags);
	return canrequest;
}

int __irq_set_trigger(struct irq_desc *desc, unsigned long flags)
{
	struct irq_chip *chip = desc->irq_data.chip;
	int ret, unmask = 0;

	if (!chip || !chip->irq_set_type) {
		/*
		 * IRQF_TRIGGER_* but the PIC does not support multiple
		 * flow-types?
		 */
		pr_debug("No set_type function for IRQ %d (%s)\n",
			 irq_desc_get_irq(desc),
			 chip ? (chip->name ? : "unknown") : "unknown");
		return 0;
	}

	if (chip->flags & IRQCHIP_SET_TYPE_MASKED) {
		if (!irqd_irq_masked(&desc->irq_data))
			mask_irq(desc);
		if (!irqd_irq_disabled(&desc->irq_data))
			unmask = 1;
	}

	/* Mask all flags except trigger mode */
	flags &= IRQ_TYPE_SENSE_MASK;
	ret = chip->irq_set_type(&desc->irq_data, flags);

	switch (ret) {
	case IRQ_SET_MASK_OK:
	case IRQ_SET_MASK_OK_DONE:
		irqd_clear(&desc->irq_data, IRQD_TRIGGER_MASK);
		irqd_set(&desc->irq_data, flags);

	case IRQ_SET_MASK_OK_NOCOPY:
		flags = irqd_get_trigger_type(&desc->irq_data);
		irq_settings_set_trigger_mask(desc, flags);
		irqd_clear(&desc->irq_data, IRQD_LEVEL);
		irq_settings_clr_level(desc);
		if (flags & IRQ_TYPE_LEVEL_MASK) {
			irq_settings_set_level(desc);
			irqd_set(&desc->irq_data, IRQD_LEVEL);
		}

		ret = 0;
		break;
	default:
		pr_err("Setting trigger mode %lu for irq %u failed (%pF)\n",
		       flags, irq_desc_get_irq(desc), chip->irq_set_type);
	}
	if (unmask)
		unmask_irq(desc);
	return ret;
}

#ifdef CONFIG_HARDIRQS_SW_RESEND
int irq_set_parent(int irq, int parent_irq)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);

	if (!desc)
		return -EINVAL;

	desc->parent_irq = parent_irq;

	irq_put_desc_unlock(desc, flags);
	return 0;
}
EXPORT_SYMBOL_GPL(irq_set_parent);
#endif

/*
 * Default primary interrupt handler for threaded interrupts. Is
 * assigned as primary handler when request_threaded_irq is called
 * with handler == NULL. Useful for oneshot interrupts.
 */
static irqreturn_t irq_default_primary_handler(int irq, void *dev_id)
{
	return IRQ_WAKE_THREAD;
}

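/*
 * Illustrative sketch (not part of this file): the usage the comment above
 * refers to - a hypothetical slow-bus (e.g. I2C) sensor requesting only a
 * threaded handler. With handler == NULL the core installs
 * irq_default_primary_handler(), and IRQF_ONESHOT keeps the level-triggered
 * line masked until the thread has run. All example_ names are assumptions.
 *
 *	static irqreturn_t example_thread_fn(int irq, void *dev_id)
 *	{
 *		struct example_sensor *s = dev_id;
 *
 *		example_read_and_clear_status(s);	// may sleep (bus access)
 *		return IRQ_HANDLED;
 *	}
 *
 *	static int example_request_irq(struct example_sensor *s)
 *	{
 *		return request_threaded_irq(s->irq, NULL, example_thread_fn,
 *					    IRQF_ONESHOT | IRQF_TRIGGER_LOW,
 *					    "example-sensor", s);
 *	}
 */
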
/*
 * Primary handler for nested threaded interrupts. Should never be
 * called.
 */
static irqreturn_t irq_nested_primary_handler(int irq, void *dev_id)
{
	WARN(1, "Primary handler called for nested irq %d\n", irq);
	return IRQ_NONE;
}

static irqreturn_t irq_forced_secondary_handler(int irq, void *dev_id)
{
	WARN(1, "Secondary action handler called for irq %d\n", irq);
	return IRQ_NONE;
}

static int irq_wait_for_interrupt(struct irqaction *action)
{
	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);

		if (kthread_should_stop()) {
			/* may need to run one last time */
			if (test_and_clear_bit(IRQTF_RUNTHREAD,
					       &action->thread_flags)) {
				__set_current_state(TASK_RUNNING);
				return 0;
			}
			__set_current_state(TASK_RUNNING);
			return -1;
		}

		if (test_and_clear_bit(IRQTF_RUNTHREAD,
				       &action->thread_flags)) {
			__set_current_state(TASK_RUNNING);
			return 0;
		}
		schedule();
	}
}

/*
 * Oneshot interrupts keep the irq line masked until the threaded
 * handler finished. Unmask if the interrupt has not been disabled and
 * is marked MASKED.
 */
static void irq_finalize_oneshot(struct irq_desc *desc,
				 struct irqaction *action)
{
	if (!(desc->istate & IRQS_ONESHOT) ||
	    action->handler == irq_forced_secondary_handler)
		return;
again:
	chip_bus_lock(desc);
	raw_spin_lock_irq(&desc->lock);

	/*
	 * Implausible though it may be, we need to protect ourselves against
	 * the following scenario:
	 *
	 * The thread is faster done than the hard interrupt handler
	 * on the other CPU. If we unmask the irq line then the
	 * interrupt can come in again and masks the line, leaves due
	 * to IRQS_INPROGRESS and the irq line is masked forever.
	 *
	 * This also serializes the state of shared oneshot handlers
	 * versus "desc->threads_oneshot |= action->thread_mask;" in
	 * irq_wake_thread(). See the comment there which explains the
	 * serialization.
	 */
	if (unlikely(irqd_irq_inprogress(&desc->irq_data))) {
		raw_spin_unlock_irq(&desc->lock);
		chip_bus_sync_unlock(desc);
		cpu_relax();
		goto again;
	}

	/*
	 * Now check again, whether the thread should run. Otherwise
	 * we would clear the threads_oneshot bit of this thread which
	 * was just set.
	 */
	if (test_bit(IRQTF_RUNTHREAD, &action->thread_flags))
		goto out_unlock;

	desc->threads_oneshot &= ~action->thread_mask;

	if (!desc->threads_oneshot && !irqd_irq_disabled(&desc->irq_data) &&
	    irqd_irq_masked(&desc->irq_data))
		unmask_threaded_irq(desc);

out_unlock:
	raw_spin_unlock_irq(&desc->lock);
	chip_bus_sync_unlock(desc);
}

#ifdef CONFIG_SMP
/*
 * Check whether we need to change the affinity of the interrupt thread.
 */
static void
irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action)
{
	cpumask_var_t mask;
	bool valid = true;

	if (!test_and_clear_bit(IRQTF_AFFINITY, &action->thread_flags))
		return;

	/*
	 * In case we are out of memory we set IRQTF_AFFINITY again and
	 * try again next time
	 */
	if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
		set_bit(IRQTF_AFFINITY, &action->thread_flags);
		return;
	}

	raw_spin_lock_irq(&desc->lock);
	/*
	 * This code is triggered unconditionally. Check the affinity
	 * mask pointer. For CPU_MASK_OFFSTACK=n this is optimized out.
	 */
	if (cpumask_available(desc->irq_common_data.affinity)) {
		const struct cpumask *m;

		m = irq_data_get_effective_affinity_mask(&desc->irq_data);
		cpumask_copy(mask, m);
	} else {
		valid = false;
	}
	raw_spin_unlock_irq(&desc->lock);

	if (valid)
		set_cpus_allowed_ptr(current, mask);
	free_cpumask_var(mask);
}
#else
static inline void
irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) { }
#endif

/*
 * Interrupts which are not explicitly requested as threaded
 * interrupts rely on the implicit bh/preempt disable of the hard irq
 * context. So we need to disable bh here to avoid deadlocks and other
 * side effects.
 */
static irqreturn_t
irq_forced_thread_fn(struct irq_desc *desc, struct irqaction *action)
{
	irqreturn_t ret;

	local_bh_disable();
	ret = action->thread_fn(action->irq, action->dev_id);
	if (ret == IRQ_HANDLED)
		atomic_inc(&desc->threads_handled);

	irq_finalize_oneshot(desc, action);
	local_bh_enable();
	return ret;
}

/*
 * Interrupts explicitly requested as threaded interrupts want to be
 * preemptible - many of them need to sleep and wait for slow busses to
 * complete.
 */
static irqreturn_t irq_thread_fn(struct irq_desc *desc,
				 struct irqaction *action)
{
	irqreturn_t ret;

	ret = action->thread_fn(action->irq, action->dev_id);
	if (ret == IRQ_HANDLED)
		atomic_inc(&desc->threads_handled);

	irq_finalize_oneshot(desc, action);
	return ret;
}

static void wake_threads_waitq(struct irq_desc *desc)
{
	if (atomic_dec_and_test(&desc->threads_active))
		wake_up(&desc->wait_for_threads);
}

static void irq_thread_dtor(struct callback_head *unused)
{
	struct task_struct *tsk = current;
	struct irq_desc *desc;
	struct irqaction *action;

	if (WARN_ON_ONCE(!(current->flags & PF_EXITING)))
		return;

	action = kthread_data(tsk);

	pr_err("exiting task \"%s\" (%d) is an active IRQ thread (irq %d)\n",
	       tsk->comm, tsk->pid, action->irq);

	desc = irq_to_desc(action->irq);
	/*
	 * If IRQTF_RUNTHREAD is set, we need to decrement
	 * desc->threads_active and wake possible waiters.
	 */
	if (test_and_clear_bit(IRQTF_RUNTHREAD, &action->thread_flags))
		wake_threads_waitq(desc);

	/* Prevent a stale desc->threads_oneshot */
	irq_finalize_oneshot(desc, action);
}

static void irq_wake_secondary(struct irq_desc *desc, struct irqaction *action)
{
	struct irqaction *secondary = action->secondary;

	if (WARN_ON_ONCE(!secondary))
		return;

	raw_spin_lock_irq(&desc->lock);
	__irq_wake_thread(desc, secondary);
	raw_spin_unlock_irq(&desc->lock);
}

/*
 * Interrupt handler thread
 */
static int irq_thread(void *data)
{
	struct callback_head on_exit_work;
	struct irqaction *action = data;
	struct irq_desc *desc = irq_to_desc(action->irq);
	irqreturn_t (*handler_fn)(struct irq_desc *desc,
				  struct irqaction *action);

	if (force_irqthreads && test_bit(IRQTF_FORCED_THREAD,
					 &action->thread_flags))
		handler_fn = irq_forced_thread_fn;
	else
		handler_fn = irq_thread_fn;

	init_task_work(&on_exit_work, irq_thread_dtor);
	task_work_add(current, &on_exit_work, false);

	irq_thread_check_affinity(desc, action);

	while (!irq_wait_for_interrupt(action)) {
		irqreturn_t action_ret;

		irq_thread_check_affinity(desc, action);

		action_ret = handler_fn(desc, action);
		if (action_ret == IRQ_WAKE_THREAD)
			irq_wake_secondary(desc, action);

		wake_threads_waitq(desc);
	}

	/*
	 * This is the regular exit path. __free_irq() is stopping the
	 * thread via kthread_stop() after calling
	 * synchronize_hardirq(). So neither IRQTF_RUNTHREAD nor the
	 * oneshot mask bit can be set.
	 */
	task_work_cancel(current, irq_thread_dtor);
	return 0;
}

/**
 * irq_wake_thread - wake the irq thread for the action identified by dev_id
 * @irq: Interrupt line
 * @dev_id: Device identity for which the thread should be woken
 *
 */
void irq_wake_thread(unsigned int irq, void *dev_id)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irqaction *action;
	unsigned long flags;

	if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc)))
		return;

	raw_spin_lock_irqsave(&desc->lock, flags);
	for_each_action_of_desc(desc, action) {
		if (action->dev_id == dev_id) {
			if (action->thread)
				__irq_wake_thread(desc, action);
			break;
		}
	}
	raw_spin_unlock_irqrestore(&desc->lock, flags);
}
EXPORT_SYMBOL_GPL(irq_wake_thread);

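/*
 * Illustrative sketch (not part of this file): irq_wake_thread() lets a
 * driver kick its own threaded handler when it discovers pending work outside
 * the hard handler, e.g. from a polling timer. The example_ names are
 * assumptions made up for this example.
 *
 *	static void example_poll_timer(struct timer_list *t)
 *	{
 *		struct example_dev *dev = from_timer(dev, t, poll_timer);
 *
 *		if (example_status_pending(dev))
 *			irq_wake_thread(dev->irq, dev);	// dev_id given to request_threaded_irq()
 *		mod_timer(&dev->poll_timer, jiffies + HZ);
 *	}
 */
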
Thomas Gleixner2a1d3ab2015-09-21 11:01:10 +02001109static int irq_setup_forced_threading(struct irqaction *new)
Thomas Gleixner8d32a302011-02-23 23:52:23 +00001110{
1111 if (!force_irqthreads)
Thomas Gleixner2a1d3ab2015-09-21 11:01:10 +02001112 return 0;
Thomas Gleixner8d32a302011-02-23 23:52:23 +00001113 if (new->flags & (IRQF_NO_THREAD | IRQF_PERCPU | IRQF_ONESHOT))
Thomas Gleixner2a1d3ab2015-09-21 11:01:10 +02001114 return 0;
Thomas Gleixner8d32a302011-02-23 23:52:23 +00001115
Thomas Gleixnerd1f03012018-08-03 14:44:59 +02001116 /*
1117 * No further action required for interrupts which are requested as
1118 * threaded interrupts already
1119 */
1120 if (new->handler == irq_default_primary_handler)
1121 return 0;
1122
Thomas Gleixner8d32a302011-02-23 23:52:23 +00001123 new->flags |= IRQF_ONESHOT;
1124
Thomas Gleixner2a1d3ab2015-09-21 11:01:10 +02001125 /*
1126 * Handle the case where we have a real primary handler and a
1127 * thread handler. We force thread them as well by creating a
1128 * secondary action.
1129 */
Thomas Gleixnerd1f03012018-08-03 14:44:59 +02001130 if (new->handler && new->thread_fn) {
Thomas Gleixner2a1d3ab2015-09-21 11:01:10 +02001131 /* Allocate the secondary action */
1132 new->secondary = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
1133 if (!new->secondary)
1134 return -ENOMEM;
1135 new->secondary->handler = irq_forced_secondary_handler;
1136 new->secondary->thread_fn = new->thread_fn;
1137 new->secondary->dev_id = new->dev_id;
1138 new->secondary->irq = new->irq;
1139 new->secondary->name = new->name;
Thomas Gleixner8d32a302011-02-23 23:52:23 +00001140 }
Thomas Gleixner2a1d3ab2015-09-21 11:01:10 +02001141 /* Deal with the primary handler */
1142 set_bit(IRQTF_FORCED_THREAD, &new->thread_flags);
1143 new->thread_fn = new->handler;
1144 new->handler = irq_default_primary_handler;
1145 return 0;
Thomas Gleixner8d32a302011-02-23 23:52:23 +00001146}
1147
Thomas Gleixnerc1bacba2014-03-08 08:59:58 +01001148static int irq_request_resources(struct irq_desc *desc)
1149{
1150 struct irq_data *d = &desc->irq_data;
1151 struct irq_chip *c = d->chip;
1152
1153 return c->irq_request_resources ? c->irq_request_resources(d) : 0;
1154}
1155
1156static void irq_release_resources(struct irq_desc *desc)
1157{
1158 struct irq_data *d = &desc->irq_data;
1159 struct irq_chip *c = d->chip;
1160
1161 if (c->irq_release_resources)
1162 c->irq_release_resources(d);
1163}
1164
Julien Thierryb5259032019-01-31 14:53:58 +00001165static bool irq_supports_nmi(struct irq_desc *desc)
1166{
1167 struct irq_data *d = irq_desc_get_irq_data(desc);
1168
1169#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
1170 /* Only IRQs directly managed by the root irqchip can be set as NMI */
1171 if (d->parent_data)
1172 return false;
1173#endif
1174 /* Don't support NMIs for chips behind a slow bus */
1175 if (d->chip->irq_bus_lock || d->chip->irq_bus_sync_unlock)
1176 return false;
1177
1178 return d->chip->flags & IRQCHIP_SUPPORTS_NMI;
1179}
1180
1181static int irq_nmi_setup(struct irq_desc *desc)
1182{
1183 struct irq_data *d = irq_desc_get_irq_data(desc);
1184 struct irq_chip *c = d->chip;
1185
1186 return c->irq_nmi_setup ? c->irq_nmi_setup(d) : -EINVAL;
1187}
1188
1189static void irq_nmi_teardown(struct irq_desc *desc)
1190{
1191 struct irq_data *d = irq_desc_get_irq_data(desc);
1192 struct irq_chip *c = d->chip;
1193
1194 if (c->irq_nmi_teardown)
1195 c->irq_nmi_teardown(d);
1196}
1197
Thomas Gleixner2a1d3ab2015-09-21 11:01:10 +02001198static int
1199setup_irq_thread(struct irqaction *new, unsigned int irq, bool secondary)
1200{
1201 struct task_struct *t;
1202 struct sched_param param = {
1203 .sched_priority = MAX_USER_RT_PRIO/2,
1204 };
1205
1206 if (!secondary) {
1207 t = kthread_create(irq_thread, new, "irq/%d-%s", irq,
1208 new->name);
1209 } else {
1210 t = kthread_create(irq_thread, new, "irq/%d-s-%s", irq,
1211 new->name);
1212 param.sched_priority -= 1;
1213 }
1214
1215 if (IS_ERR(t))
1216 return PTR_ERR(t);
1217
1218 sched_setscheduler_nocheck(t, SCHED_FIFO, &param);
1219
1220 /*
1221 * We keep the reference to the task struct even if
1222 * the thread dies to avoid that the interrupt code
1223 * references an already freed task_struct.
1224 */
1225 get_task_struct(t);
1226 new->thread = t;
1227 /*
1228 * Tell the thread to set its affinity. This is
1229 * important for shared interrupt handlers as we do
1230 * not invoke setup_affinity() for the secondary
1231 * handlers as everything is already set up. Even for
1232 * interrupts marked with IRQF_NO_BALANCE this is
1233 * correct as we want the thread to move to the cpu(s)
1234 * on which the requesting code placed the interrupt.
1235 */
1236 set_bit(IRQTF_AFFINITY, &new->thread_flags);
1237 return 0;
1238}
1239
Linus Torvalds1da177e2005-04-16 15:20:36 -07001240/*
1241 * Internal function to register an irqaction - typically used to
1242 * allocate special interrupts that are part of the architecture.
Thomas Gleixner19d39a32017-07-11 23:41:52 +02001243 *
1244 * Locking rules:
1245 *
1246 * desc->request_mutex Provides serialization against a concurrent free_irq()
1247 * chip_bus_lock Provides serialization for slow bus operations
1248 * desc->lock Provides serialization against hard interrupts
1249 *
1250 * chip_bus_lock and desc->lock are sufficient for all other management and
1251 * interrupt related functions. desc->request_mutex solely serializes
1252 * request/free_irq().
Linus Torvalds1da177e2005-04-16 15:20:36 -07001253 */
Thomas Gleixnerd3c60042008-10-16 09:55:00 +02001254static int
Ingo Molnar327ec562009-02-15 11:21:37 +01001255__setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001256{
Ingo Molnarf17c7542009-02-17 20:43:37 +01001257 struct irqaction *old, **old_ptr;
Thomas Gleixnerb5faba22011-02-23 23:52:13 +00001258 unsigned long flags, thread_mask = 0;
Thomas Gleixner3b8249e2011-02-07 16:02:20 +01001259 int ret, nested, shared = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001260
Yinghai Lu7d94f7c2008-08-19 20:50:14 -07001261 if (!desc)
Matthew Wilcoxc2b5a252005-11-03 07:51:18 -07001262 return -EINVAL;
1263
Thomas Gleixner6b8ff312010-10-01 12:58:38 +02001264 if (desc->irq_data.chip == &no_irq_chip)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001265 return -ENOSYS;
Sebastian Andrzej Siewiorb6873802011-07-11 12:17:31 +02001266 if (!try_module_get(desc->owner))
1267 return -ENODEV;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001268
Thomas Gleixner2a1d3ab2015-09-21 11:01:10 +02001269 new->irq = irq;
1270
Linus Torvalds1da177e2005-04-16 15:20:36 -07001271 /*
Jon Hunter4b357da2016-06-07 16:12:27 +01001272 * If the trigger type is not specified by the caller,
1273 * then use the default for this interrupt.
1274 */
1275 if (!(new->flags & IRQF_TRIGGER_MASK))
1276 new->flags |= irqd_get_trigger_type(&desc->irq_data);
1277
1278 /*
Thomas Gleixner399b5da2009-08-13 13:21:38 +02001279 * Check whether the interrupt nests into another interrupt
1280 * thread.
Thomas Gleixner3aa551c2009-03-23 18:28:15 +01001281 */
Thomas Gleixner1ccb4e62011-02-09 14:44:17 +01001282 nested = irq_settings_is_nested_thread(desc);
Thomas Gleixner399b5da2009-08-13 13:21:38 +02001283 if (nested) {
Sebastian Andrzej Siewiorb6873802011-07-11 12:17:31 +02001284 if (!new->thread_fn) {
1285 ret = -EINVAL;
1286 goto out_mput;
1287 }
Thomas Gleixner399b5da2009-08-13 13:21:38 +02001288 /*
1289 * Replace the primary handler which was provided from
1290 * the driver for non nested interrupt handling by the
1291 * dummy function which warns when called.
1292 */
1293 new->handler = irq_nested_primary_handler;
Thomas Gleixner8d32a302011-02-23 23:52:23 +00001294 } else {
Thomas Gleixner2a1d3ab2015-09-21 11:01:10 +02001295 if (irq_settings_can_thread(desc)) {
1296 ret = irq_setup_forced_threading(new);
1297 if (ret)
1298 goto out_mput;
1299 }
Thomas Gleixner399b5da2009-08-13 13:21:38 +02001300 }
1301
1302 /*
1303 * Create a handler thread when a thread function is supplied
1304 * and the interrupt does not nest into another interrupt
1305 * thread.
1306 */
1307 if (new->thread_fn && !nested) {
Thomas Gleixner2a1d3ab2015-09-21 11:01:10 +02001308 ret = setup_irq_thread(new, irq, false);
1309 if (ret)
Sebastian Andrzej Siewiorb6873802011-07-11 12:17:31 +02001310 goto out_mput;
Thomas Gleixner2a1d3ab2015-09-21 11:01:10 +02001311 if (new->secondary) {
1312 ret = setup_irq_thread(new->secondary, irq, true);
1313 if (ret)
1314 goto out_thread;
Sebastian Andrzej Siewiorb6873802011-07-11 12:17:31 +02001315 }
Thomas Gleixner3aa551c2009-03-23 18:28:15 +01001316 }
1317
1318 /*
Thomas Gleixnerdc9b2292012-07-13 19:29:45 +02001319 * Drivers are often written to work w/o knowledge about the
1320 * underlying irq chip implementation, so a request for a
1321 * threaded irq without a primary hard irq context handler
1322 * requires the ONESHOT flag to be set. Some irq chips like
1323 * MSI based interrupts are per se one shot safe. Check the
1324 * chip flags, so we can avoid the unmask dance at the end of
1325 * the threaded handler for those.
1326 */
1327 if (desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE)
1328 new->flags &= ~IRQF_ONESHOT;
1329
Thomas Gleixner19d39a32017-07-11 23:41:52 +02001330 /*
1331 * Protects against a concurrent __free_irq() call which might wait
Lukas Wunner519cc862018-06-24 10:35:30 +02001332 * for synchronize_hardirq() to complete without holding the optional
Lukas Wunner836557b2018-06-24 10:35:18 +02001333 * chip bus lock and desc->lock. Also protects against handing out
1334 * a recycled oneshot thread_mask bit while it's still in use by
1335 * its previous owner.
Thomas Gleixner19d39a32017-07-11 23:41:52 +02001336 */
Thomas Gleixner91140142017-06-29 23:33:37 +02001337 mutex_lock(&desc->request_mutex);
Thomas Gleixner19d39a32017-07-11 23:41:52 +02001338
1339 /*
1340 * Acquire bus lock as the irq_request_resources() callback below
1341 * might rely on the serialization or the magic power management
1342	 * functions which are abusing the irq_bus_lock() callback.
1343 */
1344 chip_bus_lock(desc);
1345
1346 /* First installed action requests resources. */
Thomas Gleixner46e48e22017-06-29 23:33:38 +02001347 if (!desc->action) {
1348 ret = irq_request_resources(desc);
1349 if (ret) {
1350 pr_err("Failed to request resources for %s (irq %d) on irqchip %s\n",
1351 new->name, irq, desc->irq_data.chip->name);
Thomas Gleixner19d39a32017-07-11 23:41:52 +02001352 goto out_bus_unlock;
Thomas Gleixner46e48e22017-06-29 23:33:38 +02001353 }
1354 }
Thomas Gleixner91140142017-06-29 23:33:37 +02001355
Thomas Gleixnerdc9b2292012-07-13 19:29:45 +02001356 /*
Linus Torvalds1da177e2005-04-16 15:20:36 -07001357 * The following block of code has to be executed atomically
Thomas Gleixner19d39a32017-07-11 23:41:52 +02001358 * protected against a concurrent interrupt and any of the other
1359 * management calls which are not serialized via
1360 * desc->request_mutex or the optional bus lock.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001361 */
Thomas Gleixner239007b2009-11-17 16:46:45 +01001362 raw_spin_lock_irqsave(&desc->lock, flags);
Ingo Molnarf17c7542009-02-17 20:43:37 +01001363 old_ptr = &desc->action;
1364 old = *old_ptr;
Ingo Molnar06fcb0c2006-06-29 02:24:40 -07001365 if (old) {
Thomas Gleixnere76de9f2006-06-29 02:24:56 -07001366 /*
1367 * Can't share interrupts unless both agree to and are
1368 * the same type (level, edge, polarity). So both flag
Thomas Gleixner3cca53b2006-07-01 19:29:31 -07001369 * fields must have IRQF_SHARED set and the bits which
Thomas Gleixner9d591ed2011-02-23 23:52:16 +00001370 * set the trigger type must match. Also all must
1371 * agree on ONESHOT.
Julien Thierryb5259032019-01-31 14:53:58 +00001372 * Interrupt lines used for NMIs cannot be shared.
Thomas Gleixnere76de9f2006-06-29 02:24:56 -07001373 */
Marc Zyngier4f8413a2017-11-09 14:17:59 +00001374 unsigned int oldtype;
1375
Julien Thierryb5259032019-01-31 14:53:58 +00001376 if (desc->istate & IRQS_NMI) {
1377 pr_err("Invalid attempt to share NMI for %s (irq %d) on irqchip %s.\n",
1378 new->name, irq, desc->irq_data.chip->name);
1379 ret = -EINVAL;
1380 goto out_unlock;
1381 }
1382
Marc Zyngier4f8413a2017-11-09 14:17:59 +00001383 /*
1384 * If nobody did set the configuration before, inherit
1385 * the one provided by the requester.
1386 */
1387 if (irqd_trigger_type_was_set(&desc->irq_data)) {
1388 oldtype = irqd_get_trigger_type(&desc->irq_data);
1389 } else {
1390 oldtype = new->flags & IRQF_TRIGGER_MASK;
1391 irqd_set_trigger_type(&desc->irq_data, oldtype);
1392 }
Hans de Goede382bd4d2017-04-15 12:08:31 +02001393
Thomas Gleixner3cca53b2006-07-01 19:29:31 -07001394 if (!((old->flags & new->flags) & IRQF_SHARED) ||
Hans de Goede382bd4d2017-04-15 12:08:31 +02001395 (oldtype != (new->flags & IRQF_TRIGGER_MASK)) ||
Thomas Gleixnerf5d89472012-04-19 12:06:13 +02001396 ((old->flags ^ new->flags) & IRQF_ONESHOT))
Dimitri Sivanichf5163422006-03-25 03:08:23 -08001397 goto mismatch;
1398
Dimitri Sivanichf5163422006-03-25 03:08:23 -08001399 /* All handlers must agree on per-cpuness */
Thomas Gleixner3cca53b2006-07-01 19:29:31 -07001400 if ((old->flags & IRQF_PERCPU) !=
1401 (new->flags & IRQF_PERCPU))
Dimitri Sivanichf5163422006-03-25 03:08:23 -08001402 goto mismatch;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001403
1404 /* add new interrupt at end of irq queue */
1405 do {
Thomas Gleixner52abb702012-03-06 23:18:54 +01001406 /*
1407 * Or all existing action->thread_mask bits,
1408 * so we can find the next zero bit for this
1409 * new action.
1410 */
Thomas Gleixnerb5faba22011-02-23 23:52:13 +00001411 thread_mask |= old->thread_mask;
Ingo Molnarf17c7542009-02-17 20:43:37 +01001412 old_ptr = &old->next;
1413 old = *old_ptr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001414 } while (old);
1415 shared = 1;
1416 }
1417
Thomas Gleixnerb5faba22011-02-23 23:52:13 +00001418 /*
Thomas Gleixner52abb702012-03-06 23:18:54 +01001419 * Setup the thread mask for this irqaction for ONESHOT. For
1420 * !ONESHOT irqs the thread mask is 0 so we can avoid a
1421 * conditional in irq_wake_thread().
Thomas Gleixnerb5faba22011-02-23 23:52:13 +00001422 */
Thomas Gleixner52abb702012-03-06 23:18:54 +01001423 if (new->flags & IRQF_ONESHOT) {
1424 /*
1425 * Unlikely to have 32 resp 64 irqs sharing one line,
1426 * but who knows.
1427 */
1428 if (thread_mask == ~0UL) {
1429 ret = -EBUSY;
Thomas Gleixnercba42352017-06-20 01:37:21 +02001430 goto out_unlock;
Thomas Gleixner52abb702012-03-06 23:18:54 +01001431 }
1432 /*
1433 * The thread_mask for the action is or'ed to
1434 * desc->thread_active to indicate that the
1435 * IRQF_ONESHOT thread handler has been woken, but not
1436 * yet finished. The bit is cleared when a thread
1437 * completes. When all threads of a shared interrupt
1438 * line have completed desc->threads_active becomes
1439 * zero and the interrupt line is unmasked. See
1440 * handle.c:irq_wake_thread() for further information.
1441 *
1442 * If no thread is woken by primary (hard irq context)
1443 * interrupt handlers, then desc->threads_active is
1444 * also checked for zero to unmask the irq line in the
1445 * affected hard irq flow handlers
1446 * (handle_[fasteoi|level]_irq).
1447 *
1448 * The new action gets the first zero bit of
1449 * thread_mask assigned. See the loop above which or's
1450 * all existing action->thread_mask bits.
1451 */
Rasmus Villemoesffc661c2017-10-30 22:35:47 +01001452 new->thread_mask = 1UL << ffz(thread_mask);
Thomas Gleixner1c6c6952012-04-19 10:35:17 +02001453
Thomas Gleixnerdc9b2292012-07-13 19:29:45 +02001454 } else if (new->handler == irq_default_primary_handler &&
1455 !(desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE)) {
Thomas Gleixner1c6c6952012-04-19 10:35:17 +02001456 /*
1457 * The interrupt was requested with handler = NULL, so
1458 * we use the default primary handler for it. But it
1459 * does not have the oneshot flag set. In combination
1460 * with level interrupts this is deadly, because the
1461 * default primary handler just wakes the thread, then
1462	 * the irq line is reenabled, but the device still
1463 * has the level irq asserted. Rinse and repeat....
1464 *
1465 * While this works for edge type interrupts, we play
1466 * it safe and reject unconditionally because we can't
1467 * say for sure which type this interrupt really
1468 * has. The type flags are unreliable as the
1469 * underlying chip implementation can override them.
1470 */
Andrew Morton97fd75b2012-05-31 16:26:07 -07001471 pr_err("Threaded irq requested with handler=NULL and !ONESHOT for irq %d\n",
Thomas Gleixner1c6c6952012-04-19 10:35:17 +02001472 irq);
1473 ret = -EINVAL;
Thomas Gleixnercba42352017-06-20 01:37:21 +02001474 goto out_unlock;
Thomas Gleixnerb5faba22011-02-23 23:52:13 +00001475 }
Thomas Gleixnerb5faba22011-02-23 23:52:13 +00001476
Linus Torvalds1da177e2005-04-16 15:20:36 -07001477 if (!shared) {
Thomas Gleixner3aa551c2009-03-23 18:28:15 +01001478 init_waitqueue_head(&desc->wait_for_threads);
1479
Uwe Kleine-König82736f42008-07-23 21:28:54 -07001480 /* Setup the type (level, edge polarity) if configured: */
1481 if (new->flags & IRQF_TRIGGER_MASK) {
Jiang Liua1ff5412015-06-23 19:47:29 +02001482 ret = __irq_set_trigger(desc,
1483 new->flags & IRQF_TRIGGER_MASK);
Uwe Kleine-König82736f42008-07-23 21:28:54 -07001484
Thomas Gleixner19d39a32017-07-11 23:41:52 +02001485 if (ret)
Thomas Gleixnercba42352017-06-20 01:37:21 +02001486 goto out_unlock;
Thomas Gleixner091738a2011-02-14 20:16:43 +01001487 }
Ahmed S. Darwishf75d2222007-05-08 00:27:55 -07001488
Thomas Gleixnerc942cee2017-09-13 23:29:09 +02001489 /*
1490 * Activate the interrupt. That activation must happen
1491 * independently of IRQ_NOAUTOEN. request_irq() can fail
1492 * and the callers are supposed to handle
1493 * that. enable_irq() of an interrupt requested with
1494 * IRQ_NOAUTOEN is not supposed to fail. The activation
1495	 * keeps it in shutdown mode, it merely associates
1496 * resources if necessary and if that's not possible it
1497 * fails. Interrupts which are in managed shutdown mode
1498 * will simply ignore that activation request.
1499 */
1500 ret = irq_activate(desc);
1501 if (ret)
1502 goto out_unlock;
1503
Thomas Gleixner009b4c32011-02-07 21:48:49 +01001504 desc->istate &= ~(IRQS_AUTODETECT | IRQS_SPURIOUS_DISABLED | \
Thomas Gleixner32f41252011-03-28 14:10:52 +02001505 IRQS_ONESHOT | IRQS_WAITING);
1506 irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);
Thomas Gleixner94d39e12006-06-29 02:24:50 -07001507
Thomas Gleixnera0056772011-02-08 17:11:03 +01001508 if (new->flags & IRQF_PERCPU) {
1509 irqd_set(&desc->irq_data, IRQD_PER_CPU);
1510 irq_settings_set_per_cpu(desc);
1511 }
Thomas Gleixner6a58fb32011-02-08 15:40:05 +01001512
Thomas Gleixnerb25c3402009-08-13 12:17:22 +02001513 if (new->flags & IRQF_ONESHOT)
Thomas Gleixner3d67bae2011-02-07 21:02:10 +01001514 desc->istate |= IRQS_ONESHOT;
Thomas Gleixnerb25c3402009-08-13 12:17:22 +02001515
Thomas Gleixner2e051552017-06-20 01:37:23 +02001516 /* Exclude IRQ from balancing if requested */
1517 if (new->flags & IRQF_NOBALANCING) {
1518 irq_settings_set_no_balancing(desc);
1519 irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
1520 }
1521
Thomas Gleixner04c848d2017-05-31 11:58:33 +02001522 if (irq_settings_can_autoenable(desc)) {
Thomas Gleixner4cde9c62017-06-20 01:37:49 +02001523 irq_startup(desc, IRQ_RESEND, IRQ_START_COND);
Thomas Gleixner04c848d2017-05-31 11:58:33 +02001524 } else {
1525 /*
1526 * Shared interrupts do not go well with disabling
1527 * auto enable. The sharing interrupt might request
1528 * it while it's still disabled and then wait for
1529 * interrupts forever.
1530 */
1531 WARN_ON_ONCE(new->flags & IRQF_SHARED);
Thomas Gleixnere76de9f2006-06-29 02:24:56 -07001532 /* Undo nested disables: */
1533 desc->depth = 1;
Thomas Gleixner04c848d2017-05-31 11:58:33 +02001534 }
Max Krasnyansky18404752008-05-29 11:02:52 -07001535
Thomas Gleixner876dbd42011-02-08 17:28:12 +01001536 } else if (new->flags & IRQF_TRIGGER_MASK) {
1537 unsigned int nmsk = new->flags & IRQF_TRIGGER_MASK;
Thomas Gleixner7ee7e872016-11-07 19:57:00 +01001538 unsigned int omsk = irqd_get_trigger_type(&desc->irq_data);
Thomas Gleixner876dbd42011-02-08 17:28:12 +01001539
1540 if (nmsk != omsk)
1541 /* hope the handler works with current trigger mode */
Joe Perchesa395d6a2016-03-22 14:28:09 -07001542 pr_warn("irq %d uses trigger mode %u; requested %u\n",
Thomas Gleixner7ee7e872016-11-07 19:57:00 +01001543 irq, omsk, nmsk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001544 }
Uwe Kleine-König82736f42008-07-23 21:28:54 -07001545
Ingo Molnarf17c7542009-02-17 20:43:37 +01001546 *old_ptr = new;
Uwe Kleine-König82736f42008-07-23 21:28:54 -07001547
Thomas Gleixnercab303b2014-08-28 11:44:31 +02001548 irq_pm_install_action(desc, new);
1549
Linus Torvalds8528b0f2007-01-23 14:16:31 -08001550 /* Reset broken irq detection when installing new handler */
1551 desc->irq_count = 0;
1552 desc->irqs_unhandled = 0;
Thomas Gleixner1adb0852008-04-28 17:01:56 +02001553
1554 /*
1555 * Check whether we disabled the irq via the spurious handler
1556 * before. Reenable it and give it another chance.
1557 */
Thomas Gleixner7acdd532011-02-07 20:40:54 +01001558 if (shared && (desc->istate & IRQS_SPURIOUS_DISABLED)) {
1559 desc->istate &= ~IRQS_SPURIOUS_DISABLED;
Jiang Liu79ff1cd2015-06-23 19:52:36 +02001560 __enable_irq(desc);
Thomas Gleixner1adb0852008-04-28 17:01:56 +02001561 }
1562
Thomas Gleixner239007b2009-11-17 16:46:45 +01001563 raw_spin_unlock_irqrestore(&desc->lock, flags);
Thomas Gleixner3a907952017-06-29 23:33:36 +02001564 chip_bus_sync_unlock(desc);
Thomas Gleixner91140142017-06-29 23:33:37 +02001565 mutex_unlock(&desc->request_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001566
Daniel Lezcanob2d3d612017-06-23 16:11:07 +02001567 irq_setup_timings(desc, new);
1568
Thomas Gleixner69ab8492009-08-17 14:07:16 +02001569 /*
1570 * Strictly no need to wake it up, but hung_task complains
1571 * when no hard interrupt wakes the thread up.
1572 */
1573 if (new->thread)
1574 wake_up_process(new->thread);
Thomas Gleixner2a1d3ab2015-09-21 11:01:10 +02001575 if (new->secondary)
1576 wake_up_process(new->secondary->thread);
Thomas Gleixner69ab8492009-08-17 14:07:16 +02001577
Yinghai Lu2c6927a2008-08-19 20:50:11 -07001578 register_irq_proc(irq, desc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001579 new->dir = NULL;
1580 register_handler_proc(irq, new);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001581 return 0;
Dimitri Sivanichf5163422006-03-25 03:08:23 -08001582
1583mismatch:
Thomas Gleixner3cca53b2006-07-01 19:29:31 -07001584 if (!(new->flags & IRQF_PROBE_SHARED)) {
Andrew Morton97fd75b2012-05-31 16:26:07 -07001585 pr_err("Flags mismatch irq %d. %08x (%s) vs. %08x (%s)\n",
Thomas Gleixnerf5d89472012-04-19 12:06:13 +02001586 irq, new->flags, new->name, old->flags, old->name);
1587#ifdef CONFIG_DEBUG_SHIRQ
Andrew Morton13e87ec2006-04-27 18:39:18 -07001588 dump_stack();
Alan Cox3f050442007-02-12 00:52:04 -08001589#endif
Thomas Gleixnerf5d89472012-04-19 12:06:13 +02001590 }
Thomas Gleixner3aa551c2009-03-23 18:28:15 +01001591 ret = -EBUSY;
1592
Thomas Gleixnercba42352017-06-20 01:37:21 +02001593out_unlock:
Dan Carpenter1c389792011-03-17 14:43:07 +03001594 raw_spin_unlock_irqrestore(&desc->lock, flags);
Thomas Gleixner3b8249e2011-02-07 16:02:20 +01001595
Thomas Gleixner46e48e22017-06-29 23:33:38 +02001596 if (!desc->action)
1597 irq_release_resources(desc);
Thomas Gleixner19d39a32017-07-11 23:41:52 +02001598out_bus_unlock:
1599 chip_bus_sync_unlock(desc);
Thomas Gleixner91140142017-06-29 23:33:37 +02001600 mutex_unlock(&desc->request_mutex);
1601
Thomas Gleixner3aa551c2009-03-23 18:28:15 +01001602out_thread:
Thomas Gleixner3aa551c2009-03-23 18:28:15 +01001603 if (new->thread) {
1604 struct task_struct *t = new->thread;
1605
1606 new->thread = NULL;
Alexander Gordeev05d74ef2012-03-09 14:59:40 +01001607 kthread_stop(t);
Thomas Gleixner3aa551c2009-03-23 18:28:15 +01001608 put_task_struct(t);
1609 }
Thomas Gleixner2a1d3ab2015-09-21 11:01:10 +02001610 if (new->secondary && new->secondary->thread) {
1611 struct task_struct *t = new->secondary->thread;
1612
1613 new->secondary->thread = NULL;
1614 kthread_stop(t);
1615 put_task_struct(t);
1616 }
Sebastian Andrzej Siewiorb6873802011-07-11 12:17:31 +02001617out_mput:
1618 module_put(desc->owner);
Thomas Gleixner3aa551c2009-03-23 18:28:15 +01001619 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001620}
1621
1622/**
Thomas Gleixnerd3c60042008-10-16 09:55:00 +02001623 * setup_irq - setup an interrupt
1624 * @irq: Interrupt line to setup
1625 * @act: irqaction for the interrupt
1626 *
1627 * Used to statically set up interrupts in the early boot process.
1628 */
1629int setup_irq(unsigned int irq, struct irqaction *act)
1630{
David Daney986c0112011-02-09 16:04:25 -08001631 int retval;
Thomas Gleixnerd3c60042008-10-16 09:55:00 +02001632 struct irq_desc *desc = irq_to_desc(irq);
1633
Jon Hunter9b5d5852016-05-10 16:14:35 +01001634 if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc)))
Marc Zyngier31d9d9b2011-09-23 17:03:06 +01001635 return -EINVAL;
Jon Hunterbe45beb2016-06-07 16:12:29 +01001636
1637 retval = irq_chip_pm_get(&desc->irq_data);
1638 if (retval < 0)
1639 return retval;
1640
David Daney986c0112011-02-09 16:04:25 -08001641 retval = __setup_irq(irq, desc, act);
David Daney986c0112011-02-09 16:04:25 -08001642
Jon Hunterbe45beb2016-06-07 16:12:29 +01001643 if (retval)
1644 irq_chip_pm_put(&desc->irq_data);
1645
David Daney986c0112011-02-09 16:04:25 -08001646 return retval;
Thomas Gleixnerd3c60042008-10-16 09:55:00 +02001647}
Magnus Dammeb53b4e2009-03-12 21:05:59 +09001648EXPORT_SYMBOL_GPL(setup_irq);
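/*
 * Usage sketch (hypothetical early boot code, not part of this file):
 * setup_irq() takes a statically allocated irqaction, typically for a
 * timer interrupt that must work before the allocators do. The names
 * my_timer_interrupt and MY_TIMER_IRQ are illustrative only.
 *
 *	static struct irqaction my_timer_action = {
 *		.handler = my_timer_interrupt,
 *		.flags	 = IRQF_TIMER | IRQF_NOBALANCING,
 *		.name	 = "timer",
 *	};
 *
 *	void __init my_time_init(void)
 *	{
 *		if (setup_irq(MY_TIMER_IRQ, &my_timer_action))
 *			pr_err("Failed to install timer interrupt\n");
 *	}
 */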
Thomas Gleixnerd3c60042008-10-16 09:55:00 +02001649
Marc Zyngier31d9d9b2011-09-23 17:03:06 +01001650/*
Magnus Dammcbf94f02009-03-12 21:05:51 +09001651 * Internal function to unregister an irqaction - used to free
1652 * regular and special interrupts that are part of the architecture.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001653 */
Uwe Kleine König83ac4ca2018-03-19 11:52:02 +01001654static struct irqaction *__free_irq(struct irq_desc *desc, void *dev_id)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001655{
Uwe Kleine König83ac4ca2018-03-19 11:52:02 +01001656 unsigned irq = desc->irq_data.irq;
Ingo Molnarf17c7542009-02-17 20:43:37 +01001657 struct irqaction *action, **action_ptr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001658 unsigned long flags;
1659
Ingo Molnarae88a232009-02-15 11:29:50 +01001660 WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);
Yinghai Lu7d94f7c2008-08-19 20:50:14 -07001661
Thomas Gleixner91140142017-06-29 23:33:37 +02001662 mutex_lock(&desc->request_mutex);
Thomas Gleixnerabc7e402015-12-13 18:12:30 +01001663 chip_bus_lock(desc);
Thomas Gleixner239007b2009-11-17 16:46:45 +01001664 raw_spin_lock_irqsave(&desc->lock, flags);
Ingo Molnarae88a232009-02-15 11:29:50 +01001665
1666 /*
1667 * There can be multiple actions per IRQ descriptor, find the right
1668 * one based on the dev_id:
1669 */
Ingo Molnarf17c7542009-02-17 20:43:37 +01001670 action_ptr = &desc->action;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001671 for (;;) {
Ingo Molnarf17c7542009-02-17 20:43:37 +01001672 action = *action_ptr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001673
Ingo Molnarae88a232009-02-15 11:29:50 +01001674 if (!action) {
1675 WARN(1, "Trying to free already-free IRQ %d\n", irq);
Thomas Gleixner239007b2009-11-17 16:46:45 +01001676 raw_spin_unlock_irqrestore(&desc->lock, flags);
Thomas Gleixnerabc7e402015-12-13 18:12:30 +01001677 chip_bus_sync_unlock(desc);
Thomas Gleixner19d39a32017-07-11 23:41:52 +02001678 mutex_unlock(&desc->request_mutex);
Magnus Dammf21cfb22009-03-12 21:05:42 +09001679 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001680 }
Ingo Molnarae88a232009-02-15 11:29:50 +01001681
Ingo Molnar8316e382009-02-17 20:28:29 +01001682 if (action->dev_id == dev_id)
1683 break;
Ingo Molnarf17c7542009-02-17 20:43:37 +01001684 action_ptr = &action->next;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001685 }
Ingo Molnarae88a232009-02-15 11:29:50 +01001686
1687 /* Found it - now remove it from the list of entries: */
Ingo Molnarf17c7542009-02-17 20:43:37 +01001688 *action_ptr = action->next;
Ingo Molnarae88a232009-02-15 11:29:50 +01001689
Thomas Gleixnercab303b2014-08-28 11:44:31 +02001690 irq_pm_remove_action(desc, action);
1691
Ingo Molnarae88a232009-02-15 11:29:50 +01001692 /* If this was the last handler, shut down the IRQ line: */
Thomas Gleixnerc1bacba2014-03-08 08:59:58 +01001693 if (!desc->action) {
Thomas Gleixnere9849772015-10-09 23:28:58 +02001694 irq_settings_clr_disable_unlazy(desc);
Thomas Gleixner46999232011-02-02 21:41:14 +00001695 irq_shutdown(desc);
Thomas Gleixnerc1bacba2014-03-08 08:59:58 +01001696 }
Thomas Gleixner3aa551c2009-03-23 18:28:15 +01001697
Peter P Waskiewicz Jre7a297b2010-04-30 14:44:50 -07001698#ifdef CONFIG_SMP
1699 /* make sure affinity_hint is cleaned up */
1700 if (WARN_ON_ONCE(desc->affinity_hint))
1701 desc->affinity_hint = NULL;
1702#endif
1703
Thomas Gleixner239007b2009-11-17 16:46:45 +01001704 raw_spin_unlock_irqrestore(&desc->lock, flags);
Thomas Gleixner19d39a32017-07-11 23:41:52 +02001705 /*
1706 * Drop bus_lock here so the changes which were done in the chip
1707 * callbacks above are synced out to the irq chips which hang
Lukas Wunner519cc862018-06-24 10:35:30 +02001708 * behind a slow bus (I2C, SPI) before calling synchronize_hardirq().
Thomas Gleixner19d39a32017-07-11 23:41:52 +02001709 *
1710 * Aside of that the bus_lock can also be taken from the threaded
1711 * handler in irq_finalize_oneshot() which results in a deadlock
Lukas Wunner519cc862018-06-24 10:35:30 +02001712 * because kthread_stop() would wait forever for the thread to
Thomas Gleixner19d39a32017-07-11 23:41:52 +02001713 * complete, which is blocked on the bus lock.
1714 *
1715	 * The still held desc->request_mutex protects against a
1716 * concurrent request_irq() of this irq so the release of resources
1717 * and timing data is properly serialized.
1718 */
Thomas Gleixnerabc7e402015-12-13 18:12:30 +01001719 chip_bus_sync_unlock(desc);
Ingo Molnarae88a232009-02-15 11:29:50 +01001720
1721 unregister_handler_proc(irq, action);
1722
1723 /* Make sure it's not being used on another CPU: */
Lukas Wunner519cc862018-06-24 10:35:30 +02001724 synchronize_hardirq(irq);
Ingo Molnarae88a232009-02-15 11:29:50 +01001725
1726#ifdef CONFIG_DEBUG_SHIRQ
1727 /*
1728 * It's a shared IRQ -- the driver ought to be prepared for an IRQ
1729	 * event to happen even now that it's being freed, so let's make sure that
1730 * is so by doing an extra call to the handler ....
1731 *
1732 * ( We do this after actually deregistering it, to make sure that a
Jonathan Neuschäfer0a13ec02018-06-17 14:40:18 +02001733 * 'real' IRQ doesn't run in parallel with our fake. )
Ingo Molnarae88a232009-02-15 11:29:50 +01001734 */
1735 if (action->flags & IRQF_SHARED) {
1736 local_irq_save(flags);
1737 action->handler(irq, dev_id);
1738 local_irq_restore(flags);
1739 }
1740#endif
Linus Torvalds2d860ad2009-08-13 13:05:10 -07001741
Lukas Wunner519cc862018-06-24 10:35:30 +02001742 /*
1743 * The action has already been removed above, but the thread writes
1744	 * its oneshot mask bit when it completes. The request_mutex is
1745	 * held across this, which prevents __setup_irq() from handing out
1746 * the same bit to a newly requested action.
1747 */
Linus Torvalds2d860ad2009-08-13 13:05:10 -07001748 if (action->thread) {
Alexander Gordeev05d74ef2012-03-09 14:59:40 +01001749 kthread_stop(action->thread);
Linus Torvalds2d860ad2009-08-13 13:05:10 -07001750 put_task_struct(action->thread);
Thomas Gleixner2a1d3ab2015-09-21 11:01:10 +02001751 if (action->secondary && action->secondary->thread) {
1752 kthread_stop(action->secondary->thread);
1753 put_task_struct(action->secondary->thread);
1754 }
Linus Torvalds2d860ad2009-08-13 13:05:10 -07001755 }
1756
Thomas Gleixner19d39a32017-07-11 23:41:52 +02001757 /* Last action releases resources */
Thomas Gleixner23438772017-06-29 23:33:39 +02001758 if (!desc->action) {
Thomas Gleixner19d39a32017-07-11 23:41:52 +02001759 /*
1760		 * Reacquire bus lock as irq_release_resources() might
1761 * require it to deallocate resources over the slow bus.
1762 */
1763 chip_bus_lock(desc);
Thomas Gleixner46e48e22017-06-29 23:33:38 +02001764 irq_release_resources(desc);
Thomas Gleixner19d39a32017-07-11 23:41:52 +02001765 chip_bus_sync_unlock(desc);
Thomas Gleixner23438772017-06-29 23:33:39 +02001766 irq_remove_timings(desc);
1767 }
Thomas Gleixner46e48e22017-06-29 23:33:38 +02001768
Thomas Gleixner91140142017-06-29 23:33:37 +02001769 mutex_unlock(&desc->request_mutex);
1770
Jon Hunterbe45beb2016-06-07 16:12:29 +01001771 irq_chip_pm_put(&desc->irq_data);
Sebastian Andrzej Siewiorb6873802011-07-11 12:17:31 +02001772 module_put(desc->owner);
Thomas Gleixner2a1d3ab2015-09-21 11:01:10 +02001773 kfree(action->secondary);
Magnus Dammf21cfb22009-03-12 21:05:42 +09001774 return action;
1775}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001776
1777/**
Magnus Dammcbf94f02009-03-12 21:05:51 +09001778 * remove_irq - free an interrupt
1779 * @irq: Interrupt line to free
1780 * @act: irqaction for the interrupt
1781 *
1782 * Used to remove interrupts statically set up by the early boot process.
1783 */
1784void remove_irq(unsigned int irq, struct irqaction *act)
1785{
Marc Zyngier31d9d9b2011-09-23 17:03:06 +01001786 struct irq_desc *desc = irq_to_desc(irq);
1787
1788 if (desc && !WARN_ON(irq_settings_is_per_cpu_devid(desc)))
Uwe Kleine König83ac4ca2018-03-19 11:52:02 +01001789 __free_irq(desc, act->dev_id);
Magnus Dammcbf94f02009-03-12 21:05:51 +09001790}
Magnus Dammeb53b4e2009-03-12 21:05:59 +09001791EXPORT_SYMBOL_GPL(remove_irq);
Magnus Dammcbf94f02009-03-12 21:05:51 +09001792
1793/**
Magnus Dammf21cfb22009-03-12 21:05:42 +09001794 * free_irq - free an interrupt allocated with request_irq
Linus Torvalds1da177e2005-04-16 15:20:36 -07001795 * @irq: Interrupt line to free
1796 * @dev_id: Device identity to free
1797 *
1798 * Remove an interrupt handler. The handler is removed and if the
1799 * interrupt line is no longer in use by any driver it is disabled.
1800 * On a shared IRQ the caller must ensure the interrupt is disabled
1801 * on the card it drives before calling this function. The function
1802 * does not return until any executing interrupts for this IRQ
1803 * have completed.
1804 *
1805 * This function must not be called from interrupt context.
Christoph Hellwig25ce4be2017-04-13 09:06:41 +02001806 *
1807 * Returns the devname argument passed to request_irq.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001808 */
Christoph Hellwig25ce4be2017-04-13 09:06:41 +02001809const void *free_irq(unsigned int irq, void *dev_id)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001810{
Thomas Gleixner70aedd22009-08-13 12:17:48 +02001811 struct irq_desc *desc = irq_to_desc(irq);
Christoph Hellwig25ce4be2017-04-13 09:06:41 +02001812 struct irqaction *action;
1813 const char *devname;
Thomas Gleixner70aedd22009-08-13 12:17:48 +02001814
Marc Zyngier31d9d9b2011-09-23 17:03:06 +01001815 if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc)))
Christoph Hellwig25ce4be2017-04-13 09:06:41 +02001816 return NULL;
Thomas Gleixner70aedd22009-08-13 12:17:48 +02001817
Ben Hutchingscd7eab42011-01-19 21:01:44 +00001818#ifdef CONFIG_SMP
1819 if (WARN_ON(desc->affinity_notify))
1820 desc->affinity_notify = NULL;
1821#endif
1822
Uwe Kleine König83ac4ca2018-03-19 11:52:02 +01001823 action = __free_irq(desc, dev_id);
Alexandru Moise2827a412017-09-19 22:04:12 +02001824
1825 if (!action)
1826 return NULL;
1827
Christoph Hellwig25ce4be2017-04-13 09:06:41 +02001828 devname = action->name;
1829 kfree(action);
1830 return devname;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001831}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001832EXPORT_SYMBOL(free_irq);
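/*
 * Usage sketch (hypothetical driver teardown, not part of this file):
 * the devname return value is mostly interesting for wrappers such as
 * the devres code. struct my_dev and its fields are illustrative only;
 * dev_id must match what was passed to request_irq().
 *
 *	static void my_shutdown(struct my_dev *md)
 *	{
 *		const char *name;
 *
 *		disable_irq(md->irq);
 *		name = free_irq(md->irq, md);
 *		pr_debug("released irq %d (%s)\n", md->irq, name);
 *	}
 */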
1833
Julien Thierryb5259032019-01-31 14:53:58 +00001834/* This function must be called with desc->lock held */
1835static const void *__cleanup_nmi(unsigned int irq, struct irq_desc *desc)
1836{
1837 const char *devname = NULL;
1838
1839 desc->istate &= ~IRQS_NMI;
1840
1841 if (!WARN_ON(desc->action == NULL)) {
1842 irq_pm_remove_action(desc, desc->action);
1843 devname = desc->action->name;
1844 unregister_handler_proc(irq, desc->action);
1845
1846 kfree(desc->action);
1847 desc->action = NULL;
1848 }
1849
1850 irq_settings_clr_disable_unlazy(desc);
1851 irq_shutdown(desc);
1852
1853 irq_release_resources(desc);
1854
1855 irq_chip_pm_put(&desc->irq_data);
1856 module_put(desc->owner);
1857
1858 return devname;
1859}
1860
1861const void *free_nmi(unsigned int irq, void *dev_id)
1862{
1863 struct irq_desc *desc = irq_to_desc(irq);
1864 unsigned long flags;
1865 const void *devname;
1866
1867 if (!desc || WARN_ON(!(desc->istate & IRQS_NMI)))
1868 return NULL;
1869
1870 if (WARN_ON(irq_settings_is_per_cpu_devid(desc)))
1871 return NULL;
1872
1873 /* NMI still enabled */
1874 if (WARN_ON(desc->depth == 0))
1875 disable_nmi_nosync(irq);
1876
1877 raw_spin_lock_irqsave(&desc->lock, flags);
1878
1879 irq_nmi_teardown(desc);
1880 devname = __cleanup_nmi(irq, desc);
1881
1882 raw_spin_unlock_irqrestore(&desc->lock, flags);
1883
1884 return devname;
1885}
1886
Linus Torvalds1da177e2005-04-16 15:20:36 -07001887/**
Thomas Gleixner3aa551c2009-03-23 18:28:15 +01001888 * request_threaded_irq - allocate an interrupt line
Linus Torvalds1da177e2005-04-16 15:20:36 -07001889 * @irq: Interrupt line to allocate
Thomas Gleixner3aa551c2009-03-23 18:28:15 +01001890 * @handler: Function to be called when the IRQ occurs.
1891 * Primary handler for threaded interrupts
Thomas Gleixnerb25c3402009-08-13 12:17:22 +02001892 * If NULL and thread_fn != NULL the default
1893 * primary handler is installed
Thomas Gleixnerf48fe812009-03-24 11:46:22 +01001894 * @thread_fn: Function called from the irq handler thread
1895 * If NULL, no irq thread is created
Linus Torvalds1da177e2005-04-16 15:20:36 -07001896 * @irqflags: Interrupt type flags
1897 * @devname: An ascii name for the claiming device
1898 * @dev_id: A cookie passed back to the handler function
1899 *
1900 * This call allocates interrupt resources and enables the
1901 * interrupt line and IRQ handling. From the point this
1902 * call is made your handler function may be invoked. Since
1903 * your handler function must clear any interrupt the board
1904 * raises, you must take care both to initialise your hardware
1905 * and to set up the interrupt handler in the right order.
1906 *
Thomas Gleixner3aa551c2009-03-23 18:28:15 +01001907 * If you want to set up a threaded irq handler for your device
Javi Merino6d21af42011-10-26 10:16:11 +01001908 * then you need to supply @handler and @thread_fn. @handler is
Thomas Gleixner3aa551c2009-03-23 18:28:15 +01001909 * still called in hard interrupt context and has to check
1910 * whether the interrupt originates from the device. If yes it
1911 * needs to disable the interrupt on the device and return
Steven Rostedt39a2edd2009-05-12 14:35:54 -04001912 * IRQ_WAKE_THREAD which will wake up the handler thread and run
Thomas Gleixner3aa551c2009-03-23 18:28:15 +01001913 * @thread_fn. This split handler design is necessary to support
1914 * shared interrupts.
1915 *
Linus Torvalds1da177e2005-04-16 15:20:36 -07001916 * Dev_id must be globally unique. Normally the address of the
1917 * device data structure is used as the cookie. Since the handler
1918 * receives this value it makes sense to use it.
1919 *
1920 * If your interrupt is shared you must pass a non NULL dev_id
1921 * as this is required when freeing the interrupt.
1922 *
1923 * Flags:
1924 *
Thomas Gleixner3cca53b2006-07-01 19:29:31 -07001925 * IRQF_SHARED Interrupt is shared
David Brownell0c5d1eb2008-10-01 14:46:18 -07001926 * IRQF_TRIGGER_* Specify active edge(s) or level
Linus Torvalds1da177e2005-04-16 15:20:36 -07001927 *
1928 */
Thomas Gleixner3aa551c2009-03-23 18:28:15 +01001929int request_threaded_irq(unsigned int irq, irq_handler_t handler,
1930 irq_handler_t thread_fn, unsigned long irqflags,
1931 const char *devname, void *dev_id)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001932{
Ingo Molnar06fcb0c2006-06-29 02:24:40 -07001933 struct irqaction *action;
Yinghai Lu08678b02008-08-19 20:50:05 -07001934 struct irq_desc *desc;
Thomas Gleixnerd3c60042008-10-16 09:55:00 +02001935 int retval;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001936
Chen Fane237a552016-02-15 12:52:01 +08001937 if (irq == IRQ_NOTCONNECTED)
1938 return -ENOTCONN;
1939
David Brownell470c6622008-12-01 14:31:37 -08001940 /*
Linus Torvalds1da177e2005-04-16 15:20:36 -07001941 * Sanity-check: shared interrupts must pass in a real dev-ID,
1942 * otherwise we'll have trouble later trying to figure out
1943 * which interrupt is which (messes up the interrupt freeing
1944 * logic etc).
Rafael J. Wysocki17f48032015-02-27 00:07:55 +01001945 *
1946 * Also IRQF_COND_SUSPEND only makes sense for shared interrupts and
1947 * it cannot be set along with IRQF_NO_SUSPEND.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001948 */
Rafael J. Wysocki17f48032015-02-27 00:07:55 +01001949 if (((irqflags & IRQF_SHARED) && !dev_id) ||
1950 (!(irqflags & IRQF_SHARED) && (irqflags & IRQF_COND_SUSPEND)) ||
1951 ((irqflags & IRQF_NO_SUSPEND) && (irqflags & IRQF_COND_SUSPEND)))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001952 return -EINVAL;
Yinghai Lu7d94f7c2008-08-19 20:50:14 -07001953
Yinghai Lucb5bc832008-08-19 20:50:17 -07001954 desc = irq_to_desc(irq);
Yinghai Lu7d94f7c2008-08-19 20:50:14 -07001955 if (!desc)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001956 return -EINVAL;
Yinghai Lu7d94f7c2008-08-19 20:50:14 -07001957
Marc Zyngier31d9d9b2011-09-23 17:03:06 +01001958 if (!irq_settings_can_request(desc) ||
1959 WARN_ON(irq_settings_is_per_cpu_devid(desc)))
Thomas Gleixner6550c772006-06-29 02:24:49 -07001960 return -EINVAL;
Thomas Gleixnerb25c3402009-08-13 12:17:22 +02001961
1962 if (!handler) {
1963 if (!thread_fn)
1964 return -EINVAL;
1965 handler = irq_default_primary_handler;
1966 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001967
Thomas Gleixner45535732009-02-22 23:00:32 +01001968 action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001969 if (!action)
1970 return -ENOMEM;
1971
1972 action->handler = handler;
Thomas Gleixner3aa551c2009-03-23 18:28:15 +01001973 action->thread_fn = thread_fn;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001974 action->flags = irqflags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001975 action->name = devname;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001976 action->dev_id = dev_id;
1977
Jon Hunterbe45beb2016-06-07 16:12:29 +01001978 retval = irq_chip_pm_get(&desc->irq_data);
Shawn Lin4396f462016-08-22 16:21:52 +08001979 if (retval < 0) {
1980 kfree(action);
Jon Hunterbe45beb2016-06-07 16:12:29 +01001981 return retval;
Shawn Lin4396f462016-08-22 16:21:52 +08001982 }
Jon Hunterbe45beb2016-06-07 16:12:29 +01001983
Thomas Gleixnerd3c60042008-10-16 09:55:00 +02001984 retval = __setup_irq(irq, desc, action);
Thomas Gleixner70aedd22009-08-13 12:17:48 +02001985
Thomas Gleixner2a1d3ab2015-09-21 11:01:10 +02001986 if (retval) {
Jon Hunterbe45beb2016-06-07 16:12:29 +01001987 irq_chip_pm_put(&desc->irq_data);
Thomas Gleixner2a1d3ab2015-09-21 11:01:10 +02001988 kfree(action->secondary);
Anton Vorontsov377bf1e2008-08-21 22:58:28 +04001989 kfree(action);
Thomas Gleixner2a1d3ab2015-09-21 11:01:10 +02001990 }
Anton Vorontsov377bf1e2008-08-21 22:58:28 +04001991
Thomas Gleixner6d83f942011-02-18 23:27:23 +01001992#ifdef CONFIG_DEBUG_SHIRQ_FIXME
Luis Henriques6ce51c42009-04-01 18:06:35 +01001993 if (!retval && (irqflags & IRQF_SHARED)) {
David Woodhousea304e1b2007-02-12 00:52:00 -08001994 /*
1995 * It's a shared IRQ -- the driver ought to be prepared for it
1996 * to happen immediately, so let's make sure....
Anton Vorontsov377bf1e2008-08-21 22:58:28 +04001997 * We disable the irq to make sure that a 'real' IRQ doesn't
1998 * run in parallel with our fake.
David Woodhousea304e1b2007-02-12 00:52:00 -08001999 */
Jarek Poplawski59845b12007-08-30 23:56:34 -07002000 unsigned long flags;
David Woodhousea304e1b2007-02-12 00:52:00 -08002001
Anton Vorontsov377bf1e2008-08-21 22:58:28 +04002002 disable_irq(irq);
Jarek Poplawski59845b12007-08-30 23:56:34 -07002003 local_irq_save(flags);
Anton Vorontsov377bf1e2008-08-21 22:58:28 +04002004
Jarek Poplawski59845b12007-08-30 23:56:34 -07002005 handler(irq, dev_id);
Anton Vorontsov377bf1e2008-08-21 22:58:28 +04002006
Jarek Poplawski59845b12007-08-30 23:56:34 -07002007 local_irq_restore(flags);
Anton Vorontsov377bf1e2008-08-21 22:58:28 +04002008 enable_irq(irq);
David Woodhousea304e1b2007-02-12 00:52:00 -08002009 }
2010#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07002011 return retval;
2012}
Thomas Gleixner3aa551c2009-03-23 18:28:15 +01002013EXPORT_SYMBOL(request_threaded_irq);
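/*
 * Usage sketch (hypothetical driver, not part of this file): the primary
 * handler runs in hard interrupt context, checks whether the device
 * raised the interrupt and silences it, then defers the slow work to the
 * irq thread. struct my_dev and the my_*() register helpers are
 * illustrative assumptions.
 *
 *	static irqreturn_t my_quick_check(int irq, void *dev_id)
 *	{
 *		struct my_dev *md = dev_id;
 *
 *		if (!my_irq_pending(md))
 *			return IRQ_NONE;
 *		my_mask_device_irq(md);
 *		return IRQ_WAKE_THREAD;
 *	}
 *
 *	static irqreturn_t my_slow_work(int irq, void *dev_id)
 *	{
 *		struct my_dev *md = dev_id;
 *
 *		my_drain_fifo(md);
 *		my_unmask_device_irq(md);
 *		return IRQ_HANDLED;
 *	}
 *
 *	ret = request_threaded_irq(md->irq, my_quick_check, my_slow_work,
 *				   IRQF_SHARED, "my_dev", md);
 */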
Marc Zyngierae731f82010-03-15 22:56:33 +00002014
2015/**
2016 * request_any_context_irq - allocate an interrupt line
2017 * @irq: Interrupt line to allocate
2018 * @handler: Function to be called when the IRQ occurs.
2019 * Threaded handler for threaded interrupts.
2020 * @flags: Interrupt type flags
2021 * @name: An ascii name for the claiming device
2022 * @dev_id: A cookie passed back to the handler function
2023 *
2024 * This call allocates interrupt resources and enables the
2025 * interrupt line and IRQ handling. It selects either a
2026 * hardirq or threaded handling method depending on the
2027 * context.
2028 *
2029 * On failure, it returns a negative value. On success,
2030 * it returns either IRQC_IS_HARDIRQ or IRQC_IS_NESTED.
2031 */
2032int request_any_context_irq(unsigned int irq, irq_handler_t handler,
2033 unsigned long flags, const char *name, void *dev_id)
2034{
Chen Fane237a552016-02-15 12:52:01 +08002035 struct irq_desc *desc;
Marc Zyngierae731f82010-03-15 22:56:33 +00002036 int ret;
2037
Chen Fane237a552016-02-15 12:52:01 +08002038 if (irq == IRQ_NOTCONNECTED)
2039 return -ENOTCONN;
2040
2041 desc = irq_to_desc(irq);
Marc Zyngierae731f82010-03-15 22:56:33 +00002042 if (!desc)
2043 return -EINVAL;
2044
Thomas Gleixner1ccb4e62011-02-09 14:44:17 +01002045 if (irq_settings_is_nested_thread(desc)) {
Marc Zyngierae731f82010-03-15 22:56:33 +00002046 ret = request_threaded_irq(irq, NULL, handler,
2047 flags, name, dev_id);
2048 return !ret ? IRQC_IS_NESTED : ret;
2049 }
2050
2051 ret = request_irq(irq, handler, flags, name, dev_id);
2052 return !ret ? IRQC_IS_HARDIRQ : ret;
2053}
2054EXPORT_SYMBOL_GPL(request_any_context_irq);
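/*
 * Usage sketch (hypothetical, not part of this file): useful when the
 * same driver may sit behind a GPIO expander (nested, threaded irq) or a
 * plain SoC interrupt line and must not care which it got. The handler
 * and the my_dev cookie are illustrative only.
 *
 *	ret = request_any_context_irq(irq, my_event_handler,
 *				      IRQF_TRIGGER_FALLING, "my_sensor",
 *				      my_dev);
 *	if (ret < 0)
 *		return ret;
 *	nested = (ret == IRQC_IS_NESTED);
 */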
Marc Zyngier31d9d9b2011-09-23 17:03:06 +01002055
Julien Thierryb5259032019-01-31 14:53:58 +00002056/**
2057 * request_nmi - allocate an interrupt line for NMI delivery
2058 * @irq: Interrupt line to allocate
2059 * @handler: Function to be called when the IRQ occurs.
2060 * Threaded handler for threaded interrupts.
2061 * @irqflags: Interrupt type flags
2062 * @name: An ascii name for the claiming device
2063 * @dev_id: A cookie passed back to the handler function
2064 *
2065 * This call allocates interrupt resources and enables the
2066 * interrupt line and IRQ handling. It sets up the IRQ line
2067 * to be handled as an NMI.
2068 *
2069 * An interrupt line delivering NMIs cannot be shared and IRQ handling
2070 * cannot be threaded.
2071 *
2072 * Interrupt lines requested for NMI delivery must produce per cpu
2073 * interrupts and have the auto enable setting disabled.
2074 *
2075 * Dev_id must be globally unique. Normally the address of the
2076 * device data structure is used as the cookie. Since the handler
2077 * receives this value it makes sense to use it.
2078 *
2079 * If the interrupt line cannot be used to deliver NMIs, the function
2080 * will fail and return a negative value.
2081 */
2082int request_nmi(unsigned int irq, irq_handler_t handler,
2083 unsigned long irqflags, const char *name, void *dev_id)
2084{
2085 struct irqaction *action;
2086 struct irq_desc *desc;
2087 unsigned long flags;
2088 int retval;
2089
2090 if (irq == IRQ_NOTCONNECTED)
2091 return -ENOTCONN;
2092
2093	/* NMI cannot be shared, nor used for polling */
2094 if (irqflags & (IRQF_SHARED | IRQF_COND_SUSPEND | IRQF_IRQPOLL))
2095 return -EINVAL;
2096
2097 if (!(irqflags & IRQF_PERCPU))
2098 return -EINVAL;
2099
2100 if (!handler)
2101 return -EINVAL;
2102
2103 desc = irq_to_desc(irq);
2104
2105 if (!desc || irq_settings_can_autoenable(desc) ||
2106 !irq_settings_can_request(desc) ||
2107 WARN_ON(irq_settings_is_per_cpu_devid(desc)) ||
2108 !irq_supports_nmi(desc))
2109 return -EINVAL;
2110
2111 action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
2112 if (!action)
2113 return -ENOMEM;
2114
2115 action->handler = handler;
2116 action->flags = irqflags | IRQF_NO_THREAD | IRQF_NOBALANCING;
2117 action->name = name;
2118 action->dev_id = dev_id;
2119
2120 retval = irq_chip_pm_get(&desc->irq_data);
2121 if (retval < 0)
2122 goto err_out;
2123
2124 retval = __setup_irq(irq, desc, action);
2125 if (retval)
2126 goto err_irq_setup;
2127
2128 raw_spin_lock_irqsave(&desc->lock, flags);
2129
2130 /* Setup NMI state */
2131 desc->istate |= IRQS_NMI;
2132 retval = irq_nmi_setup(desc);
2133 if (retval) {
2134 __cleanup_nmi(irq, desc);
2135 raw_spin_unlock_irqrestore(&desc->lock, flags);
2136 return -EINVAL;
2137 }
2138
2139 raw_spin_unlock_irqrestore(&desc->lock, flags);
2140
2141 return 0;
2142
2143err_irq_setup:
2144 irq_chip_pm_put(&desc->irq_data);
2145err_out:
2146 kfree(action);
2147
2148 return retval;
2149}
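/*
 * Usage sketch (hypothetical, not part of this file): NMI style requests
 * come from core code such as watchdogs or profilers, not from ordinary
 * drivers. The line must allow NMI delivery, must not be shared and must
 * not autoenable; the handler runs with NMI semantics, so it must not
 * sleep or take locks shared with normal contexts. Names are
 * illustrative only.
 *
 *	static irqreturn_t my_nmi_handler(int irq, void *dev_id)
 *	{
 *		my_snapshot_state(dev_id);
 *		return IRQ_HANDLED;
 *	}
 *
 *	ret = request_nmi(irq, my_nmi_handler, IRQF_PERCPU,
 *			  "my_watchdog", &my_cookie);
 */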
2150
Marc Zyngier1e7c5fd2011-09-30 10:48:47 +01002151void enable_percpu_irq(unsigned int irq, unsigned int type)
Marc Zyngier31d9d9b2011-09-23 17:03:06 +01002152{
2153 unsigned int cpu = smp_processor_id();
2154 unsigned long flags;
2155 struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);
2156
2157 if (!desc)
2158 return;
2159
Marc Zyngierf35ad082016-06-13 10:39:44 +01002160 /*
2161 * If the trigger type is not specified by the caller, then
2162 * use the default for this interrupt.
2163 */
Marc Zyngier1e7c5fd2011-09-30 10:48:47 +01002164 type &= IRQ_TYPE_SENSE_MASK;
Marc Zyngierf35ad082016-06-13 10:39:44 +01002165 if (type == IRQ_TYPE_NONE)
2166 type = irqd_get_trigger_type(&desc->irq_data);
2167
Marc Zyngier1e7c5fd2011-09-30 10:48:47 +01002168 if (type != IRQ_TYPE_NONE) {
2169 int ret;
2170
Jiang Liua1ff5412015-06-23 19:47:29 +02002171 ret = __irq_set_trigger(desc, type);
Marc Zyngier1e7c5fd2011-09-30 10:48:47 +01002172
2173 if (ret) {
Thomas Gleixner32cffdd2011-10-04 18:43:57 +02002174 WARN(1, "failed to set type for IRQ%d\n", irq);
Marc Zyngier1e7c5fd2011-09-30 10:48:47 +01002175 goto out;
2176 }
2177 }
2178
Marc Zyngier31d9d9b2011-09-23 17:03:06 +01002179 irq_percpu_enable(desc, cpu);
Marc Zyngier1e7c5fd2011-09-30 10:48:47 +01002180out:
Marc Zyngier31d9d9b2011-09-23 17:03:06 +01002181 irq_put_desc_unlock(desc, flags);
2182}
Chris Metcalf36a5df82013-02-01 15:04:26 -05002183EXPORT_SYMBOL_GPL(enable_percpu_irq);
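/*
 * Usage sketch (hypothetical, not part of this file): per-CPU interrupts
 * are enabled on each CPU separately, typically from a CPU hotplug
 * "starting" callback so that newly onlined CPUs pick the line up too.
 * my_ppi_irq and the callbacks are illustrative only.
 *
 *	static int my_starting_cpu(unsigned int cpu)
 *	{
 *		enable_percpu_irq(my_ppi_irq, IRQ_TYPE_NONE);
 *		return 0;
 *	}
 *
 *	static int my_dying_cpu(unsigned int cpu)
 *	{
 *		disable_percpu_irq(my_ppi_irq);
 *		return 0;
 *	}
 *
 *	cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "mydrv:starting",
 *			  my_starting_cpu, my_dying_cpu);
 */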
Marc Zyngier31d9d9b2011-09-23 17:03:06 +01002184
Julien Thierry4b078c32019-01-31 14:53:59 +00002185void enable_percpu_nmi(unsigned int irq, unsigned int type)
2186{
2187 enable_percpu_irq(irq, type);
2188}
2189
Thomas Petazzonif0cb3222015-10-20 15:23:51 +02002190/**
2191 * irq_percpu_is_enabled - Check whether the per cpu irq is enabled
2192 * @irq: Linux irq number to check for
2193 *
2194 * Must be called from a non-migratable context. Returns the enable
2195 * state of a per cpu interrupt on the current cpu.
2196 */
2197bool irq_percpu_is_enabled(unsigned int irq)
2198{
2199 unsigned int cpu = smp_processor_id();
2200 struct irq_desc *desc;
2201 unsigned long flags;
2202 bool is_enabled;
2203
2204 desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);
2205 if (!desc)
2206 return false;
2207
2208 is_enabled = cpumask_test_cpu(cpu, desc->percpu_enabled);
2209 irq_put_desc_unlock(desc, flags);
2210
2211 return is_enabled;
2212}
2213EXPORT_SYMBOL_GPL(irq_percpu_is_enabled);
2214
Marc Zyngier31d9d9b2011-09-23 17:03:06 +01002215void disable_percpu_irq(unsigned int irq)
2216{
2217 unsigned int cpu = smp_processor_id();
2218 unsigned long flags;
2219 struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);
2220
2221 if (!desc)
2222 return;
2223
2224 irq_percpu_disable(desc, cpu);
2225 irq_put_desc_unlock(desc, flags);
2226}
Chris Metcalf36a5df82013-02-01 15:04:26 -05002227EXPORT_SYMBOL_GPL(disable_percpu_irq);
Marc Zyngier31d9d9b2011-09-23 17:03:06 +01002228
Julien Thierry4b078c32019-01-31 14:53:59 +00002229void disable_percpu_nmi(unsigned int irq)
2230{
2231 disable_percpu_irq(irq);
2232}
2233
Marc Zyngier31d9d9b2011-09-23 17:03:06 +01002234/*
2235 * Internal function to unregister a percpu irqaction.
2236 */
2237static struct irqaction *__free_percpu_irq(unsigned int irq, void __percpu *dev_id)
2238{
2239 struct irq_desc *desc = irq_to_desc(irq);
2240 struct irqaction *action;
2241 unsigned long flags;
2242
2243 WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);
2244
2245 if (!desc)
2246 return NULL;
2247
2248 raw_spin_lock_irqsave(&desc->lock, flags);
2249
2250 action = desc->action;
2251 if (!action || action->percpu_dev_id != dev_id) {
2252 WARN(1, "Trying to free already-free IRQ %d\n", irq);
2253 goto bad;
2254 }
2255
2256 if (!cpumask_empty(desc->percpu_enabled)) {
2257 WARN(1, "percpu IRQ %d still enabled on CPU%d!\n",
2258 irq, cpumask_first(desc->percpu_enabled));
2259 goto bad;
2260 }
2261
2262 /* Found it - now remove it from the list of entries: */
2263 desc->action = NULL;
2264
Julien Thierry4b078c32019-01-31 14:53:59 +00002265 desc->istate &= ~IRQS_NMI;
2266
Marc Zyngier31d9d9b2011-09-23 17:03:06 +01002267 raw_spin_unlock_irqrestore(&desc->lock, flags);
2268
2269 unregister_handler_proc(irq, action);
2270
Jon Hunterbe45beb2016-06-07 16:12:29 +01002271 irq_chip_pm_put(&desc->irq_data);
Marc Zyngier31d9d9b2011-09-23 17:03:06 +01002272 module_put(desc->owner);
2273 return action;
2274
2275bad:
2276 raw_spin_unlock_irqrestore(&desc->lock, flags);
2277 return NULL;
2278}
2279
2280/**
2281 * remove_percpu_irq - free a per-cpu interrupt
2282 * @irq: Interrupt line to free
2283 * @act: irqaction for the interrupt
2284 *
2285 * Used to remove interrupts statically set up by the early boot process.
2286 */
2287void remove_percpu_irq(unsigned int irq, struct irqaction *act)
2288{
2289 struct irq_desc *desc = irq_to_desc(irq);
2290
2291 if (desc && irq_settings_is_per_cpu_devid(desc))
2292 __free_percpu_irq(irq, act->percpu_dev_id);
2293}
2294
2295/**
2296 * free_percpu_irq - free an interrupt allocated with request_percpu_irq
2297 * @irq: Interrupt line to free
2298 * @dev_id: Device identity to free
2299 *
2300 * Remove a percpu interrupt handler. The handler is removed, but
2301 * the interrupt line is not disabled. This must be done on each
2302 * CPU before calling this function. The function does not return
2303 * until any executing interrupts for this IRQ have completed.
2304 *
2305 * This function must not be called from interrupt context.
2306 */
2307void free_percpu_irq(unsigned int irq, void __percpu *dev_id)
2308{
2309 struct irq_desc *desc = irq_to_desc(irq);
2310
2311 if (!desc || !irq_settings_is_per_cpu_devid(desc))
2312 return;
2313
2314 chip_bus_lock(desc);
2315 kfree(__free_percpu_irq(irq, dev_id));
2316 chip_bus_sync_unlock(desc);
2317}
Maxime Ripardaec2e2a2015-09-25 18:09:33 +02002318EXPORT_SYMBOL_GPL(free_percpu_irq);
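/*
 * Usage sketch (hypothetical, not part of this file): the line has to be
 * disabled on every CPU before it is freed, for instance via a cross
 * call. The dev_id must be the same per-cpu cookie that was used when
 * requesting the interrupt; names are illustrative only.
 *
 *	static void my_disable_ppi(void *unused)
 *	{
 *		disable_percpu_irq(my_ppi_irq);
 *	}
 *
 *	on_each_cpu(my_disable_ppi, NULL, 1);
 *	free_percpu_irq(my_ppi_irq, &my_pcpu_state);
 */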
Marc Zyngier31d9d9b2011-09-23 17:03:06 +01002319
Julien Thierry4b078c32019-01-31 14:53:59 +00002320void free_percpu_nmi(unsigned int irq, void __percpu *dev_id)
2321{
2322 struct irq_desc *desc = irq_to_desc(irq);
2323
2324 if (!desc || !irq_settings_is_per_cpu_devid(desc))
2325 return;
2326
2327 if (WARN_ON(!(desc->istate & IRQS_NMI)))
2328 return;
2329
2330 kfree(__free_percpu_irq(irq, dev_id));
2331}
2332
Marc Zyngier31d9d9b2011-09-23 17:03:06 +01002333/**
2334 * setup_percpu_irq - setup a per-cpu interrupt
2335 * @irq: Interrupt line to setup
2336 * @act: irqaction for the interrupt
2337 *
2338 * Used to statically set up per-cpu interrupts in the early boot process.
2339 */
2340int setup_percpu_irq(unsigned int irq, struct irqaction *act)
2341{
2342 struct irq_desc *desc = irq_to_desc(irq);
2343 int retval;
2344
2345 if (!desc || !irq_settings_is_per_cpu_devid(desc))
2346 return -EINVAL;
Jon Hunterbe45beb2016-06-07 16:12:29 +01002347
2348 retval = irq_chip_pm_get(&desc->irq_data);
2349 if (retval < 0)
2350 return retval;
2351
Marc Zyngier31d9d9b2011-09-23 17:03:06 +01002352 retval = __setup_irq(irq, desc, act);
Marc Zyngier31d9d9b2011-09-23 17:03:06 +01002353
Jon Hunterbe45beb2016-06-07 16:12:29 +01002354 if (retval)
2355 irq_chip_pm_put(&desc->irq_data);
2356
Marc Zyngier31d9d9b2011-09-23 17:03:06 +01002357 return retval;
2358}
2359
2360/**
Daniel Lezcanoc80081b2017-07-06 14:29:04 +02002361 * __request_percpu_irq - allocate a percpu interrupt line
Marc Zyngier31d9d9b2011-09-23 17:03:06 +01002362 * @irq: Interrupt line to allocate
2363 * @handler: Function to be called when the IRQ occurs.
Daniel Lezcanoc80081b2017-07-06 14:29:04 +02002364 * @flags: Interrupt type flags (IRQF_TIMER only)
Marc Zyngier31d9d9b2011-09-23 17:03:06 +01002365 * @devname: An ascii name for the claiming device
2366 * @dev_id: A percpu cookie passed back to the handler function
2367 *
Maxime Riparda1b7feb2015-09-25 18:09:32 +02002368 * This call allocates interrupt resources and enables the
2369 * interrupt on the local CPU. If the interrupt is supposed to be
2370 * enabled on other CPUs, it has to be done on each CPU using
2371 * enable_percpu_irq().
Marc Zyngier31d9d9b2011-09-23 17:03:06 +01002372 *
2373 * Dev_id must be globally unique. It is a per-cpu variable, and
2374 * the handler gets called with the interrupted CPU's instance of
2375 * that variable.
2376 */
Daniel Lezcanoc80081b2017-07-06 14:29:04 +02002377int __request_percpu_irq(unsigned int irq, irq_handler_t handler,
2378 unsigned long flags, const char *devname,
2379 void __percpu *dev_id)
Marc Zyngier31d9d9b2011-09-23 17:03:06 +01002380{
2381 struct irqaction *action;
2382 struct irq_desc *desc;
2383 int retval;
2384
2385 if (!dev_id)
2386 return -EINVAL;
2387
2388 desc = irq_to_desc(irq);
2389 if (!desc || !irq_settings_can_request(desc) ||
2390 !irq_settings_is_per_cpu_devid(desc))
2391 return -EINVAL;
2392
Daniel Lezcanoc80081b2017-07-06 14:29:04 +02002393 if (flags && flags != IRQF_TIMER)
2394 return -EINVAL;
2395
Marc Zyngier31d9d9b2011-09-23 17:03:06 +01002396 action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
2397 if (!action)
2398 return -ENOMEM;
2399
2400 action->handler = handler;
Daniel Lezcanoc80081b2017-07-06 14:29:04 +02002401 action->flags = flags | IRQF_PERCPU | IRQF_NO_SUSPEND;
Marc Zyngier31d9d9b2011-09-23 17:03:06 +01002402 action->name = devname;
2403 action->percpu_dev_id = dev_id;
2404
Jon Hunterbe45beb2016-06-07 16:12:29 +01002405 retval = irq_chip_pm_get(&desc->irq_data);
Shawn Lin4396f462016-08-22 16:21:52 +08002406 if (retval < 0) {
2407 kfree(action);
Jon Hunterbe45beb2016-06-07 16:12:29 +01002408 return retval;
Shawn Lin4396f462016-08-22 16:21:52 +08002409 }
Jon Hunterbe45beb2016-06-07 16:12:29 +01002410
Marc Zyngier31d9d9b2011-09-23 17:03:06 +01002411 retval = __setup_irq(irq, desc, action);
Marc Zyngier31d9d9b2011-09-23 17:03:06 +01002412
Jon Hunterbe45beb2016-06-07 16:12:29 +01002413 if (retval) {
2414 irq_chip_pm_put(&desc->irq_data);
Marc Zyngier31d9d9b2011-09-23 17:03:06 +01002415 kfree(action);
Jon Hunterbe45beb2016-06-07 16:12:29 +01002416 }
Marc Zyngier31d9d9b2011-09-23 17:03:06 +01002417
2418 return retval;
2419}
Daniel Lezcanoc80081b2017-07-06 14:29:04 +02002420EXPORT_SYMBOL_GPL(__request_percpu_irq);
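/*
 * Usage sketch (hypothetical, not part of this file): callers normally go
 * through the request_percpu_irq() wrapper, which passes flags == 0. The
 * handler receives the invoked CPU's instance of the per-cpu cookie as
 * dev_id. struct my_pcpu_state and my_ppi_irq are illustrative only.
 *
 *	static DEFINE_PER_CPU(struct my_pcpu_state, my_pcpu_state);
 *
 *	static irqreturn_t my_ppi_handler(int irq, void *dev_id)
 *	{
 *		struct my_pcpu_state *st = dev_id;
 *
 *		st->count++;
 *		return IRQ_HANDLED;
 *	}
 *
 *	ret = request_percpu_irq(my_ppi_irq, my_ppi_handler,
 *				 "my_ppi", &my_pcpu_state);
 */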
Marc Zyngier1b7047e2015-03-18 11:01:22 +00002421
2422/**
Julien Thierry4b078c32019-01-31 14:53:59 +00002423 * request_percpu_nmi - allocate a percpu interrupt line for NMI delivery
2424 * @irq: Interrupt line to allocate
2425 * @handler: Function to be called when the IRQ occurs.
2426 * @name: An ascii name for the claiming device
2427 * @dev_id: A percpu cookie passed back to the handler function
2428 *
2429 * This call allocates interrupt resources for a per CPU NMI. Per CPU NMIs
Julien Thierrya5186692019-02-13 10:09:19 +00002430 * have to be set up on each CPU by calling prepare_percpu_nmi() before
2431 * being enabled on the same CPU by using enable_percpu_nmi().
Julien Thierry4b078c32019-01-31 14:53:59 +00002432 *
2433 * Dev_id must be globally unique. It is a per-cpu variable, and
2434 * the handler gets called with the interrupted CPU's instance of
2435 * that variable.
2436 *
2437 * Interrupt lines requested for NMI delivery should have the auto
2438 * enable setting disabled.
2439 *
2440 * If the interrupt line cannot be used to deliver NMIs, the function
2441 * will fail, returning a negative value.
2442 */
2443int request_percpu_nmi(unsigned int irq, irq_handler_t handler,
2444 const char *name, void __percpu *dev_id)
2445{
2446 struct irqaction *action;
2447 struct irq_desc *desc;
2448 unsigned long flags;
2449 int retval;
2450
2451 if (!handler)
2452 return -EINVAL;
2453
2454 desc = irq_to_desc(irq);
2455
2456 if (!desc || !irq_settings_can_request(desc) ||
2457 !irq_settings_is_per_cpu_devid(desc) ||
2458 irq_settings_can_autoenable(desc) ||
2459 !irq_supports_nmi(desc))
2460 return -EINVAL;
2461
2462 /* The line cannot already be NMI */
2463 if (desc->istate & IRQS_NMI)
2464 return -EINVAL;
2465
2466 action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
2467 if (!action)
2468 return -ENOMEM;
2469
2470 action->handler = handler;
2471 action->flags = IRQF_PERCPU | IRQF_NO_SUSPEND | IRQF_NO_THREAD
2472 | IRQF_NOBALANCING;
2473 action->name = name;
2474 action->percpu_dev_id = dev_id;
2475
2476 retval = irq_chip_pm_get(&desc->irq_data);
2477 if (retval < 0)
2478 goto err_out;
2479
2480 retval = __setup_irq(irq, desc, action);
2481 if (retval)
2482 goto err_irq_setup;
2483
2484 raw_spin_lock_irqsave(&desc->lock, flags);
2485 desc->istate |= IRQS_NMI;
2486 raw_spin_unlock_irqrestore(&desc->lock, flags);
2487
2488 return 0;
2489
2490err_irq_setup:
2491 irq_chip_pm_put(&desc->irq_data);
2492err_out:
2493 kfree(action);
2494
2495 return retval;
2496}
2497
2498/**
2499 * prepare_percpu_nmi - performs CPU local setup for NMI delivery
2500 * @irq: Interrupt line to prepare for NMI delivery
2501 *
2502 * This call prepares an interrupt line to deliver NMIs on the current CPU,
2503 * before that interrupt line gets enabled with enable_percpu_nmi().
2504 *
2505 * As a CPU local operation, this should be called from non-preemptible
2506 * context.
2507 *
2508 * If the interrupt line cannot be used to deliver NMIs, the function
2509 * will fail, returning a negative value.
2510 */
int prepare_percpu_nmi(unsigned int irq)
{
	unsigned long flags;
	struct irq_desc *desc;
	int ret = 0;

	WARN_ON(preemptible());

	desc = irq_get_desc_lock(irq, &flags,
				 IRQ_GET_DESC_CHECK_PERCPU);
	if (!desc)
		return -EINVAL;

	if (WARN(!(desc->istate & IRQS_NMI),
		 KERN_ERR "prepare_percpu_nmi called for a non-NMI interrupt: irq %u\n",
		 irq)) {
		ret = -EINVAL;
		goto out;
	}

	ret = irq_nmi_setup(desc);
	if (ret) {
		pr_err("Failed to setup NMI delivery: irq %u\n", irq);
		goto out;
	}

out:
	irq_put_desc_unlock(desc, flags);
	return ret;
}

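/*
 * Illustrative per-CPU bring-up sketch (assumptions, not taken from this
 * file): run on each target CPU with preemption disabled, e.g. from a CPU
 * hotplug "starting" callback or an on_each_cpu() callback. The
 * enable_percpu_nmi() call and its trigger-type argument are assumed to
 * mirror enable_percpu_irq().
 *
 *	static void my_nmi_starting(void *info)
 *	{
 *		unsigned int irq = *(unsigned int *)info;
 *
 *		if (!prepare_percpu_nmi(irq))
 *			enable_percpu_nmi(irq, IRQ_TYPE_NONE);
 *	}
 *
 *	on_each_cpu(my_nmi_starting, &irq, 1);
 */
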
/**
 * teardown_percpu_nmi - undoes NMI setup of IRQ line
 * @irq: Interrupt line from which CPU local NMI configuration should be
 *	 removed
 *
 * This call undoes the setup done by prepare_percpu_nmi().
 *
 * The IRQ line should not be enabled for the current CPU.
 *
 * As a CPU local operation, this should be called from non-preemptible
 * context.
 */
void teardown_percpu_nmi(unsigned int irq)
{
	unsigned long flags;
	struct irq_desc *desc;

	WARN_ON(preemptible());

	desc = irq_get_desc_lock(irq, &flags,
				 IRQ_GET_DESC_CHECK_PERCPU);
	if (!desc)
		return;

	if (WARN_ON(!(desc->istate & IRQS_NMI)))
		goto out;

	irq_nmi_teardown(desc);
out:
	irq_put_desc_unlock(desc, flags);
}

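/*
 * Illustrative per-CPU teardown sketch (assumptions, not taken from this
 * file): the mirror image of the bring-up above, again run on the CPU
 * being torn down with preemption disabled. disable_percpu_nmi() is
 * assumed to be the NMI counterpart of disable_percpu_irq().
 *
 *	static void my_nmi_dying(void *info)
 *	{
 *		unsigned int irq = *(unsigned int *)info;
 *
 *		disable_percpu_nmi(irq);
 *		teardown_percpu_nmi(irq);
 *	}
 */
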
/**
 * irq_get_irqchip_state - returns the irqchip state of an interrupt.
 * @irq: Interrupt line that is forwarded to a VM
 * @which: One of IRQCHIP_STATE_* the caller wants to know about
 * @state: a pointer to a boolean where the state is to be stored
 *
 * This call snapshots the internal irqchip state of an
 * interrupt, returning into @state the bit corresponding to
 * state @which.
 *
 * This function should be called with preemption disabled if the
 * interrupt controller has per-cpu registers.
 */
int irq_get_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
			  bool *state)
{
	struct irq_desc *desc;
	struct irq_data *data;
	struct irq_chip *chip;
	unsigned long flags;
	int err = -EINVAL;

	desc = irq_get_desc_buslock(irq, &flags, 0);
	if (!desc)
		return err;

	data = irq_desc_get_irq_data(desc);

	do {
		chip = irq_data_get_irq_chip(data);
		if (WARN_ON_ONCE(!chip)) {
			/* Guard against a half-initialized hierarchy level */
			err = -ENODEV;
			goto out_unlock;
		}
		if (chip->irq_get_irqchip_state)
			break;
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
		data = data->parent_data;
#else
		data = NULL;
#endif
	} while (data);

	if (data)
		err = chip->irq_get_irqchip_state(data, which, state);

out_unlock:
	irq_put_desc_busunlock(desc, flags);
	return err;
}
EXPORT_SYMBOL_GPL(irq_get_irqchip_state);

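/*
 * Illustrative usage sketch (assumptions, not taken from this file):
 * snapshotting whether a line forwarded to a VM is still pending at the
 * irqchip, as a hypervisor might do before saving guest interrupt state.
 *
 *	bool pending;
 *	int err;
 *
 *	err = irq_get_irqchip_state(irq, IRQCHIP_STATE_PENDING, &pending);
 *	if (!err && pending)
 *		save_guest_pending_bit(irq);	/* hypothetical helper *​/
 */
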
/**
 * irq_set_irqchip_state - set the state of a forwarded interrupt.
 * @irq: Interrupt line that is forwarded to a VM
 * @which: State to be restored (one of IRQCHIP_STATE_*)
 * @val: Value corresponding to @which
 *
 * This call sets the internal irqchip state of an interrupt,
 * depending on the value of @which.
 *
 * This function should be called with preemption disabled if the
 * interrupt controller has per-cpu registers.
 */
int irq_set_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
			  bool val)
{
	struct irq_desc *desc;
	struct irq_data *data;
	struct irq_chip *chip;
	unsigned long flags;
	int err = -EINVAL;

	desc = irq_get_desc_buslock(irq, &flags, 0);
	if (!desc)
		return err;

	data = irq_desc_get_irq_data(desc);

	do {
		chip = irq_data_get_irq_chip(data);
		if (WARN_ON_ONCE(!chip)) {
			/* Guard against a half-initialized hierarchy level */
			err = -ENODEV;
			goto out_unlock;
		}
		if (chip->irq_set_irqchip_state)
			break;
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
		data = data->parent_data;
#else
		data = NULL;
#endif
	} while (data);

	if (data)
		err = chip->irq_set_irqchip_state(data, which, val);

out_unlock:
	irq_put_desc_busunlock(desc, flags);
	return err;
}
EXPORT_SYMBOL_GPL(irq_set_irqchip_state);
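
/*
 * Illustrative usage sketch (assumptions, not taken from this file):
 * restoring a previously saved pending bit when handing a forwarded
 * interrupt back to the guest.
 *
 *	int err;
 *
 *	err = irq_set_irqchip_state(irq, IRQCHIP_STATE_PENDING, saved_pending);
 *	if (err)
 *		pr_warn("failed to restore pending state for irq %u\n", irq);
 */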