/*
 * linux/kernel/irq/manage.c
 *
 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
 * Copyright (C) 2005-2006 Thomas Gleixner
 *
 * This file contains driver APIs to the irq subsystem.
 */

#define pr_fmt(fmt) "genirq: " fmt

#include <linux/irq.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/sched/rt.h>
#include <linux/sched/task.h>
#include <uapi/linux/sched/types.h>
#include <linux/task_work.h>

#include "internals.h"

#ifdef CONFIG_IRQ_FORCED_THREADING
__read_mostly bool force_irqthreads;

static int __init setup_forced_irqthreads(char *arg)
{
	force_irqthreads = true;
	return 0;
}
early_param("threadirqs", setup_forced_irqthreads);
#endif
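
/*
 * Note: booting with "threadirqs" on the kernel command line sets
 * force_irqthreads above, which makes __setup_irq() run most primary
 * handlers from an interrupt thread (see irq_setup_forced_threading()
 * below). Interrupts flagged IRQF_NO_THREAD, IRQF_PERCPU or IRQF_ONESHOT
 * keep their primary handler in hard interrupt context.
 */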

static void __synchronize_hardirq(struct irq_desc *desc)
{
	bool inprogress;

	do {
		unsigned long flags;

		/*
		 * Wait until we're out of the critical section.  This might
		 * give the wrong answer due to the lack of memory barriers.
		 */
		while (irqd_irq_inprogress(&desc->irq_data))
			cpu_relax();

		/* Ok, that indicated we're done: double-check carefully. */
		raw_spin_lock_irqsave(&desc->lock, flags);
		inprogress = irqd_irq_inprogress(&desc->irq_data);
		raw_spin_unlock_irqrestore(&desc->lock, flags);

		/* Oops, that failed? */
	} while (inprogress);
}

/**
 * synchronize_hardirq - wait for pending hard IRQ handlers (on other CPUs)
 * @irq: interrupt number to wait for
 *
 * This function waits for any pending hard IRQ handlers for this
 * interrupt to complete before returning. If you use this function
 * while holding a resource the IRQ handler may need, you will
 * deadlock. It does not take associated threaded handlers into
 * account.
 *
 * Do not use this for shutdown scenarios where you must be sure
 * that all parts (hardirq and threaded handler) have completed.
 *
 * Returns: false if a threaded handler is active.
 *
 * This function may be called - with care - from IRQ context.
 */
bool synchronize_hardirq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (desc) {
		__synchronize_hardirq(desc);
		return !atomic_read(&desc->threads_active);
	}

	return true;
}
EXPORT_SYMBOL(synchronize_hardirq);

/**
 * synchronize_irq - wait for pending IRQ handlers (on other CPUs)
 * @irq: interrupt number to wait for
 *
 * This function waits for any pending IRQ handlers for this interrupt
 * to complete before returning. If you use this function while
 * holding a resource the IRQ handler may need, you will deadlock.
 *
 * This function may be called - with care - from IRQ context.
 */
void synchronize_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (desc) {
		__synchronize_hardirq(desc);
		/*
		 * We made sure that no hardirq handler is
		 * running. Now verify that no threaded handlers are
		 * active.
		 */
		wait_event(desc->wait_for_threads,
			   !atomic_read(&desc->threads_active));
	}
}
EXPORT_SYMBOL(synchronize_irq);
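
/*
 * Example (illustrative sketch; dev, dev->irq and dev->dma_buf are
 * hypothetical driver fields): a driver that must not free state its
 * handler may still be touching typically disables the line and then
 * synchronizes before freeing.
 *
 *	disable_irq_nosync(dev->irq);
 *	synchronize_irq(dev->irq);	// may sleep, waits for threaded handlers too
 *	kfree(dev->dma_buf);		// no handler can reference it any more
 *
 * synchronize_irq() must not be called while holding a lock that the
 * handler also takes, otherwise it deadlocks as described above.
 */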

#ifdef CONFIG_SMP
cpumask_var_t irq_default_affinity;

static bool __irq_can_set_affinity(struct irq_desc *desc)
{
	if (!desc || !irqd_can_balance(&desc->irq_data) ||
	    !desc->irq_data.chip || !desc->irq_data.chip->irq_set_affinity)
		return false;
	return true;
}

/**
 * irq_can_set_affinity - Check if the affinity of a given irq can be set
 * @irq: Interrupt to check
 *
 */
int irq_can_set_affinity(unsigned int irq)
{
	return __irq_can_set_affinity(irq_to_desc(irq));
}

/**
 * irq_can_set_affinity_usr - Check if affinity of an irq can be set from user space
 * @irq: Interrupt to check
 *
 * Like irq_can_set_affinity() above, but additionally checks for the
 * AFFINITY_MANAGED flag.
 */
bool irq_can_set_affinity_usr(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	return __irq_can_set_affinity(desc) &&
		!irqd_affinity_is_managed(&desc->irq_data);
}

/**
 * irq_set_thread_affinity - Notify irq threads to adjust affinity
 * @desc: irq descriptor which has affinity changed
 *
 * We just set IRQTF_AFFINITY and delegate the affinity setting
 * to the interrupt thread itself. We can not call
 * set_cpus_allowed_ptr() here as we hold desc->lock and this
 * code can be called from hard interrupt context.
 */
void irq_set_thread_affinity(struct irq_desc *desc)
{
	struct irqaction *action;

	for_each_action_of_desc(desc, action)
		if (action->thread)
			set_bit(IRQTF_AFFINITY, &action->thread_flags);
}

static void irq_validate_effective_affinity(struct irq_data *data)
{
#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
	const struct cpumask *m = irq_data_get_effective_affinity_mask(data);
	struct irq_chip *chip = irq_data_get_irq_chip(data);

	if (!cpumask_empty(m))
		return;
	pr_warn_once("irq_chip %s did not update eff. affinity mask of irq %u\n",
		     chip->name, data->irq);
#endif
}

int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask,
			bool force)
{
	struct irq_desc *desc = irq_data_to_desc(data);
	struct irq_chip *chip = irq_data_get_irq_chip(data);
	int ret;

	ret = chip->irq_set_affinity(data, mask, force);
	switch (ret) {
	case IRQ_SET_MASK_OK:
	case IRQ_SET_MASK_OK_DONE:
		cpumask_copy(desc->irq_common_data.affinity, mask);
	case IRQ_SET_MASK_OK_NOCOPY:
		irq_validate_effective_affinity(data);
		irq_set_thread_affinity(desc);
		ret = 0;
	}

	return ret;
}

int irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask,
			    bool force)
{
	struct irq_chip *chip = irq_data_get_irq_chip(data);
	struct irq_desc *desc = irq_data_to_desc(data);
	int ret = 0;

	if (!chip || !chip->irq_set_affinity)
		return -EINVAL;

	if (irq_can_move_pcntxt(data)) {
		ret = irq_do_set_affinity(data, mask, force);
	} else {
		irqd_set_move_pending(data);
		irq_copy_pending(desc, mask);
	}

	if (desc->affinity_notify) {
		kref_get(&desc->affinity_notify->kref);
		schedule_work(&desc->affinity_notify->work);
	}
	irqd_set(data, IRQD_AFFINITY_SET);

	return ret;
}

int __irq_set_affinity(unsigned int irq, const struct cpumask *mask, bool force)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;
	int ret;

	if (!desc)
		return -EINVAL;

	raw_spin_lock_irqsave(&desc->lock, flags);
	ret = irq_set_affinity_locked(irq_desc_get_irq_data(desc), mask, force);
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	return ret;
}

int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);

	if (!desc)
		return -EINVAL;
	desc->affinity_hint = m;
	irq_put_desc_unlock(desc, flags);
	/* set the initial affinity to prevent every interrupt being on CPU0 */
	if (m)
		__irq_set_affinity(irq, m, false);
	return 0;
}
EXPORT_SYMBOL_GPL(irq_set_affinity_hint);
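
/*
 * Example (illustrative sketch; nvec and msix_entries are hypothetical
 * driver fields): a multi-queue driver can publish one hint per vector so
 * that user space (e.g. irqbalance) spreads the queues across CPUs.
 * irq_set_affinity_hint() also applies a non-NULL hint as the initial
 * affinity, as done above.
 *
 *	for (i = 0; i < nvec; i++)
 *		irq_set_affinity_hint(msix_entries[i].vector,
 *				      cpumask_of(i % num_online_cpus()));
 *
 * The hint is visible in /proc/irq/<n>/affinity_hint; pass NULL before
 * freeing the interrupt to clear it.
 */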

static void irq_affinity_notify(struct work_struct *work)
{
	struct irq_affinity_notify *notify =
		container_of(work, struct irq_affinity_notify, work);
	struct irq_desc *desc = irq_to_desc(notify->irq);
	cpumask_var_t cpumask;
	unsigned long flags;

	if (!desc || !alloc_cpumask_var(&cpumask, GFP_KERNEL))
		goto out;

	raw_spin_lock_irqsave(&desc->lock, flags);
	if (irq_move_pending(&desc->irq_data))
		irq_get_pending(cpumask, desc);
	else
		cpumask_copy(cpumask, desc->irq_common_data.affinity);
	raw_spin_unlock_irqrestore(&desc->lock, flags);

	notify->notify(notify, cpumask);

	free_cpumask_var(cpumask);
out:
	kref_put(&notify->kref, notify->release);
}

/**
 * irq_set_affinity_notifier - control notification of IRQ affinity changes
 * @irq:	Interrupt for which to enable/disable notification
 * @notify:	Context for notification, or %NULL to disable
 *		notification.  Function pointers must be initialised;
 *		the other fields will be initialised by this function.
 *
 * Must be called in process context.  Notification may only be enabled
 * after the IRQ is allocated and must be disabled before the IRQ is
 * freed using free_irq().
 */
int
irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irq_affinity_notify *old_notify;
	unsigned long flags;

	/* The release function is promised process context */
	might_sleep();

	if (!desc)
		return -EINVAL;

	/* Complete initialisation of *notify */
	if (notify) {
		notify->irq = irq;
		kref_init(&notify->kref);
		INIT_WORK(&notify->work, irq_affinity_notify);
	}

	raw_spin_lock_irqsave(&desc->lock, flags);
	old_notify = desc->affinity_notify;
	desc->affinity_notify = notify;
	raw_spin_unlock_irqrestore(&desc->lock, flags);

	if (old_notify)
		kref_put(&old_notify->kref, old_notify->release);

	return 0;
}
EXPORT_SYMBOL_GPL(irq_set_affinity_notifier);
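
/*
 * Example (illustrative sketch; all foo_* names are hypothetical): a driver
 * that wants to re-steer DMA or rebuild per-CPU state when its interrupt is
 * moved could register a notifier.
 *
 *	static void foo_notify(struct irq_affinity_notify *notify,
 *			       const cpumask_t *mask)
 *	{
 *		struct foo_dev *fd = container_of(notify, struct foo_dev,
 *						  affinity_notify);
 *		foo_retarget_queues(fd, mask);
 *	}
 *
 *	static void foo_release(struct kref *ref) { }
 *
 *	fd->affinity_notify.notify = foo_notify;
 *	fd->affinity_notify.release = foo_release;
 *	irq_set_affinity_notifier(fd->irq, &fd->affinity_notify);
 *
 * The notifier must be unregistered (pass NULL) before free_irq().
 */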

#ifndef CONFIG_AUTO_IRQ_AFFINITY
/*
 * Generic version of the affinity autoselector.
 */
int irq_setup_affinity(struct irq_desc *desc)
{
	struct cpumask *set = irq_default_affinity;
	int ret, node = irq_desc_get_node(desc);
	static DEFINE_RAW_SPINLOCK(mask_lock);
	static struct cpumask mask;

	/* Excludes PER_CPU and NO_BALANCE interrupts */
	if (!__irq_can_set_affinity(desc))
		return 0;

	raw_spin_lock(&mask_lock);
	/*
	 * Preserve the managed affinity setting and a userspace affinity
	 * setup, but make sure that one of the targets is online.
	 */
	if (irqd_affinity_is_managed(&desc->irq_data) ||
	    irqd_has_set(&desc->irq_data, IRQD_AFFINITY_SET)) {
		if (cpumask_intersects(desc->irq_common_data.affinity,
				       cpu_online_mask))
			set = desc->irq_common_data.affinity;
		else
			irqd_clear(&desc->irq_data, IRQD_AFFINITY_SET);
	}

	cpumask_and(&mask, cpu_online_mask, set);
	if (node != NUMA_NO_NODE) {
		const struct cpumask *nodemask = cpumask_of_node(node);

		/* make sure at least one of the cpus in nodemask is online */
		if (cpumask_intersects(&mask, nodemask))
			cpumask_and(&mask, &mask, nodemask);
	}
	ret = irq_do_set_affinity(&desc->irq_data, &mask, false);
	raw_spin_unlock(&mask_lock);
	return ret;
}
#else
/* Wrapper for ALPHA specific affinity selector magic */
int irq_setup_affinity(struct irq_desc *desc)
{
	return irq_select_affinity(irq_desc_get_irq(desc));
}
#endif

/*
 * Called when a bogus affinity is set via /proc/irq
 */
int irq_select_affinity_usr(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;
	int ret;

	raw_spin_lock_irqsave(&desc->lock, flags);
	ret = irq_setup_affinity(desc);
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	return ret;
}
#endif

/**
 * irq_set_vcpu_affinity - Set vcpu affinity for the interrupt
 * @irq: interrupt number to set affinity
 * @vcpu_info: vCPU specific data
 *
 * This function uses the vCPU specific data to set the vCPU
 * affinity for an irq. The vCPU specific data is passed from
 * outside, such as KVM. One example code path is as below:
 * KVM -> IOMMU -> irq_set_vcpu_affinity().
 */
int irq_set_vcpu_affinity(unsigned int irq, void *vcpu_info)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
	struct irq_data *data;
	struct irq_chip *chip;
	int ret = -ENOSYS;

	if (!desc)
		return -EINVAL;

	data = irq_desc_get_irq_data(desc);
	do {
		chip = irq_data_get_irq_chip(data);
		if (chip && chip->irq_set_vcpu_affinity)
			break;
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
		data = data->parent_data;
#else
		data = NULL;
#endif
	} while (data);

	if (data)
		ret = chip->irq_set_vcpu_affinity(data, vcpu_info);
	irq_put_desc_unlock(desc, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(irq_set_vcpu_affinity);
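
/*
 * Example (illustrative sketch; foo_vcpu_info and its field layout are
 * hypothetical): a hypervisor component that wants an irqchip with
 * posted-interrupt support to deliver a host interrupt directly to a guest
 * vCPU hands the chip an opaque cookie whose format is defined by the
 * underlying irqchip driver.
 *
 *	struct foo_vcpu_info info = {
 *		.vector	= guest_vector,
 *		.vcpu	= vcpu_id,
 *	};
 *
 *	ret = irq_set_vcpu_affinity(host_irq, &info);
 *	if (ret)
 *		return ret;	// no chip in the hierarchy supports it
 *
 * Passing a NULL vcpu_info is typically used to switch back to normal
 * host delivery.
 */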

void __disable_irq(struct irq_desc *desc)
{
	if (!desc->depth++)
		irq_disable(desc);
}

static int __disable_irq_nosync(unsigned int irq)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);

	if (!desc)
		return -EINVAL;
	__disable_irq(desc);
	irq_put_desc_busunlock(desc, flags);
	return 0;
}

/**
 * disable_irq_nosync - disable an irq without waiting
 * @irq: Interrupt to disable
 *
 * Disable the selected interrupt line.  Disables and Enables are
 * nested.
 * Unlike disable_irq(), this function does not ensure existing
 * instances of the IRQ handler have completed before returning.
 *
 * This function may be called from IRQ context.
 */
void disable_irq_nosync(unsigned int irq)
{
	__disable_irq_nosync(irq);
}
EXPORT_SYMBOL(disable_irq_nosync);

/**
 * disable_irq - disable an irq and wait for completion
 * @irq: Interrupt to disable
 *
 * Disable the selected interrupt line.  Enables and Disables are
 * nested.
 * This function waits for any pending IRQ handlers for this interrupt
 * to complete before returning. If you use this function while
 * holding a resource the IRQ handler may need, you will deadlock.
 *
 * This function may be called - with care - from IRQ context.
 */
void disable_irq(unsigned int irq)
{
	if (!__disable_irq_nosync(irq))
		synchronize_irq(irq);
}
EXPORT_SYMBOL(disable_irq);

/**
 * disable_hardirq - disables an irq and waits for hardirq completion
 * @irq: Interrupt to disable
 *
 * Disable the selected interrupt line.  Enables and Disables are
 * nested.
 * This function waits for any pending hard IRQ handlers for this
 * interrupt to complete before returning. If you use this function while
 * holding a resource the hard IRQ handler may need, you will deadlock.
 *
 * When used to optimistically disable an interrupt from atomic context
 * the return value must be checked.
 *
 * Returns: false if a threaded handler is active.
 *
 * This function may be called - with care - from IRQ context.
 */
bool disable_hardirq(unsigned int irq)
{
	if (!__disable_irq_nosync(irq))
		return synchronize_hardirq(irq);

	return false;
}
EXPORT_SYMBOL_GPL(disable_hardirq);
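
/*
 * Example (illustrative sketch; the foo_* names and dev->irq are
 * hypothetical): optimistically quiescing a line from atomic context, e.g.
 * in a netpoll-style path, where sleeping in disable_irq() is not allowed.
 *
 *	if (disable_hardirq(dev->irq))
 *		foo_poll_controller(dev);	// hard handler quiesced
 *	else
 *		foo_schedule_slow_path(dev);	// threaded handler still active
 *	enable_irq(dev->irq);
 */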

void __enable_irq(struct irq_desc *desc)
{
	switch (desc->depth) {
	case 0:
 err_out:
		WARN(1, KERN_WARNING "Unbalanced enable for IRQ %d\n",
		     irq_desc_get_irq(desc));
		break;
	case 1: {
		if (desc->istate & IRQS_SUSPENDED)
			goto err_out;
		/* Prevent probing on this irq: */
		irq_settings_set_noprobe(desc);
		/*
		 * Call irq_startup() not irq_enable() here because the
		 * interrupt might be marked NOAUTOEN. So irq_startup()
		 * needs to be invoked when it gets enabled the first
		 * time. If it was already started up, then irq_startup()
		 * will invoke irq_enable() under the hood.
		 */
		irq_startup(desc, IRQ_RESEND, IRQ_START_COND);
		break;
	}
	default:
		desc->depth--;
	}
}

/**
 * enable_irq - enable handling of an irq
 * @irq: Interrupt to enable
 *
 * Undoes the effect of one call to disable_irq().  If this
 * matches the last disable, processing of interrupts on this
 * IRQ line is re-enabled.
 *
 * This function may be called from IRQ context only when
 * desc->irq_data.chip->bus_lock and desc->chip->bus_sync_unlock are NULL !
 */
void enable_irq(unsigned int irq)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);

	if (!desc)
		return;
	if (WARN(!desc->irq_data.chip,
		 KERN_ERR "enable_irq before setup/request_irq: irq %u\n", irq))
		goto out;

	__enable_irq(desc);
out:
	irq_put_desc_busunlock(desc, flags);
}
EXPORT_SYMBOL(enable_irq);
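
/*
 * Example (illustrative sketch; dev->irq and foo_reprogram_hardware() are
 * hypothetical): the classic nested disable/enable pairing around a section
 * that must not race with the handler.
 *
 *	disable_irq(dev->irq);		// waits for running handlers
 *	foo_reprogram_hardware(dev);	// handler cannot run here
 *	enable_irq(dev->irq);		// line re-enabled on the matching enable
 *
 * Calls nest: the line stays disabled until every disable_irq() has been
 * matched by an enable_irq(). Do not call disable_irq() while holding a
 * lock the handler also takes, and do not call it from the handler of the
 * same line.
 */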

static int set_irq_wake_real(unsigned int irq, unsigned int on)
{
	struct irq_desc *desc = irq_to_desc(irq);
	int ret = -ENXIO;

	if (irq_desc_get_chip(desc)->flags & IRQCHIP_SKIP_SET_WAKE)
		return 0;

	if (desc->irq_data.chip->irq_set_wake)
		ret = desc->irq_data.chip->irq_set_wake(&desc->irq_data, on);

	return ret;
}

/**
 * irq_set_irq_wake - control irq power management wakeup
 * @irq:	interrupt to control
 * @on:		enable/disable power management wakeup
 *
 * Enable/disable power management wakeup mode, which is
 * disabled by default.  Enables and disables must match,
 * just as they match for non-wakeup mode support.
 *
 * Wakeup mode lets this IRQ wake the system from sleep
 * states like "suspend to RAM".
 */
int irq_set_irq_wake(unsigned int irq, unsigned int on)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
	int ret = 0;

	if (!desc)
		return -EINVAL;

	/* wakeup-capable irqs can be shared between drivers that
	 * don't need to have the same sleep mode behaviors.
	 */
	if (on) {
		if (desc->wake_depth++ == 0) {
			ret = set_irq_wake_real(irq, on);
			if (ret)
				desc->wake_depth = 0;
			else
				irqd_set(&desc->irq_data, IRQD_WAKEUP_STATE);
		}
	} else {
		if (desc->wake_depth == 0) {
			WARN(1, "Unbalanced IRQ %d wake disable\n", irq);
		} else if (--desc->wake_depth == 0) {
			ret = set_irq_wake_real(irq, on);
			if (ret)
				desc->wake_depth = 1;
			else
				irqd_clear(&desc->irq_data, IRQD_WAKEUP_STATE);
		}
	}
	irq_put_desc_busunlock(desc, flags);
	return ret;
}
EXPORT_SYMBOL(irq_set_irq_wake);
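
/*
 * Example (illustrative sketch; the foo_* names are hypothetical): a driver
 * arming its interrupt as a wakeup source in its suspend callback and
 * disarming it on resume. The enable_irq_wake()/disable_irq_wake() wrappers
 * from <linux/interrupt.h> call irq_set_irq_wake(irq, 1/0).
 *
 *	static int foo_suspend(struct device *dev)
 *	{
 *		struct foo_dev *fd = dev_get_drvdata(dev);
 *
 *		if (device_may_wakeup(dev))
 *			enable_irq_wake(fd->irq);
 *		return 0;
 *	}
 *
 *	static int foo_resume(struct device *dev)
 *	{
 *		struct foo_dev *fd = dev_get_drvdata(dev);
 *
 *		if (device_may_wakeup(dev))
 *			disable_irq_wake(fd->irq);
 *		return 0;
 *	}
 */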
Thomas Gleixnerba9a2332006-06-29 02:24:55 -0700633
Linus Torvalds1da177e2005-04-16 15:20:36 -0700634/*
635 * Internal function that tells the architecture code whether a
636 * particular irq has been exclusively allocated or is available
637 * for driver use.
638 */
639int can_request_irq(unsigned int irq, unsigned long irqflags)
640{
Thomas Gleixnercc8c3b72010-03-23 22:40:53 +0100641 unsigned long flags;
Marc Zyngier31d9d9b2011-09-23 17:03:06 +0100642 struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
Thomas Gleixner02725e72011-02-12 10:37:36 +0100643 int canrequest = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700644
Yinghai Lu7d94f7c2008-08-19 20:50:14 -0700645 if (!desc)
646 return 0;
647
Thomas Gleixner02725e72011-02-12 10:37:36 +0100648 if (irq_settings_can_request(desc)) {
Ben Hutchings2779db82013-06-28 02:40:30 +0100649 if (!desc->action ||
650 irqflags & desc->action->flags & IRQF_SHARED)
651 canrequest = 1;
Thomas Gleixner02725e72011-02-12 10:37:36 +0100652 }
653 irq_put_desc_unlock(desc, flags);
654 return canrequest;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700655}
656
Jiang Liua1ff5412015-06-23 19:47:29 +0200657int __irq_set_trigger(struct irq_desc *desc, unsigned long flags)
Uwe Kleine-König82736f42008-07-23 21:28:54 -0700658{
Thomas Gleixner6b8ff312010-10-01 12:58:38 +0200659 struct irq_chip *chip = desc->irq_data.chip;
Thomas Gleixnerd4d5e082011-02-10 13:16:14 +0100660 int ret, unmask = 0;
Uwe Kleine-König82736f42008-07-23 21:28:54 -0700661
Thomas Gleixnerb2ba2c32010-09-27 12:45:47 +0000662 if (!chip || !chip->irq_set_type) {
Uwe Kleine-König82736f42008-07-23 21:28:54 -0700663 /*
664 * IRQF_TRIGGER_* but the PIC does not support multiple
665 * flow-types?
666 */
Jiang Liua1ff5412015-06-23 19:47:29 +0200667 pr_debug("No set_type function for IRQ %d (%s)\n",
668 irq_desc_get_irq(desc),
Thomas Gleixnerf5d89472012-04-19 12:06:13 +0200669 chip ? (chip->name ? : "unknown") : "unknown");
Uwe Kleine-König82736f42008-07-23 21:28:54 -0700670 return 0;
671 }
672
Thomas Gleixnerd4d5e082011-02-10 13:16:14 +0100673 if (chip->flags & IRQCHIP_SET_TYPE_MASKED) {
Thomas Gleixner32f41252011-03-28 14:10:52 +0200674 if (!irqd_irq_masked(&desc->irq_data))
Thomas Gleixnerd4d5e082011-02-10 13:16:14 +0100675 mask_irq(desc);
Thomas Gleixner32f41252011-03-28 14:10:52 +0200676 if (!irqd_irq_disabled(&desc->irq_data))
Thomas Gleixnerd4d5e082011-02-10 13:16:14 +0100677 unmask = 1;
678 }
679
Alexander Kuleshov00b992d2016-07-19 15:54:08 +0600680 /* Mask all flags except trigger mode */
681 flags &= IRQ_TYPE_SENSE_MASK;
Thomas Gleixnerb2ba2c32010-09-27 12:45:47 +0000682 ret = chip->irq_set_type(&desc->irq_data, flags);
Uwe Kleine-König82736f42008-07-23 21:28:54 -0700683
Thomas Gleixner876dbd42011-02-08 17:28:12 +0100684 switch (ret) {
685 case IRQ_SET_MASK_OK:
Jiang Liu2cb62542014-11-06 22:20:18 +0800686 case IRQ_SET_MASK_OK_DONE:
Thomas Gleixner876dbd42011-02-08 17:28:12 +0100687 irqd_clear(&desc->irq_data, IRQD_TRIGGER_MASK);
688 irqd_set(&desc->irq_data, flags);
689
690 case IRQ_SET_MASK_OK_NOCOPY:
691 flags = irqd_get_trigger_type(&desc->irq_data);
692 irq_settings_set_trigger_mask(desc, flags);
693 irqd_clear(&desc->irq_data, IRQD_LEVEL);
694 irq_settings_clr_level(desc);
695 if (flags & IRQ_TYPE_LEVEL_MASK) {
696 irq_settings_set_level(desc);
697 irqd_set(&desc->irq_data, IRQD_LEVEL);
698 }
Thomas Gleixner46732472010-06-07 17:53:51 +0200699
Thomas Gleixnerd4d5e082011-02-10 13:16:14 +0100700 ret = 0;
Thomas Gleixner8fff39e2011-02-21 14:19:42 +0100701 break;
Thomas Gleixner876dbd42011-02-08 17:28:12 +0100702 default:
Andrew Morton97fd75b2012-05-31 16:26:07 -0700703 pr_err("Setting trigger mode %lu for irq %u failed (%pF)\n",
Jiang Liua1ff5412015-06-23 19:47:29 +0200704 flags, irq_desc_get_irq(desc), chip->irq_set_type);
David Brownell0c5d1eb2008-10-01 14:46:18 -0700705 }
Thomas Gleixnerd4d5e082011-02-10 13:16:14 +0100706 if (unmask)
707 unmask_irq(desc);
Uwe Kleine-König82736f42008-07-23 21:28:54 -0700708 return ret;
709}
710
Thomas Gleixner293a7a02012-10-16 15:07:49 -0700711#ifdef CONFIG_HARDIRQS_SW_RESEND
712int irq_set_parent(int irq, int parent_irq)
713{
714 unsigned long flags;
715 struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
716
717 if (!desc)
718 return -EINVAL;
719
720 desc->parent_irq = parent_irq;
721
722 irq_put_desc_unlock(desc, flags);
723 return 0;
724}
Sudip Mukherjee3118dac2016-10-06 23:06:43 +0530725EXPORT_SYMBOL_GPL(irq_set_parent);
Thomas Gleixner293a7a02012-10-16 15:07:49 -0700726#endif
727
Thomas Gleixnerb25c3402009-08-13 12:17:22 +0200728/*
729 * Default primary interrupt handler for threaded interrupts. Is
730 * assigned as primary handler when request_threaded_irq is called
731 * with handler == NULL. Useful for oneshot interrupts.
732 */
733static irqreturn_t irq_default_primary_handler(int irq, void *dev_id)
734{
735 return IRQ_WAKE_THREAD;
736}
737
Thomas Gleixner399b5da2009-08-13 13:21:38 +0200738/*
739 * Primary handler for nested threaded interrupts. Should never be
740 * called.
741 */
742static irqreturn_t irq_nested_primary_handler(int irq, void *dev_id)
743{
744 WARN(1, "Primary handler called for nested irq %d\n", irq);
745 return IRQ_NONE;
746}
747
Thomas Gleixner2a1d3ab2015-09-21 11:01:10 +0200748static irqreturn_t irq_forced_secondary_handler(int irq, void *dev_id)
749{
750 WARN(1, "Secondary action handler called for irq %d\n", irq);
751 return IRQ_NONE;
752}
753
Thomas Gleixner3aa551c2009-03-23 18:28:15 +0100754static int irq_wait_for_interrupt(struct irqaction *action)
755{
Ido Yariv550acb12011-12-01 13:55:08 +0200756 set_current_state(TASK_INTERRUPTIBLE);
757
Thomas Gleixner3aa551c2009-03-23 18:28:15 +0100758 while (!kthread_should_stop()) {
Thomas Gleixnerf48fe812009-03-24 11:46:22 +0100759
760 if (test_and_clear_bit(IRQTF_RUNTHREAD,
761 &action->thread_flags)) {
Thomas Gleixner3aa551c2009-03-23 18:28:15 +0100762 __set_current_state(TASK_RUNNING);
763 return 0;
Thomas Gleixnerf48fe812009-03-24 11:46:22 +0100764 }
765 schedule();
Ido Yariv550acb12011-12-01 13:55:08 +0200766 set_current_state(TASK_INTERRUPTIBLE);
Thomas Gleixner3aa551c2009-03-23 18:28:15 +0100767 }
Ido Yariv550acb12011-12-01 13:55:08 +0200768 __set_current_state(TASK_RUNNING);
Thomas Gleixner3aa551c2009-03-23 18:28:15 +0100769 return -1;
770}
771
Thomas Gleixnerb25c3402009-08-13 12:17:22 +0200772/*
 * Oneshot interrupts keep the irq line masked until the threaded
 * handler has finished. Unmask if the interrupt has not been disabled
 * and is marked MASKED.
 */
static void irq_finalize_oneshot(struct irq_desc *desc,
				 struct irqaction *action)
{
	if (!(desc->istate & IRQS_ONESHOT) ||
	    action->handler == irq_forced_secondary_handler)
		return;
again:
	chip_bus_lock(desc);
	raw_spin_lock_irq(&desc->lock);

	/*
	 * Implausible though it may be, we need to protect against the
	 * following scenario:
	 *
	 * The thread finishes while the hard interrupt handler is still
	 * running on the other CPU. If we unmask the irq line now, the
	 * interrupt can come in again, get masked and then dropped due to
	 * IRQS_INPROGRESS, and the irq line stays masked forever.
	 *
	 * This also serializes the state of shared oneshot handlers
	 * versus "desc->threads_oneshot |= action->thread_mask;" in
	 * irq_wake_thread(). See the comment there which explains the
	 * serialization.
	 */
	if (unlikely(irqd_irq_inprogress(&desc->irq_data))) {
		raw_spin_unlock_irq(&desc->lock);
		chip_bus_sync_unlock(desc);
		cpu_relax();
		goto again;
	}

	/*
	 * Now check again whether the thread should run. Otherwise
	 * we would clear the threads_oneshot bit of this thread which
	 * was just set.
	 */
	if (test_bit(IRQTF_RUNTHREAD, &action->thread_flags))
		goto out_unlock;

	desc->threads_oneshot &= ~action->thread_mask;

	if (!desc->threads_oneshot && !irqd_irq_disabled(&desc->irq_data) &&
	    irqd_irq_masked(&desc->irq_data))
		unmask_threaded_irq(desc);

out_unlock:
	raw_spin_unlock_irq(&desc->lock);
	chip_bus_sync_unlock(desc);
}

#ifdef CONFIG_SMP
/*
 * Check whether we need to change the affinity of the interrupt thread.
 */
static void
irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action)
{
	cpumask_var_t mask;
	bool valid = true;

	if (!test_and_clear_bit(IRQTF_AFFINITY, &action->thread_flags))
		return;

	/*
	 * In case we are out of memory we set IRQTF_AFFINITY again and
	 * try again next time
	 */
	if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
		set_bit(IRQTF_AFFINITY, &action->thread_flags);
		return;
	}

	raw_spin_lock_irq(&desc->lock);
	/*
	 * This code is triggered unconditionally. Check the affinity
	 * mask pointer. For CONFIG_CPUMASK_OFFSTACK=n this is optimized out.
	 */
	if (cpumask_available(desc->irq_common_data.affinity))
		cpumask_copy(mask, desc->irq_common_data.affinity);
	else
		valid = false;
	raw_spin_unlock_irq(&desc->lock);

	if (valid)
		set_cpus_allowed_ptr(current, mask);
	free_cpumask_var(mask);
}
#else
static inline void
irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) { }
#endif

/*
 * Interrupts which are not explicitly requested as threaded
 * interrupts rely on the implicit bh/preempt disable of the hard irq
 * context. So we need to disable bh here to avoid deadlocks and other
 * side effects.
 */
static irqreturn_t
irq_forced_thread_fn(struct irq_desc *desc, struct irqaction *action)
{
	irqreturn_t ret;

	local_bh_disable();
	ret = action->thread_fn(action->irq, action->dev_id);
	irq_finalize_oneshot(desc, action);
	local_bh_enable();
	return ret;
}

/*
 * Interrupts explicitly requested as threaded interrupts want to be
 * preemptible - many of them need to sleep and wait for slow buses to
 * complete.
 */
static irqreturn_t irq_thread_fn(struct irq_desc *desc,
				 struct irqaction *action)
{
	irqreturn_t ret;

	ret = action->thread_fn(action->irq, action->dev_id);
	irq_finalize_oneshot(desc, action);
	return ret;
}

static void wake_threads_waitq(struct irq_desc *desc)
{
	if (atomic_dec_and_test(&desc->threads_active))
		wake_up(&desc->wait_for_threads);
}

static void irq_thread_dtor(struct callback_head *unused)
{
	struct task_struct *tsk = current;
	struct irq_desc *desc;
	struct irqaction *action;

	if (WARN_ON_ONCE(!(current->flags & PF_EXITING)))
		return;

	action = kthread_data(tsk);

	pr_err("exiting task \"%s\" (%d) is an active IRQ thread (irq %d)\n",
	       tsk->comm, tsk->pid, action->irq);

	desc = irq_to_desc(action->irq);
	/*
	 * If IRQTF_RUNTHREAD is set, we need to decrement
	 * desc->threads_active and wake possible waiters.
	 */
	if (test_and_clear_bit(IRQTF_RUNTHREAD, &action->thread_flags))
		wake_threads_waitq(desc);

	/* Prevent a stale desc->threads_oneshot */
	irq_finalize_oneshot(desc, action);
}

static void irq_wake_secondary(struct irq_desc *desc, struct irqaction *action)
{
	struct irqaction *secondary = action->secondary;

	if (WARN_ON_ONCE(!secondary))
		return;

	raw_spin_lock_irq(&desc->lock);
	__irq_wake_thread(desc, secondary);
	raw_spin_unlock_irq(&desc->lock);
}

/*
 * Interrupt handler thread
 */
static int irq_thread(void *data)
{
	struct callback_head on_exit_work;
	struct irqaction *action = data;
	struct irq_desc *desc = irq_to_desc(action->irq);
	irqreturn_t (*handler_fn)(struct irq_desc *desc,
			struct irqaction *action);

	if (force_irqthreads && test_bit(IRQTF_FORCED_THREAD,
					&action->thread_flags))
		handler_fn = irq_forced_thread_fn;
	else
		handler_fn = irq_thread_fn;

	init_task_work(&on_exit_work, irq_thread_dtor);
	task_work_add(current, &on_exit_work, false);

	irq_thread_check_affinity(desc, action);

	while (!irq_wait_for_interrupt(action)) {
		irqreturn_t action_ret;

		irq_thread_check_affinity(desc, action);

		action_ret = handler_fn(desc, action);
		if (action_ret == IRQ_HANDLED)
			atomic_inc(&desc->threads_handled);
		if (action_ret == IRQ_WAKE_THREAD)
			irq_wake_secondary(desc, action);

		wake_threads_waitq(desc);
	}

	/*
	 * This is the regular exit path. __free_irq() is stopping the
	 * thread via kthread_stop() after calling
	 * synchronize_irq(). So neither IRQTF_RUNTHREAD nor the
	 * oneshot mask bit can be set. We cannot verify that as we
	 * cannot touch the oneshot mask at this point anymore as
	 * __setup_irq() might have given out current's thread_mask
	 * again.
	 */
	task_work_cancel(current, irq_thread_dtor);
	return 0;
}

/**
 * irq_wake_thread - wake the irq thread for the action identified by dev_id
 * @irq:	Interrupt line
 * @dev_id:	Device identity for which the thread should be woken
 *
 */
void irq_wake_thread(unsigned int irq, void *dev_id)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irqaction *action;
	unsigned long flags;

	if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc)))
		return;

	raw_spin_lock_irqsave(&desc->lock, flags);
	for_each_action_of_desc(desc, action) {
		if (action->dev_id == dev_id) {
			if (action->thread)
				__irq_wake_thread(desc, action);
			break;
		}
	}
	raw_spin_unlock_irqrestore(&desc->lock, flags);
}
EXPORT_SYMBOL_GPL(irq_wake_thread);
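
/*
 * Example (illustrative sketch; fd and fd->irq are hypothetical): a driver
 * whose hardware signals completion through some side-band mechanism can
 * still kick its threaded handler explicitly, without the line firing.
 *
 *	// e.g. from a timer callback or another interrupt's handler
 *	irq_wake_thread(fd->irq, fd);	// dev_id must match request_threaded_irq()
 *
 * The dev_id argument selects which action of a shared line gets its
 * thread woken.
 */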

static int irq_setup_forced_threading(struct irqaction *new)
{
	if (!force_irqthreads)
		return 0;
	if (new->flags & (IRQF_NO_THREAD | IRQF_PERCPU | IRQF_ONESHOT))
		return 0;

	new->flags |= IRQF_ONESHOT;

	/*
	 * Handle the case where we have a real primary handler and a
	 * thread handler. We force thread them as well by creating a
	 * secondary action.
	 */
	if (new->handler != irq_default_primary_handler && new->thread_fn) {
		/* Allocate the secondary action */
		new->secondary = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
		if (!new->secondary)
			return -ENOMEM;
		new->secondary->handler = irq_forced_secondary_handler;
		new->secondary->thread_fn = new->thread_fn;
		new->secondary->dev_id = new->dev_id;
		new->secondary->irq = new->irq;
		new->secondary->name = new->name;
	}
	/* Deal with the primary handler */
	set_bit(IRQTF_FORCED_THREAD, &new->thread_flags);
	new->thread_fn = new->handler;
	new->handler = irq_default_primary_handler;
	return 0;
}

static int irq_request_resources(struct irq_desc *desc)
{
	struct irq_data *d = &desc->irq_data;
	struct irq_chip *c = d->chip;

	return c->irq_request_resources ? c->irq_request_resources(d) : 0;
}

static void irq_release_resources(struct irq_desc *desc)
{
	struct irq_data *d = &desc->irq_data;
	struct irq_chip *c = d->chip;

	if (c->irq_release_resources)
		c->irq_release_resources(d);
}

static int
setup_irq_thread(struct irqaction *new, unsigned int irq, bool secondary)
{
	struct task_struct *t;
	struct sched_param param = {
		.sched_priority = MAX_USER_RT_PRIO/2,
	};

	if (!secondary) {
		t = kthread_create(irq_thread, new, "irq/%d-%s", irq,
				   new->name);
	} else {
		t = kthread_create(irq_thread, new, "irq/%d-s-%s", irq,
				   new->name);
		param.sched_priority -= 1;
	}

	if (IS_ERR(t))
		return PTR_ERR(t);

	sched_setscheduler_nocheck(t, SCHED_FIFO, &param);

	/*
	 * We keep the reference to the task struct even if
	 * the thread dies to avoid that the interrupt code
	 * references an already freed task_struct.
	 */
	get_task_struct(t);
	new->thread = t;
	/*
	 * Tell the thread to set its affinity. This is
	 * important for shared interrupt handlers as we do
	 * not invoke setup_affinity() for the secondary
	 * handlers as everything is already set up. Even for
	 * interrupts marked with IRQF_NOBALANCING this is
	 * correct as we want the thread to move to the cpu(s)
	 * on which the requesting code placed the interrupt.
	 */
	set_bit(IRQTF_AFFINITY, &new->thread_flags);
	return 0;
}

/*
 * Internal function to register an irqaction - typically used to
 * allocate special interrupts that are part of the architecture.
 *
 * Locking rules:
 *
 * desc->request_mutex	Provides serialization against a concurrent free_irq()
 *   chip_bus_lock	Provides serialization for slow bus operations
 *     desc->lock	Provides serialization against hard interrupts
 *
 * chip_bus_lock and desc->lock are sufficient for all other management and
 * interrupt related functions. desc->request_mutex solely serializes
 * request/free_irq().
 */
static int
__setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
{
	struct irqaction *old, **old_ptr;
	unsigned long flags, thread_mask = 0;
	int ret, nested, shared = 0;

	if (!desc)
		return -EINVAL;

	if (desc->irq_data.chip == &no_irq_chip)
		return -ENOSYS;
	if (!try_module_get(desc->owner))
		return -ENODEV;

	new->irq = irq;

	/*
	 * If the trigger type is not specified by the caller,
	 * then use the default for this interrupt.
	 */
	if (!(new->flags & IRQF_TRIGGER_MASK))
		new->flags |= irqd_get_trigger_type(&desc->irq_data);

	/*
	 * Check whether the interrupt nests into another interrupt
	 * thread.
	 */
	nested = irq_settings_is_nested_thread(desc);
	if (nested) {
		if (!new->thread_fn) {
			ret = -EINVAL;
			goto out_mput;
		}
		/*
		 * Replace the primary handler which was provided from
		 * the driver for non nested interrupt handling by the
		 * dummy function which warns when called.
		 */
		new->handler = irq_nested_primary_handler;
	} else {
		if (irq_settings_can_thread(desc)) {
			ret = irq_setup_forced_threading(new);
			if (ret)
				goto out_mput;
		}
	}

	/*
	 * Create a handler thread when a thread function is supplied
	 * and the interrupt does not nest into another interrupt
	 * thread.
	 */
	if (new->thread_fn && !nested) {
		ret = setup_irq_thread(new, irq, false);
		if (ret)
			goto out_mput;
		if (new->secondary) {
			ret = setup_irq_thread(new->secondary, irq, true);
			if (ret)
				goto out_thread;
		}
	}

	/*
	 * Drivers are often written to work w/o knowledge about the
	 * underlying irq chip implementation, so a request for a
	 * threaded irq without a primary hard irq context handler
	 * requires the ONESHOT flag to be set. Some irq chips like
	 * MSI based interrupts are per se one shot safe. Check the
	 * chip flags, so we can avoid the unmask dance at the end of
	 * the threaded handler for those.
	 */
	if (desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE)
		new->flags &= ~IRQF_ONESHOT;

	/*
	 * Protects against a concurrent __free_irq() call which might wait
	 * for synchronize_irq() to complete without holding the optional
	 * chip bus lock and desc->lock.
	 */
	mutex_lock(&desc->request_mutex);

	/*
	 * Acquire bus lock as the irq_request_resources() callback below
	 * might rely on the serialization or the magic power management
	 * functions which are abusing the irq_bus_lock() callback,
	 */
	chip_bus_lock(desc);

	/* First installed action requests resources. */
	if (!desc->action) {
		ret = irq_request_resources(desc);
		if (ret) {
			pr_err("Failed to request resources for %s (irq %d) on irqchip %s\n",
			       new->name, irq, desc->irq_data.chip->name);
			goto out_bus_unlock;
		}
	}

	/*
	 * The following block of code has to be executed atomically
	 * protected against a concurrent interrupt and any of the other
	 * management calls which are not serialized via
	 * desc->request_mutex or the optional bus lock.
	 */
	raw_spin_lock_irqsave(&desc->lock, flags);
	old_ptr = &desc->action;
	old = *old_ptr;
	if (old) {
		/*
		 * Can't share interrupts unless both agree to and are
		 * the same type (level, edge, polarity). So both flag
		 * fields must have IRQF_SHARED set and the bits which
		 * set the trigger type must match. Also all must
		 * agree on ONESHOT.
		 */
		unsigned int oldtype = irqd_get_trigger_type(&desc->irq_data);

		if (!((old->flags & new->flags) & IRQF_SHARED) ||
		    (oldtype != (new->flags & IRQF_TRIGGER_MASK)) ||
		    ((old->flags ^ new->flags) & IRQF_ONESHOT))
			goto mismatch;

		/* All handlers must agree on per-cpuness */
		if ((old->flags & IRQF_PERCPU) !=
		    (new->flags & IRQF_PERCPU))
			goto mismatch;

		/* add new interrupt at end of irq queue */
		do {
			/*
			 * Or all existing action->thread_mask bits,
			 * so we can find the next zero bit for this
			 * new action.
			 */
			thread_mask |= old->thread_mask;
			old_ptr = &old->next;
			old = *old_ptr;
		} while (old);
		shared = 1;
	}

	/*
	 * Setup the thread mask for this irqaction for ONESHOT. For
	 * !ONESHOT irqs the thread mask is 0 so we can avoid a
	 * conditional in irq_wake_thread().
	 */
	if (new->flags & IRQF_ONESHOT) {
		/*
		 * Unlikely to have 32 resp 64 irqs sharing one line,
		 * but who knows.
		 */
		if (thread_mask == ~0UL) {
			ret = -EBUSY;
			goto out_unlock;
		}
		/*
		 * The thread_mask for the action is or'ed to
1287 * desc->thread_active to indicate that the
1288 * IRQF_ONESHOT thread handler has been woken, but not
1289 * yet finished. The bit is cleared when a thread
1290 * completes. When all threads of a shared interrupt
1291 * line have completed desc->threads_active becomes
1292 * zero and the interrupt line is unmasked. See
1293 * handle.c:irq_wake_thread() for further information.
1294 *
1295 * If no thread is woken by primary (hard irq context)
1296 * interrupt handlers, then desc->threads_active is
1297 * also checked for zero to unmask the irq line in the
1298 * affected hard irq flow handlers
1299 * (handle_[fasteoi|level]_irq).
1300 *
1301 * The new action gets the first zero bit of
1302 * thread_mask assigned. See the loop above which or's
1303 * all existing action->thread_mask bits.
1304 */
1305 new->thread_mask = 1 << ffz(thread_mask);
Thomas Gleixner1c6c6952012-04-19 10:35:17 +02001306
Thomas Gleixnerdc9b2292012-07-13 19:29:45 +02001307 } else if (new->handler == irq_default_primary_handler &&
1308 !(desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE)) {
Thomas Gleixner1c6c6952012-04-19 10:35:17 +02001309 /*
1310 * The interrupt was requested with handler = NULL, so
1311 * we use the default primary handler for it. But it
1312 * does not have the oneshot flag set. In combination
1313 * with level interrupts this is deadly, because the
1314 * default primary handler just wakes the thread, then
1315 * the irq lines is reenabled, but the device still
1316 * has the level irq asserted. Rinse and repeat....
1317 *
1318 * While this works for edge type interrupts, we play
1319 * it safe and reject unconditionally because we can't
1320 * say for sure which type this interrupt really
1321 * has. The type flags are unreliable as the
1322 * underlying chip implementation can override them.
1323 */
Andrew Morton97fd75b2012-05-31 16:26:07 -07001324 pr_err("Threaded irq requested with handler=NULL and !ONESHOT for irq %d\n",
Thomas Gleixner1c6c6952012-04-19 10:35:17 +02001325 irq);
1326 ret = -EINVAL;
Thomas Gleixnercba42352017-06-20 01:37:21 +02001327 goto out_unlock;
Thomas Gleixnerb5faba22011-02-23 23:52:13 +00001328 }
Thomas Gleixnerb5faba22011-02-23 23:52:13 +00001329
Linus Torvalds1da177e2005-04-16 15:20:36 -07001330 if (!shared) {
Thomas Gleixner3aa551c2009-03-23 18:28:15 +01001331 init_waitqueue_head(&desc->wait_for_threads);
1332
Uwe Kleine-König82736f42008-07-23 21:28:54 -07001333 /* Setup the type (level, edge polarity) if configured: */
1334 if (new->flags & IRQF_TRIGGER_MASK) {
Jiang Liua1ff5412015-06-23 19:47:29 +02001335 ret = __irq_set_trigger(desc,
1336 new->flags & IRQF_TRIGGER_MASK);
Uwe Kleine-König82736f42008-07-23 21:28:54 -07001337
Thomas Gleixner19d39a32017-07-11 23:41:52 +02001338 if (ret)
Thomas Gleixnercba42352017-06-20 01:37:21 +02001339 goto out_unlock;
Thomas Gleixner091738a2011-02-14 20:16:43 +01001340 }
Ahmed S. Darwishf75d2222007-05-08 00:27:55 -07001341
Thomas Gleixner009b4c32011-02-07 21:48:49 +01001342 desc->istate &= ~(IRQS_AUTODETECT | IRQS_SPURIOUS_DISABLED | \
Thomas Gleixner32f41252011-03-28 14:10:52 +02001343 IRQS_ONESHOT | IRQS_WAITING);
1344 irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);
Thomas Gleixner94d39e12006-06-29 02:24:50 -07001345
Thomas Gleixnera0056772011-02-08 17:11:03 +01001346 if (new->flags & IRQF_PERCPU) {
1347 irqd_set(&desc->irq_data, IRQD_PER_CPU);
1348 irq_settings_set_per_cpu(desc);
1349 }
Thomas Gleixner6a58fb32011-02-08 15:40:05 +01001350
Thomas Gleixnerb25c3402009-08-13 12:17:22 +02001351 if (new->flags & IRQF_ONESHOT)
Thomas Gleixner3d67bae2011-02-07 21:02:10 +01001352 desc->istate |= IRQS_ONESHOT;
Thomas Gleixnerb25c3402009-08-13 12:17:22 +02001353
Thomas Gleixner2e051552017-06-20 01:37:23 +02001354 /* Exclude IRQ from balancing if requested */
1355 if (new->flags & IRQF_NOBALANCING) {
1356 irq_settings_set_no_balancing(desc);
1357 irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
1358 }
1359
Thomas Gleixner04c848d2017-05-31 11:58:33 +02001360 if (irq_settings_can_autoenable(desc)) {
Thomas Gleixner4cde9c62017-06-20 01:37:49 +02001361 irq_startup(desc, IRQ_RESEND, IRQ_START_COND);
Thomas Gleixner04c848d2017-05-31 11:58:33 +02001362 } else {
1363 /*
1364 * Shared interrupts do not go well with disabling
1365 * auto enable. The sharing interrupt might request
1366 * it while it's still disabled and then wait for
1367 * interrupts forever.
1368 */
1369 WARN_ON_ONCE(new->flags & IRQF_SHARED);
Thomas Gleixnere76de9f2006-06-29 02:24:56 -07001370 /* Undo nested disables: */
1371 desc->depth = 1;
Thomas Gleixner04c848d2017-05-31 11:58:33 +02001372 }
Max Krasnyansky18404752008-05-29 11:02:52 -07001373
Thomas Gleixner876dbd42011-02-08 17:28:12 +01001374 } else if (new->flags & IRQF_TRIGGER_MASK) {
1375 unsigned int nmsk = new->flags & IRQF_TRIGGER_MASK;
Thomas Gleixner7ee7e872016-11-07 19:57:00 +01001376 unsigned int omsk = irqd_get_trigger_type(&desc->irq_data);
Thomas Gleixner876dbd42011-02-08 17:28:12 +01001377
1378 if (nmsk != omsk)
1379 /* hope the handler works with current trigger mode */
Joe Perchesa395d6a2016-03-22 14:28:09 -07001380 pr_warn("irq %d uses trigger mode %u; requested %u\n",
Thomas Gleixner7ee7e872016-11-07 19:57:00 +01001381 irq, omsk, nmsk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001382 }
Uwe Kleine-König82736f42008-07-23 21:28:54 -07001383
Ingo Molnarf17c7542009-02-17 20:43:37 +01001384 *old_ptr = new;
Uwe Kleine-König82736f42008-07-23 21:28:54 -07001385
Thomas Gleixnercab303b2014-08-28 11:44:31 +02001386 irq_pm_install_action(desc, new);
1387
Linus Torvalds8528b0f2007-01-23 14:16:31 -08001388 /* Reset broken irq detection when installing new handler */
1389 desc->irq_count = 0;
1390 desc->irqs_unhandled = 0;
Thomas Gleixner1adb0852008-04-28 17:01:56 +02001391
1392 /*
1393 * Check whether we disabled the irq via the spurious handler
1394 * before. Reenable it and give it another chance.
1395 */
Thomas Gleixner7acdd532011-02-07 20:40:54 +01001396 if (shared && (desc->istate & IRQS_SPURIOUS_DISABLED)) {
1397 desc->istate &= ~IRQS_SPURIOUS_DISABLED;
Jiang Liu79ff1cd2015-06-23 19:52:36 +02001398 __enable_irq(desc);
Thomas Gleixner1adb0852008-04-28 17:01:56 +02001399 }
1400
Thomas Gleixner239007b2009-11-17 16:46:45 +01001401 raw_spin_unlock_irqrestore(&desc->lock, flags);
Thomas Gleixner3a907952017-06-29 23:33:36 +02001402 chip_bus_sync_unlock(desc);
Thomas Gleixner91140142017-06-29 23:33:37 +02001403 mutex_unlock(&desc->request_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001404
Daniel Lezcanob2d3d612017-06-23 16:11:07 +02001405 irq_setup_timings(desc, new);
1406
Thomas Gleixner69ab8492009-08-17 14:07:16 +02001407 /*
1408 * Strictly no need to wake it up, but hung_task complains
1409 * when no hard interrupt wakes the thread up.
1410 */
1411 if (new->thread)
1412 wake_up_process(new->thread);
Thomas Gleixner2a1d3ab2015-09-21 11:01:10 +02001413 if (new->secondary)
1414 wake_up_process(new->secondary->thread);
Thomas Gleixner69ab8492009-08-17 14:07:16 +02001415
Yinghai Lu2c6927a2008-08-19 20:50:11 -07001416 register_irq_proc(irq, desc);
Thomas Gleixner087cdfb2017-06-20 01:37:17 +02001417 irq_add_debugfs_entry(irq, desc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001418 new->dir = NULL;
1419 register_handler_proc(irq, new);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001420 return 0;
Dimitri Sivanichf5163422006-03-25 03:08:23 -08001421
1422mismatch:
Thomas Gleixner3cca53b2006-07-01 19:29:31 -07001423 if (!(new->flags & IRQF_PROBE_SHARED)) {
Andrew Morton97fd75b2012-05-31 16:26:07 -07001424 pr_err("Flags mismatch irq %d. %08x (%s) vs. %08x (%s)\n",
Thomas Gleixnerf5d89472012-04-19 12:06:13 +02001425 irq, new->flags, new->name, old->flags, old->name);
1426#ifdef CONFIG_DEBUG_SHIRQ
Andrew Morton13e87ec2006-04-27 18:39:18 -07001427 dump_stack();
Alan Cox3f050442007-02-12 00:52:04 -08001428#endif
Thomas Gleixnerf5d89472012-04-19 12:06:13 +02001429 }
Thomas Gleixner3aa551c2009-03-23 18:28:15 +01001430 ret = -EBUSY;
1431
Thomas Gleixnercba42352017-06-20 01:37:21 +02001432out_unlock:
Dan Carpenter1c389792011-03-17 14:43:07 +03001433 raw_spin_unlock_irqrestore(&desc->lock, flags);
Thomas Gleixner3b8249e2011-02-07 16:02:20 +01001434
Thomas Gleixner46e48e22017-06-29 23:33:38 +02001435 if (!desc->action)
1436 irq_release_resources(desc);
Thomas Gleixner19d39a32017-07-11 23:41:52 +02001437out_bus_unlock:
1438 chip_bus_sync_unlock(desc);
Thomas Gleixner91140142017-06-29 23:33:37 +02001439 mutex_unlock(&desc->request_mutex);
1440
Thomas Gleixner3aa551c2009-03-23 18:28:15 +01001441out_thread:
Thomas Gleixner3aa551c2009-03-23 18:28:15 +01001442 if (new->thread) {
1443 struct task_struct *t = new->thread;
1444
1445 new->thread = NULL;
Alexander Gordeev05d74ef2012-03-09 14:59:40 +01001446 kthread_stop(t);
Thomas Gleixner3aa551c2009-03-23 18:28:15 +01001447 put_task_struct(t);
1448 }
Thomas Gleixner2a1d3ab2015-09-21 11:01:10 +02001449 if (new->secondary && new->secondary->thread) {
1450 struct task_struct *t = new->secondary->thread;
1451
1452 new->secondary->thread = NULL;
1453 kthread_stop(t);
1454 put_task_struct(t);
1455 }
Sebastian Andrzej Siewiorb6873802011-07-11 12:17:31 +02001456out_mput:
1457 module_put(desc->owner);
Thomas Gleixner3aa551c2009-03-23 18:28:15 +01001458 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001459}
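
/*
 * Illustrative sketch, not part of the original file: what the checks above
 * mean for a requester. A purely threaded request (handler == NULL) must
 * carry IRQF_ONESHOT unless the chip is IRQCHIP_ONESHOT_SAFE, and sharers
 * of one line must agree on IRQF_SHARED, the trigger type and IRQF_ONESHOT.
 * All foo_* names and the register layout are hypothetical.
 */
struct foo_device {
	void __iomem *regs;
};

static irqreturn_t foo_thread_fn(int irq, void *dev_id)
{
	struct foo_device *foo = dev_id;

	/* Slow work runs in the irq thread with interrupts enabled */
	writel(0, foo->regs);	/* ack the (hypothetical) device */
	return IRQ_HANDLED;
}

static int __maybe_unused foo_request(struct foo_device *foo, unsigned int irq)
{
	/*
	 * handler == NULL installs irq_default_primary_handler, so
	 * IRQF_ONESHOT keeps a level triggered line masked until
	 * foo_thread_fn() has finished; without it __setup_irq()
	 * rejects the request with -EINVAL above.
	 */
	return request_threaded_irq(irq, NULL, foo_thread_fn,
				    IRQF_ONESHOT, "foo", foo);
}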

/**
 *	setup_irq - setup an interrupt
 *	@irq: Interrupt line to setup
 *	@act: irqaction for the interrupt
 *
 *	Used to statically setup interrupts in the early boot process.
 */
int setup_irq(unsigned int irq, struct irqaction *act)
{
	int retval;
	struct irq_desc *desc = irq_to_desc(irq);

	if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc)))
		return -EINVAL;

	retval = irq_chip_pm_get(&desc->irq_data);
	if (retval < 0)
		return retval;

	retval = __setup_irq(irq, desc, act);

	if (retval)
		irq_chip_pm_put(&desc->irq_data);

	return retval;
}
EXPORT_SYMBOL_GPL(setup_irq);
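
/*
 * Illustrative sketch, not part of the original file: early boot code can
 * install a statically allocated irqaction with setup_irq() before the
 * normal request_irq() path is usable, much like architecture timer code
 * does. The bar_* names and the IRQ number 0 are hypothetical.
 */
static irqreturn_t bar_timer_interrupt(int irq, void *dev_id)
{
	/* Kick the (hypothetical) clock event device */
	return IRQ_HANDLED;
}

static struct irqaction bar_timer_irqaction = {
	.handler	= bar_timer_interrupt,
	.flags		= IRQF_TIMER | IRQF_NOBALANCING,
	.name		= "bar-timer",
};

static void __maybe_unused bar_setup_timer_irq(void)
{
	if (setup_irq(0, &bar_timer_irqaction))
		pr_err("bar-timer: could not install timer irqaction\n");
}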

/*
 * Internal function to unregister an irqaction - used to free
 * regular and special interrupts that are part of the architecture.
 */
static struct irqaction *__free_irq(unsigned int irq, void *dev_id)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irqaction *action, **action_ptr;
	unsigned long flags;

	WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);

	if (!desc)
		return NULL;

	mutex_lock(&desc->request_mutex);
	chip_bus_lock(desc);
	raw_spin_lock_irqsave(&desc->lock, flags);

	/*
	 * There can be multiple actions per IRQ descriptor, find the right
	 * one based on the dev_id:
	 */
	action_ptr = &desc->action;
	for (;;) {
		action = *action_ptr;

		if (!action) {
			WARN(1, "Trying to free already-free IRQ %d\n", irq);
			raw_spin_unlock_irqrestore(&desc->lock, flags);
			chip_bus_sync_unlock(desc);
			mutex_unlock(&desc->request_mutex);
			return NULL;
		}

		if (action->dev_id == dev_id)
			break;
		action_ptr = &action->next;
	}

	/* Found it - now remove it from the list of entries: */
	*action_ptr = action->next;

	irq_pm_remove_action(desc, action);

	/* If this was the last handler, shut down the IRQ line: */
	if (!desc->action) {
		irq_settings_clr_disable_unlazy(desc);
		irq_shutdown(desc);
	}

#ifdef CONFIG_SMP
	/* make sure affinity_hint is cleaned up */
	if (WARN_ON_ONCE(desc->affinity_hint))
		desc->affinity_hint = NULL;
#endif

	raw_spin_unlock_irqrestore(&desc->lock, flags);
	/*
	 * Drop bus_lock here so the changes which were done in the chip
	 * callbacks above are synced out to the irq chips which hang
	 * behind a slow bus (I2C, SPI) before calling synchronize_irq().
	 *
	 * Aside from that, the bus_lock can also be taken from the threaded
	 * handler in irq_finalize_oneshot() which results in a deadlock
	 * because synchronize_irq() would wait forever for the thread to
	 * complete, which is blocked on the bus lock.
	 *
	 * The still held desc->request_mutex protects against a
	 * concurrent request_irq() of this irq so the release of resources
	 * and timing data is properly serialized.
	 */
	chip_bus_sync_unlock(desc);

	unregister_handler_proc(irq, action);

	/* Make sure it's not being used on another CPU: */
	synchronize_irq(irq);

#ifdef CONFIG_DEBUG_SHIRQ
	/*
	 * It's a shared IRQ -- the driver ought to be prepared for an IRQ
	 * event to happen even now that it's being freed, so let's make sure
	 * that is so by doing an extra call to the handler ....
	 *
	 * ( We do this after actually deregistering it, to make sure that a
	 *   'real' IRQ doesn't run in parallel with our fake. )
	 */
	if (action->flags & IRQF_SHARED) {
		local_irq_save(flags);
		action->handler(irq, dev_id);
		local_irq_restore(flags);
	}
#endif

	if (action->thread) {
		kthread_stop(action->thread);
		put_task_struct(action->thread);
		if (action->secondary && action->secondary->thread) {
			kthread_stop(action->secondary->thread);
			put_task_struct(action->secondary->thread);
		}
	}

	/* Last action releases resources */
	if (!desc->action) {
		/*
		 * Reacquire bus lock as irq_release_resources() might
		 * require it to deallocate resources over the slow bus.
		 */
		chip_bus_lock(desc);
		irq_release_resources(desc);
		chip_bus_sync_unlock(desc);
		irq_remove_timings(desc);
	}

	mutex_unlock(&desc->request_mutex);

	irq_chip_pm_put(&desc->irq_data);
	module_put(desc->owner);
	kfree(action->secondary);
	return action;
}

/**
 *	remove_irq - free an interrupt
 *	@irq: Interrupt line to free
 *	@act: irqaction for the interrupt
 *
 *	Used to remove interrupts statically setup by the early boot process.
 */
void remove_irq(unsigned int irq, struct irqaction *act)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (desc && !WARN_ON(irq_settings_is_per_cpu_devid(desc)))
		__free_irq(irq, act->dev_id);
}
EXPORT_SYMBOL_GPL(remove_irq);

/**
 *	free_irq - free an interrupt allocated with request_irq
 *	@irq: Interrupt line to free
 *	@dev_id: Device identity to free
 *
 *	Remove an interrupt handler. The handler is removed and if the
 *	interrupt line is no longer in use by any driver it is disabled.
 *	On a shared IRQ the caller must ensure the interrupt is disabled
 *	on the card it drives before calling this function. The function
 *	does not return until any executing interrupts for this IRQ
 *	have completed.
 *
 *	This function must not be called from interrupt context.
 *
 *	Returns the devname argument passed to request_irq.
 */
const void *free_irq(unsigned int irq, void *dev_id)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irqaction *action;
	const char *devname;

	if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc)))
		return NULL;

#ifdef CONFIG_SMP
	if (WARN_ON(desc->affinity_notify))
		desc->affinity_notify = NULL;
#endif

	action = __free_irq(irq, dev_id);

	if (!action)
		return NULL;

	devname = action->name;
	kfree(action);
	return devname;
}
EXPORT_SYMBOL(free_irq);
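
/*
 * Illustrative sketch, not part of the original file: tearing an interrupt
 * down with free_irq(). On a shared line the device is quiesced first, as
 * the comment above requires, and the returned devname (the string passed
 * to request_irq()) can be used for diagnostics. The baz_* names and the
 * register layout are hypothetical.
 */
struct baz_device {
	void __iomem *regs;
	unsigned int irq;
};

static void __maybe_unused baz_teardown_irq(struct baz_device *baz)
{
	const char *devname;

	writel(0, baz->regs);	/* hypothetical interrupt-enable register */

	devname = free_irq(baz->irq, baz);
	pr_debug("released irq %u (%s)\n", baz->irq, devname);
}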

/**
 *	request_threaded_irq - allocate an interrupt line
 *	@irq: Interrupt line to allocate
 *	@handler: Function to be called when the IRQ occurs.
 *		  Primary handler for threaded interrupts
 *		  If NULL and thread_fn != NULL the default
 *		  primary handler is installed
 *	@thread_fn: Function called from the irq handler thread
 *		    If NULL, no irq thread is created
 *	@irqflags: Interrupt type flags
 *	@devname: An ascii name for the claiming device
 *	@dev_id: A cookie passed back to the handler function
 *
 *	This call allocates interrupt resources and enables the
 *	interrupt line and IRQ handling. From the point this
 *	call is made your handler function may be invoked. Since
 *	your handler function must clear any interrupt the board
 *	raises, you must take care both to initialise your hardware
 *	and to set up the interrupt handler in the right order.
 *
 *	If you want to set up a threaded irq handler for your device
 *	then you need to supply @handler and @thread_fn. @handler is
 *	still called in hard interrupt context and has to check
 *	whether the interrupt originates from the device. If yes it
 *	needs to disable the interrupt on the device and return
 *	IRQ_WAKE_THREAD which will wake up the handler thread and run
 *	@thread_fn. This split handler design is necessary to support
 *	shared interrupts.
 *
 *	Dev_id must be globally unique. Normally the address of the
 *	device data structure is used as the cookie. Since the handler
 *	receives this value it makes sense to use it.
 *
 *	If your interrupt is shared you must pass a non NULL dev_id
 *	as this is required when freeing the interrupt.
 *
 *	Flags:
 *
 *	IRQF_SHARED		Interrupt is shared
 *	IRQF_TRIGGER_*		Specify active edge(s) or level
 *
 */
int request_threaded_irq(unsigned int irq, irq_handler_t handler,
			 irq_handler_t thread_fn, unsigned long irqflags,
			 const char *devname, void *dev_id)
{
	struct irqaction *action;
	struct irq_desc *desc;
	int retval;

	if (irq == IRQ_NOTCONNECTED)
		return -ENOTCONN;

	/*
	 * Sanity-check: shared interrupts must pass in a real dev-ID,
	 * otherwise we'll have trouble later trying to figure out
	 * which interrupt is which (messes up the interrupt freeing
	 * logic etc).
	 *
	 * Also IRQF_COND_SUSPEND only makes sense for shared interrupts and
	 * it cannot be set along with IRQF_NO_SUSPEND.
	 */
	if (((irqflags & IRQF_SHARED) && !dev_id) ||
	    (!(irqflags & IRQF_SHARED) && (irqflags & IRQF_COND_SUSPEND)) ||
	    ((irqflags & IRQF_NO_SUSPEND) && (irqflags & IRQF_COND_SUSPEND)))
		return -EINVAL;

	desc = irq_to_desc(irq);
	if (!desc)
		return -EINVAL;

	if (!irq_settings_can_request(desc) ||
	    WARN_ON(irq_settings_is_per_cpu_devid(desc)))
		return -EINVAL;

	if (!handler) {
		if (!thread_fn)
			return -EINVAL;
		handler = irq_default_primary_handler;
	}

	action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
	if (!action)
		return -ENOMEM;

	action->handler = handler;
	action->thread_fn = thread_fn;
	action->flags = irqflags;
	action->name = devname;
	action->dev_id = dev_id;

	retval = irq_chip_pm_get(&desc->irq_data);
	if (retval < 0) {
		kfree(action);
		return retval;
	}

	retval = __setup_irq(irq, desc, action);

	if (retval) {
		irq_chip_pm_put(&desc->irq_data);
		kfree(action->secondary);
		kfree(action);
	}

#ifdef CONFIG_DEBUG_SHIRQ_FIXME
	if (!retval && (irqflags & IRQF_SHARED)) {
		/*
		 * It's a shared IRQ -- the driver ought to be prepared for it
		 * to happen immediately, so let's make sure....
		 * We disable the irq to make sure that a 'real' IRQ doesn't
		 * run in parallel with our fake.
		 */
		unsigned long flags;

		disable_irq(irq);
		local_irq_save(flags);

		handler(irq, dev_id);

		local_irq_restore(flags);
		enable_irq(irq);
	}
#endif
	return retval;
}
EXPORT_SYMBOL(request_threaded_irq);
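
/*
 * Illustrative sketch, not part of the original file: the split primary /
 * threaded handler pattern described above. The primary handler runs in
 * hard irq context, checks whether the device raised the interrupt,
 * silences it and returns IRQ_WAKE_THREAD so the slow work runs in
 * qux_thread_fn(). All qux_* names and the register layout are hypothetical.
 */
struct qux_device {
	void __iomem *regs;
	unsigned int irq;
};

#define QUX_REG_STATUS	0x00	/* hypothetical */
#define QUX_REG_MASK	0x04	/* hypothetical */

static irqreturn_t qux_hardirq(int irq, void *dev_id)
{
	struct qux_device *qux = dev_id;

	if (!readl(qux->regs + QUX_REG_STATUS))
		return IRQ_NONE;		/* not ours (shared line) */

	writel(0, qux->regs + QUX_REG_MASK);	/* silence the device */
	return IRQ_WAKE_THREAD;
}

static irqreturn_t qux_thread_fn(int irq, void *dev_id)
{
	struct qux_device *qux = dev_id;

	/* Heavy lifting; sleeping is allowed here */
	writel(~0, qux->regs + QUX_REG_MASK);	/* re-enable the device */
	return IRQ_HANDLED;
}

static int __maybe_unused qux_request_irq(struct qux_device *qux)
{
	return request_threaded_irq(qux->irq, qux_hardirq, qux_thread_fn,
				    IRQF_SHARED, "qux", qux);
}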

/**
 *	request_any_context_irq - allocate an interrupt line
 *	@irq: Interrupt line to allocate
 *	@handler: Function to be called when the IRQ occurs.
 *		  Threaded handler for threaded interrupts.
 *	@flags: Interrupt type flags
 *	@name: An ascii name for the claiming device
 *	@dev_id: A cookie passed back to the handler function
 *
 *	This call allocates interrupt resources and enables the
 *	interrupt line and IRQ handling. It selects either a
 *	hardirq or threaded handling method depending on the
 *	context.
 *
 *	On failure, it returns a negative value. On success,
 *	it returns either IRQC_IS_HARDIRQ or IRQC_IS_NESTED.
 */
int request_any_context_irq(unsigned int irq, irq_handler_t handler,
			    unsigned long flags, const char *name, void *dev_id)
{
	struct irq_desc *desc;
	int ret;

	if (irq == IRQ_NOTCONNECTED)
		return -ENOTCONN;

	desc = irq_to_desc(irq);
	if (!desc)
		return -EINVAL;

	if (irq_settings_is_nested_thread(desc)) {
		ret = request_threaded_irq(irq, NULL, handler,
					   flags, name, dev_id);
		return !ret ? IRQC_IS_NESTED : ret;
	}

	ret = request_irq(irq, handler, flags, name, dev_id);
	return !ret ? IRQC_IS_HARDIRQ : ret;
}
EXPORT_SYMBOL_GPL(request_any_context_irq);
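
/*
 * Illustrative sketch, not part of the original file: request_any_context_irq()
 * suits a driver that may sit behind either a regular irq chip or a nested
 * one (e.g. a threaded gpio expander), so its handler must not assume it
 * runs in hard irq context. The quux_* names are hypothetical.
 */
static irqreturn_t quux_handler(int irq, void *dev_id)
{
	/* May run in hard irq or in thread context, depending on the chip */
	return IRQ_HANDLED;
}

static int __maybe_unused quux_request(unsigned int irq, void *quux)
{
	int ret;

	ret = request_any_context_irq(irq, quux_handler, IRQF_TRIGGER_FALLING,
				      "quux", quux);
	if (ret < 0)
		return ret;

	pr_debug("quux: irq %u is %s\n", irq,
		 ret == IRQC_IS_NESTED ? "nested/threaded" : "a hardirq");
	return 0;
}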

void enable_percpu_irq(unsigned int irq, unsigned int type)
{
	unsigned int cpu = smp_processor_id();
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);

	if (!desc)
		return;

	/*
	 * If the trigger type is not specified by the caller, then
	 * use the default for this interrupt.
	 */
	type &= IRQ_TYPE_SENSE_MASK;
	if (type == IRQ_TYPE_NONE)
		type = irqd_get_trigger_type(&desc->irq_data);

	if (type != IRQ_TYPE_NONE) {
		int ret;

		ret = __irq_set_trigger(desc, type);

		if (ret) {
			WARN(1, "failed to set type for IRQ%d\n", irq);
			goto out;
		}
	}

	irq_percpu_enable(desc, cpu);
out:
	irq_put_desc_unlock(desc, flags);
}
EXPORT_SYMBOL_GPL(enable_percpu_irq);
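
/*
 * Illustrative sketch, not part of the original file: enable_percpu_irq()
 * only affects the calling CPU, so a per-cpu interrupt is typically enabled
 * on every CPU via on_each_cpu() or a CPU hotplug callback. The corge_*
 * names are hypothetical.
 */
static void corge_enable_on_cpu(void *info)
{
	unsigned int irq = *(unsigned int *)info;

	/* IRQ_TYPE_NONE keeps whatever trigger type is already configured */
	enable_percpu_irq(irq, IRQ_TYPE_NONE);
}

static void __maybe_unused corge_enable_everywhere(unsigned int irq)
{
	on_each_cpu(corge_enable_on_cpu, &irq, 1);
}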

/**
 * irq_percpu_is_enabled - Check whether the per cpu irq is enabled
 * @irq: Linux irq number to check for
 *
 * Must be called from a non-migratable context. Returns the enable
 * state of a per cpu interrupt on the current cpu.
 */
bool irq_percpu_is_enabled(unsigned int irq)
{
	unsigned int cpu = smp_processor_id();
	struct irq_desc *desc;
	unsigned long flags;
	bool is_enabled;

	desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);
	if (!desc)
		return false;

	is_enabled = cpumask_test_cpu(cpu, desc->percpu_enabled);
	irq_put_desc_unlock(desc, flags);

	return is_enabled;
}
EXPORT_SYMBOL_GPL(irq_percpu_is_enabled);

void disable_percpu_irq(unsigned int irq)
{
	unsigned int cpu = smp_processor_id();
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);

	if (!desc)
		return;

	irq_percpu_disable(desc, cpu);
	irq_put_desc_unlock(desc, flags);
}
EXPORT_SYMBOL_GPL(disable_percpu_irq);

/*
 * Internal function to unregister a percpu irqaction.
 */
static struct irqaction *__free_percpu_irq(unsigned int irq, void __percpu *dev_id)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irqaction *action;
	unsigned long flags;

	WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);

	if (!desc)
		return NULL;

	raw_spin_lock_irqsave(&desc->lock, flags);

	action = desc->action;
	if (!action || action->percpu_dev_id != dev_id) {
		WARN(1, "Trying to free already-free IRQ %d\n", irq);
		goto bad;
	}

	if (!cpumask_empty(desc->percpu_enabled)) {
		WARN(1, "percpu IRQ %d still enabled on CPU%d!\n",
		     irq, cpumask_first(desc->percpu_enabled));
		goto bad;
	}

	/* Found it - now remove it from the list of entries: */
	desc->action = NULL;

	raw_spin_unlock_irqrestore(&desc->lock, flags);

	unregister_handler_proc(irq, action);

	irq_chip_pm_put(&desc->irq_data);
	module_put(desc->owner);
	return action;

bad:
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	return NULL;
}

/**
 * remove_percpu_irq - free a per-cpu interrupt
 * @irq: Interrupt line to free
 * @act: irqaction for the interrupt
 *
 * Used to remove interrupts statically setup by the early boot process.
 */
void remove_percpu_irq(unsigned int irq, struct irqaction *act)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (desc && irq_settings_is_per_cpu_devid(desc))
		__free_percpu_irq(irq, act->percpu_dev_id);
}

/**
 *	free_percpu_irq - free an interrupt allocated with request_percpu_irq
 *	@irq: Interrupt line to free
 *	@dev_id: Device identity to free
 *
 *	Remove a percpu interrupt handler. The handler is removed, but
 *	the interrupt line is not disabled. This must be done on each
 *	CPU before calling this function. The function does not return
 *	until any executing interrupts for this IRQ have completed.
 *
 *	This function must not be called from interrupt context.
 */
void free_percpu_irq(unsigned int irq, void __percpu *dev_id)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (!desc || !irq_settings_is_per_cpu_devid(desc))
		return;

	chip_bus_lock(desc);
	kfree(__free_percpu_irq(irq, dev_id));
	chip_bus_sync_unlock(desc);
}
EXPORT_SYMBOL_GPL(free_percpu_irq);
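
/*
 * Illustrative sketch, not part of the original file: free_percpu_irq() does
 * not disable the interrupt, so teardown mirrors the enable path by running
 * disable_percpu_irq() on every CPU first. The grault_* names are
 * hypothetical and the counters pointer is assumed to be the __percpu
 * dev_id that was passed to request_percpu_irq().
 */
static void grault_disable_on_cpu(void *info)
{
	disable_percpu_irq(*(unsigned int *)info);
}

static void __maybe_unused grault_teardown(unsigned int irq,
					   unsigned int __percpu *counters)
{
	on_each_cpu(grault_disable_on_cpu, &irq, 1);
	free_percpu_irq(irq, counters);
	free_percpu(counters);
}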

/**
 * setup_percpu_irq - setup a per-cpu interrupt
 * @irq: Interrupt line to setup
 * @act: irqaction for the interrupt
 *
 * Used to statically setup per-cpu interrupts in the early boot process.
 */
int setup_percpu_irq(unsigned int irq, struct irqaction *act)
{
	struct irq_desc *desc = irq_to_desc(irq);
	int retval;

	if (!desc || !irq_settings_is_per_cpu_devid(desc))
		return -EINVAL;

	retval = irq_chip_pm_get(&desc->irq_data);
	if (retval < 0)
		return retval;

	retval = __setup_irq(irq, desc, act);

	if (retval)
		irq_chip_pm_put(&desc->irq_data);

	return retval;
}

/**
 *	__request_percpu_irq - allocate a percpu interrupt line
 *	@irq: Interrupt line to allocate
 *	@handler: Function to be called when the IRQ occurs.
 *	@flags: Interrupt type flags (IRQF_TIMER only)
 *	@devname: An ascii name for the claiming device
 *	@dev_id: A percpu cookie passed back to the handler function
 *
 *	This call allocates interrupt resources and enables the
 *	interrupt on the local CPU. If the interrupt is supposed to be
 *	enabled on other CPUs, it has to be done on each CPU using
 *	enable_percpu_irq().
 *
 *	Dev_id must be globally unique. It is a per-cpu variable, and
 *	the handler gets called with the interrupted CPU's instance of
 *	that variable.
 */
int __request_percpu_irq(unsigned int irq, irq_handler_t handler,
			 unsigned long flags, const char *devname,
			 void __percpu *dev_id)
{
	struct irqaction *action;
	struct irq_desc *desc;
	int retval;

	if (!dev_id)
		return -EINVAL;

	desc = irq_to_desc(irq);
	if (!desc || !irq_settings_can_request(desc) ||
	    !irq_settings_is_per_cpu_devid(desc))
		return -EINVAL;

	if (flags && flags != IRQF_TIMER)
		return -EINVAL;

	action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
	if (!action)
		return -ENOMEM;

	action->handler = handler;
	action->flags = flags | IRQF_PERCPU | IRQF_NO_SUSPEND;
	action->name = devname;
	action->percpu_dev_id = dev_id;

	retval = irq_chip_pm_get(&desc->irq_data);
	if (retval < 0) {
		kfree(action);
		return retval;
	}

	retval = __setup_irq(irq, desc, action);

	if (retval) {
		irq_chip_pm_put(&desc->irq_data);
		kfree(action);
	}

	return retval;
}
EXPORT_SYMBOL_GPL(__request_percpu_irq);
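
/*
 * Illustrative sketch, not part of the original file: the usual entry point
 * is the request_percpu_irq() wrapper from <linux/interrupt.h>, which calls
 * __request_percpu_irq() with flags == 0. The dev_id is a percpu allocation
 * and the handler receives the current CPU's instance of it. The garply_*
 * names are hypothetical.
 */
static irqreturn_t garply_percpu_handler(int irq, void *dev_id)
{
	unsigned int *count = dev_id;	/* this CPU's instance */

	(*count)++;
	return IRQ_HANDLED;
}

static int __maybe_unused garply_init(unsigned int irq)
{
	unsigned int __percpu *counters;
	int ret;

	counters = alloc_percpu(unsigned int);
	if (!counters)
		return -ENOMEM;

	ret = request_percpu_irq(irq, garply_percpu_handler, "garply", counters);
	if (ret) {
		free_percpu(counters);
		return ret;
	}

	/* Enables the interrupt on the local CPU only, see above */
	enable_percpu_irq(irq, IRQ_TYPE_NONE);
	return 0;
}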

/**
 * irq_get_irqchip_state - returns the irqchip state of an interrupt.
 * @irq: Interrupt line that is forwarded to a VM
 * @which: One of IRQCHIP_STATE_* the caller wants to know about
 * @state: a pointer to a boolean where the state is to be stored
 *
 * This call snapshots the internal irqchip state of an
 * interrupt, returning into @state the bit corresponding to
 * state @which.
 *
 * This function should be called with preemption disabled if the
 * interrupt controller has per-cpu registers.
 */
int irq_get_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
			  bool *state)
{
	struct irq_desc *desc;
	struct irq_data *data;
	struct irq_chip *chip;
	unsigned long flags;
	int err = -EINVAL;

	desc = irq_get_desc_buslock(irq, &flags, 0);
	if (!desc)
		return err;

	data = irq_desc_get_irq_data(desc);

	do {
		chip = irq_data_get_irq_chip(data);
		if (chip->irq_get_irqchip_state)
			break;
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
		data = data->parent_data;
#else
		data = NULL;
#endif
	} while (data);

	if (data)
		err = chip->irq_get_irqchip_state(data, which, state);

	irq_put_desc_busunlock(desc, flags);
	return err;
}
EXPORT_SYMBOL_GPL(irq_get_irqchip_state);
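
/*
 * Illustrative sketch, not part of the original file: snapshotting the
 * pending bit of an interrupt that is forwarded to a guest, in the style of
 * a VFIO/KVM user of this interface. The waldo_* name is hypothetical.
 */
static bool __maybe_unused waldo_irq_is_pending(unsigned int irq)
{
	bool pending = false;

	/* Disable preemption in case the irqchip has per-cpu registers */
	preempt_disable();
	if (irq_get_irqchip_state(irq, IRQCHIP_STATE_PENDING, &pending))
		pending = false;
	preempt_enable();

	return pending;
}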

/**
 * irq_set_irqchip_state - set the state of a forwarded interrupt.
 * @irq: Interrupt line that is forwarded to a VM
 * @which: State to be restored (one of IRQCHIP_STATE_*)
 * @val: Value corresponding to @which
 *
 * This call sets the internal irqchip state of an interrupt,
 * depending on the value of @which.
 *
 * This function should be called with preemption disabled if the
 * interrupt controller has per-cpu registers.
 */
int irq_set_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
			  bool val)
{
	struct irq_desc *desc;
	struct irq_data *data;
	struct irq_chip *chip;
	unsigned long flags;
	int err = -EINVAL;

	desc = irq_get_desc_buslock(irq, &flags, 0);
	if (!desc)
		return err;

	data = irq_desc_get_irq_data(desc);

	do {
		chip = irq_data_get_irq_chip(data);
		if (chip->irq_set_irqchip_state)
			break;
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
		data = data->parent_data;
#else
		data = NULL;
#endif
	} while (data);

	if (data)
		err = chip->irq_set_irqchip_state(data, which, val);

	irq_put_desc_busunlock(desc, flags);
	return err;
}
EXPORT_SYMBOL_GPL(irq_set_irqchip_state);
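
/*
 * Illustrative sketch, not part of the original file: a save/restore pair
 * built on the two calls above, e.g. to move a forwarded interrupt's pending
 * bit from one line to another when a VM is migrated. The fred_* name is
 * hypothetical.
 */
static int __maybe_unused fred_move_pending_bit(unsigned int from_irq,
						unsigned int to_irq)
{
	bool pending;
	int ret;

	ret = irq_get_irqchip_state(from_irq, IRQCHIP_STATE_PENDING, &pending);
	if (ret)
		return ret;

	/* Clear the bit at the source and replay it at the destination */
	ret = irq_set_irqchip_state(from_irq, IRQCHIP_STATE_PENDING, false);
	if (!ret)
		ret = irq_set_irqchip_state(to_irq, IRQCHIP_STATE_PENDING, pending);

	return ret;
}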