/*
 * linux/kernel/irq/manage.c
 *
 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
 * Copyright (C) 2005-2006 Thomas Gleixner
 *
 * This file contains driver APIs to the irq subsystem.
 */

#define pr_fmt(fmt) "genirq: " fmt

#include <linux/irq.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/sched/rt.h>
#include <linux/task_work.h>

#include "internals.h"

#ifdef CONFIG_IRQ_FORCED_THREADING
__read_mostly bool force_irqthreads;

static int __init setup_forced_irqthreads(char *arg)
{
	force_irqthreads = true;
	return 0;
}
early_param("threadirqs", setup_forced_irqthreads);
#endif
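
/*
 * Forced irq threading is selected at boot time by adding "threadirqs"
 * to the kernel command line, e.g.:
 *
 *	linux ... threadirqs
 *
 * Handlers that must stay in hard interrupt context can opt out with
 * IRQF_NO_THREAD (see irq_setup_forced_threading() below).
 */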

/**
 *	synchronize_irq - wait for pending IRQ handlers (on other CPUs)
 *	@irq: interrupt number to wait for
 *
 *	This function waits for any pending IRQ handlers for this interrupt
 *	to complete before returning. If you use this function while
 *	holding a resource the IRQ handler may need, you will deadlock.
 *
 *	This function may be called - with care - from IRQ context.
 */
void synchronize_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	bool inprogress;

	if (!desc)
		return;

	do {
		unsigned long flags;

		/*
		 * Wait until we're out of the critical section.  This might
		 * give the wrong answer due to the lack of memory barriers.
		 */
		while (irqd_irq_inprogress(&desc->irq_data))
			cpu_relax();

		/* Ok, that indicated we're done: double-check carefully. */
		raw_spin_lock_irqsave(&desc->lock, flags);
		inprogress = irqd_irq_inprogress(&desc->irq_data);
		raw_spin_unlock_irqrestore(&desc->lock, flags);

		/* Oops, that failed? */
	} while (inprogress);

	/*
	 * We made sure that no hardirq handler is running. Now verify
	 * that no threaded handlers are active.
	 */
	wait_event(desc->wait_for_threads, !atomic_read(&desc->threads_active));
}
EXPORT_SYMBOL(synchronize_irq);
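
/*
 * Illustrative teardown ordering in a driver (sketch only; "my_dev" is
 * a placeholder for some driver private data):
 *
 *	disable_irq_nosync(my_dev->irq);
 *	synchronize_irq(my_dev->irq);	<- no handler runs past this point
 *	free_irq(my_dev->irq, my_dev);
 *
 * The caller must not hold any resource the handler might take, or
 * synchronize_irq() can deadlock as described above.
 */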

#ifdef CONFIG_SMP
cpumask_var_t irq_default_affinity;

/**
 *	irq_can_set_affinity - Check if the affinity of a given irq can be set
 *	@irq:		Interrupt to check
 *
 */
int irq_can_set_affinity(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (!desc || !irqd_can_balance(&desc->irq_data) ||
	    !desc->irq_data.chip || !desc->irq_data.chip->irq_set_affinity)
		return 0;

	return 1;
}

/**
 *	irq_set_thread_affinity - Notify irq threads to adjust affinity
 *	@desc:		irq descriptor which has affinity changed
 *
 *	We just set IRQTF_AFFINITY and delegate the affinity setting
 *	to the interrupt thread itself. We cannot call
 *	set_cpus_allowed_ptr() here as we hold desc->lock and this
 *	code can be called from hard interrupt context.
 */
void irq_set_thread_affinity(struct irq_desc *desc)
{
	struct irqaction *action = desc->action;

	while (action) {
		if (action->thread)
			set_bit(IRQTF_AFFINITY, &action->thread_flags);
		action = action->next;
	}
}

#ifdef CONFIG_GENERIC_PENDING_IRQ
static inline bool irq_can_move_pcntxt(struct irq_data *data)
{
	return irqd_can_move_in_process_context(data);
}
static inline bool irq_move_pending(struct irq_data *data)
{
	return irqd_is_setaffinity_pending(data);
}
static inline void
irq_copy_pending(struct irq_desc *desc, const struct cpumask *mask)
{
	cpumask_copy(desc->pending_mask, mask);
}
static inline void
irq_get_pending(struct cpumask *mask, struct irq_desc *desc)
{
	cpumask_copy(mask, desc->pending_mask);
}
#else
static inline bool irq_can_move_pcntxt(struct irq_data *data) { return true; }
static inline bool irq_move_pending(struct irq_data *data) { return false; }
static inline void
irq_copy_pending(struct irq_desc *desc, const struct cpumask *mask) { }
static inline void
irq_get_pending(struct cpumask *mask, struct irq_desc *desc) { }
#endif

int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask,
			bool force)
{
	struct irq_desc *desc = irq_data_to_desc(data);
	struct irq_chip *chip = irq_data_get_irq_chip(data);
	int ret;

	ret = chip->irq_set_affinity(data, mask, false);
	switch (ret) {
	case IRQ_SET_MASK_OK:
		cpumask_copy(data->affinity, mask);
		/* fall through - thread affinity is updated in both cases */
	case IRQ_SET_MASK_OK_NOCOPY:
		irq_set_thread_affinity(desc);
		ret = 0;
	}

	return ret;
}

int __irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask)
{
	struct irq_chip *chip = irq_data_get_irq_chip(data);
	struct irq_desc *desc = irq_data_to_desc(data);
	int ret = 0;

	if (!chip || !chip->irq_set_affinity)
		return -EINVAL;

	if (irq_can_move_pcntxt(data)) {
		ret = irq_do_set_affinity(data, mask, false);
	} else {
		irqd_set_move_pending(data);
		irq_copy_pending(desc, mask);
	}

	if (desc->affinity_notify) {
		kref_get(&desc->affinity_notify->kref);
		schedule_work(&desc->affinity_notify->work);
	}
	irqd_set(data, IRQD_AFFINITY_SET);

	return ret;
}

/**
 *	irq_set_affinity - Set the irq affinity of a given irq
 *	@irq:		Interrupt to set affinity
 *	@mask:		cpumask
 *
 */
int irq_set_affinity(unsigned int irq, const struct cpumask *mask)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;
	int ret;

	if (!desc)
		return -EINVAL;

	raw_spin_lock_irqsave(&desc->lock, flags);
	ret = __irq_set_affinity_locked(irq_desc_get_irq_data(desc), mask);
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	return ret;
}
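
/*
 * Illustrative use (sketch only): pin an interrupt to CPU 0.
 *
 *	ret = irq_set_affinity(irq, cpumask_of(0));
 *
 * When the irq cannot be moved in process context, the new mask is only
 * recorded as pending and gets applied from interrupt context later.
 */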

int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);

	if (!desc)
		return -EINVAL;
	desc->affinity_hint = m;
	irq_put_desc_unlock(desc, flags);
	return 0;
}
EXPORT_SYMBOL_GPL(irq_set_affinity_hint);

static void irq_affinity_notify(struct work_struct *work)
{
	struct irq_affinity_notify *notify =
		container_of(work, struct irq_affinity_notify, work);
	struct irq_desc *desc = irq_to_desc(notify->irq);
	cpumask_var_t cpumask;
	unsigned long flags;

	if (!desc || !alloc_cpumask_var(&cpumask, GFP_KERNEL))
		goto out;

	raw_spin_lock_irqsave(&desc->lock, flags);
	if (irq_move_pending(&desc->irq_data))
		irq_get_pending(cpumask, desc);
	else
		cpumask_copy(cpumask, desc->irq_data.affinity);
	raw_spin_unlock_irqrestore(&desc->lock, flags);

	notify->notify(notify, cpumask);

	free_cpumask_var(cpumask);
out:
	kref_put(&notify->kref, notify->release);
}

/**
 *	irq_set_affinity_notifier - control notification of IRQ affinity changes
 *	@irq:		Interrupt for which to enable/disable notification
 *	@notify:	Context for notification, or %NULL to disable
 *			notification. Function pointers must be initialised;
 *			the other fields will be initialised by this function.
 *
 *	Must be called in process context. Notification may only be enabled
 *	after the IRQ is allocated and must be disabled before the IRQ is
 *	freed using free_irq().
 */
int
irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irq_affinity_notify *old_notify;
	unsigned long flags;

	/* The release function is promised process context */
	might_sleep();

	if (!desc)
		return -EINVAL;

	/* Complete initialisation of *notify */
	if (notify) {
		notify->irq = irq;
		kref_init(&notify->kref);
		INIT_WORK(&notify->work, irq_affinity_notify);
	}

	raw_spin_lock_irqsave(&desc->lock, flags);
	old_notify = desc->affinity_notify;
	desc->affinity_notify = notify;
	raw_spin_unlock_irqrestore(&desc->lock, flags);

	if (old_notify)
		kref_put(&old_notify->kref, old_notify->release);

	return 0;
}
EXPORT_SYMBOL_GPL(irq_set_affinity_notifier);
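
/*
 * Sketch of a notifier user; my_notify()/my_release() and my_ctx are
 * placeholders. notify() runs from a workqueue, release() is called
 * once the last reference to the notify block is dropped:
 *
 *	my_ctx->notify.notify = my_notify;
 *	my_ctx->notify.release = my_release;
 *	irq_set_affinity_notifier(irq, &my_ctx->notify);
 *	...
 *	irq_set_affinity_notifier(irq, NULL);	<- before free_irq()
 */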

#ifndef CONFIG_AUTO_IRQ_AFFINITY
/*
 * Generic version of the affinity autoselector.
 */
static int
setup_affinity(unsigned int irq, struct irq_desc *desc, struct cpumask *mask)
{
	struct cpumask *set = irq_default_affinity;
	int node = desc->irq_data.node;

	/* Excludes PER_CPU and NO_BALANCE interrupts */
	if (!irq_can_set_affinity(irq))
		return 0;

	/*
	 * Preserve a userspace affinity setup, but make sure that
	 * one of the targets is online.
	 */
	if (irqd_has_set(&desc->irq_data, IRQD_AFFINITY_SET)) {
		if (cpumask_intersects(desc->irq_data.affinity,
				       cpu_online_mask))
			set = desc->irq_data.affinity;
		else
			irqd_clear(&desc->irq_data, IRQD_AFFINITY_SET);
	}

	cpumask_and(mask, cpu_online_mask, set);
	if (node != NUMA_NO_NODE) {
		const struct cpumask *nodemask = cpumask_of_node(node);

		/* make sure at least one of the cpus in nodemask is online */
		if (cpumask_intersects(mask, nodemask))
			cpumask_and(mask, mask, nodemask);
	}
	irq_do_set_affinity(&desc->irq_data, mask, false);
	return 0;
}
#else
static inline int
setup_affinity(unsigned int irq, struct irq_desc *d, struct cpumask *mask)
{
	return irq_select_affinity(irq);
}
#endif

/*
 * Called when affinity is set via /proc/irq
 */
int irq_select_affinity_usr(unsigned int irq, struct cpumask *mask)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;
	int ret;

	raw_spin_lock_irqsave(&desc->lock, flags);
	ret = setup_affinity(irq, desc, mask);
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	return ret;
}

#else
static inline int
setup_affinity(unsigned int irq, struct irq_desc *desc, struct cpumask *mask)
{
	return 0;
}
#endif

void __disable_irq(struct irq_desc *desc, unsigned int irq, bool suspend)
{
	if (suspend) {
		if (!desc->action || (desc->action->flags & IRQF_NO_SUSPEND))
			return;
		desc->istate |= IRQS_SUSPENDED;
	}

	if (!desc->depth++)
		irq_disable(desc);
}

static int __disable_irq_nosync(unsigned int irq)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);

	if (!desc)
		return -EINVAL;
	__disable_irq(desc, irq, false);
	irq_put_desc_busunlock(desc, flags);
	return 0;
}

/**
 *	disable_irq_nosync - disable an irq without waiting
 *	@irq: Interrupt to disable
 *
 *	Disable the selected interrupt line.  Disables and Enables are
 *	nested.
 *	Unlike disable_irq(), this function does not ensure existing
 *	instances of the IRQ handler have completed before returning.
 *
 *	This function may be called from IRQ context.
 */
void disable_irq_nosync(unsigned int irq)
{
	__disable_irq_nosync(irq);
}
EXPORT_SYMBOL(disable_irq_nosync);

/**
 *	disable_irq - disable an irq and wait for completion
 *	@irq: Interrupt to disable
 *
 *	Disable the selected interrupt line.  Enables and Disables are
 *	nested.
 *	This function waits for any pending IRQ handlers for this interrupt
 *	to complete before returning. If you use this function while
 *	holding a resource the IRQ handler may need, you will deadlock.
 *
 *	This function may be called - with care - from IRQ context.
 */
void disable_irq(unsigned int irq)
{
	if (!__disable_irq_nosync(irq))
		synchronize_irq(irq);
}
EXPORT_SYMBOL(disable_irq);
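
/*
 * Disables nest, so an illustrative sequence looks like:
 *
 *	disable_irq(irq);	<- depth 0 -> 1, line gets masked
 *	disable_irq(irq);	<- depth 1 -> 2
 *	enable_irq(irq);	<- depth 2 -> 1, still masked
 *	enable_irq(irq);	<- depth 1 -> 0, line unmasked again
 */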

void __enable_irq(struct irq_desc *desc, unsigned int irq, bool resume)
{
	if (resume) {
		if (!(desc->istate & IRQS_SUSPENDED)) {
			if (!desc->action)
				return;
			if (!(desc->action->flags & IRQF_FORCE_RESUME))
				return;
			/* Pretend that it got disabled! */
			desc->depth++;
		}
		desc->istate &= ~IRQS_SUSPENDED;
	}

	switch (desc->depth) {
	case 0:
 err_out:
		WARN(1, KERN_WARNING "Unbalanced enable for IRQ %d\n", irq);
		break;
	case 1: {
		if (desc->istate & IRQS_SUSPENDED)
			goto err_out;
		/* Prevent probing on this irq: */
		irq_settings_set_noprobe(desc);
		irq_enable(desc);
		check_irq_resend(desc, irq);
		/* fall-through */
	}
	default:
		desc->depth--;
	}
}

/**
 *	enable_irq - enable handling of an irq
 *	@irq: Interrupt to enable
 *
 *	Undoes the effect of one call to disable_irq().  If this
 *	matches the last disable, processing of interrupts on this
 *	IRQ line is re-enabled.
 *
 *	This function may be called from IRQ context only when
 *	desc->irq_data.chip->bus_lock and desc->irq_data.chip->bus_sync_unlock are NULL!
 */
void enable_irq(unsigned int irq)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);

	if (!desc)
		return;
	if (WARN(!desc->irq_data.chip,
		 KERN_ERR "enable_irq before setup/request_irq: irq %u\n", irq))
		goto out;

	__enable_irq(desc, irq, false);
out:
	irq_put_desc_busunlock(desc, flags);
}
EXPORT_SYMBOL(enable_irq);

static int set_irq_wake_real(unsigned int irq, unsigned int on)
{
	struct irq_desc *desc = irq_to_desc(irq);
	int ret = -ENXIO;

	if (irq_desc_get_chip(desc)->flags & IRQCHIP_SKIP_SET_WAKE)
		return 0;

	if (desc->irq_data.chip->irq_set_wake)
		ret = desc->irq_data.chip->irq_set_wake(&desc->irq_data, on);

	return ret;
}

/**
 *	irq_set_irq_wake - control irq power management wakeup
 *	@irq:	interrupt to control
 *	@on:	enable/disable power management wakeup
 *
 *	Enable/disable power management wakeup mode, which is
 *	disabled by default.  Enables and disables must match,
 *	just as they match for non-wakeup mode support.
 *
 *	Wakeup mode lets this IRQ wake the system from sleep
 *	states like "suspend to RAM".
 */
int irq_set_irq_wake(unsigned int irq, unsigned int on)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
	int ret = 0;

	if (!desc)
		return -EINVAL;

	/* wakeup-capable irqs can be shared between drivers that
	 * don't need to have the same sleep mode behaviors.
	 */
	if (on) {
		if (desc->wake_depth++ == 0) {
			ret = set_irq_wake_real(irq, on);
			if (ret)
				desc->wake_depth = 0;
			else
				irqd_set(&desc->irq_data, IRQD_WAKEUP_STATE);
		}
	} else {
		if (desc->wake_depth == 0) {
			WARN(1, "Unbalanced IRQ %d wake disable\n", irq);
		} else if (--desc->wake_depth == 0) {
			ret = set_irq_wake_real(irq, on);
			if (ret)
				desc->wake_depth = 1;
			else
				irqd_clear(&desc->irq_data, IRQD_WAKEUP_STATE);
		}
	}
	irq_put_desc_busunlock(desc, flags);
	return ret;
}
EXPORT_SYMBOL(irq_set_irq_wake);
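
/*
 * Illustrative suspend hook of a wakeup capable driver (sketch only,
 * my_dev is a placeholder):
 *
 *	static int my_suspend(struct device *dev)
 *	{
 *		if (device_may_wakeup(dev))
 *			irq_set_irq_wake(my_dev->irq, 1);
 *		return 0;
 *	}
 *
 * The matching resume hook calls irq_set_irq_wake(my_dev->irq, 0) so
 * that enables and disables stay balanced.
 */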

/*
 * Internal function that tells the architecture code whether a
 * particular irq has been exclusively allocated or is available
 * for driver use.
 */
int can_request_irq(unsigned int irq, unsigned long irqflags)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
	int canrequest = 0;

	if (!desc)
		return 0;

	if (irq_settings_can_request(desc)) {
		if (!desc->action ||
		    irqflags & desc->action->flags & IRQF_SHARED)
			canrequest = 1;
	}
	irq_put_desc_unlock(desc, flags);
	return canrequest;
}

int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,
		      unsigned long flags)
{
	struct irq_chip *chip = desc->irq_data.chip;
	int ret, unmask = 0;

	if (!chip || !chip->irq_set_type) {
		/*
		 * IRQF_TRIGGER_* but the PIC does not support multiple
		 * flow-types?
		 */
		pr_debug("No set_type function for IRQ %d (%s)\n", irq,
			 chip ? (chip->name ? : "unknown") : "unknown");
		return 0;
	}

	flags &= IRQ_TYPE_SENSE_MASK;

	if (chip->flags & IRQCHIP_SET_TYPE_MASKED) {
		if (!irqd_irq_masked(&desc->irq_data))
			mask_irq(desc);
		if (!irqd_irq_disabled(&desc->irq_data))
			unmask = 1;
	}

	/* caller masked out all except trigger mode flags */
	ret = chip->irq_set_type(&desc->irq_data, flags);

	switch (ret) {
	case IRQ_SET_MASK_OK:
		irqd_clear(&desc->irq_data, IRQD_TRIGGER_MASK);
		irqd_set(&desc->irq_data, flags);
		/* fall through - update the cached trigger settings */
	case IRQ_SET_MASK_OK_NOCOPY:
		flags = irqd_get_trigger_type(&desc->irq_data);
		irq_settings_set_trigger_mask(desc, flags);
		irqd_clear(&desc->irq_data, IRQD_LEVEL);
		irq_settings_clr_level(desc);
		if (flags & IRQ_TYPE_LEVEL_MASK) {
			irq_settings_set_level(desc);
			irqd_set(&desc->irq_data, IRQD_LEVEL);
		}

		ret = 0;
		break;
	default:
		pr_err("Setting trigger mode %lu for irq %u failed (%pF)\n",
		       flags, irq, chip->irq_set_type);
	}
	if (unmask)
		unmask_irq(desc);
	return ret;
}

#ifdef CONFIG_HARDIRQS_SW_RESEND
int irq_set_parent(int irq, int parent_irq)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);

	if (!desc)
		return -EINVAL;

	desc->parent_irq = parent_irq;

	irq_put_desc_unlock(desc, flags);
	return 0;
}
#endif

/*
 * Default primary interrupt handler for threaded interrupts. Is
 * assigned as primary handler when request_threaded_irq is called
 * with handler == NULL. Useful for oneshot interrupts.
 */
static irqreturn_t irq_default_primary_handler(int irq, void *dev_id)
{
	return IRQ_WAKE_THREAD;
}

/*
 * Primary handler for nested threaded interrupts. Should never be
 * called.
 */
static irqreturn_t irq_nested_primary_handler(int irq, void *dev_id)
{
	WARN(1, "Primary handler called for nested irq %d\n", irq);
	return IRQ_NONE;
}
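
/*
 * Illustrative request of a purely threaded interrupt (sketch only,
 * my_thread_fn/my_dev are placeholders). With a NULL primary handler,
 * irq_default_primary_handler() is installed and IRQF_ONESHOT is
 * required unless the chip is IRQCHIP_ONESHOT_SAFE, see __setup_irq():
 *
 *	ret = request_threaded_irq(irq, NULL, my_thread_fn,
 *				   IRQF_ONESHOT, "my_dev", my_dev);
 */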

static int irq_wait_for_interrupt(struct irqaction *action)
{
	set_current_state(TASK_INTERRUPTIBLE);

	while (!kthread_should_stop()) {

		if (test_and_clear_bit(IRQTF_RUNTHREAD,
				       &action->thread_flags)) {
			__set_current_state(TASK_RUNNING);
			return 0;
		}
		schedule();
		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);
	return -1;
}

/*
 * Oneshot interrupts keep the irq line masked until the threaded
 * handler has finished. Unmask if the interrupt has not been disabled
 * and is marked MASKED.
 */
static void irq_finalize_oneshot(struct irq_desc *desc,
				 struct irqaction *action)
{
	if (!(desc->istate & IRQS_ONESHOT))
		return;
again:
	chip_bus_lock(desc);
	raw_spin_lock_irq(&desc->lock);

	/*
	 * Implausible though it may be we need to protect us against
	 * the following scenario:
	 *
	 * The thread finishes faster than the hard interrupt handler
	 * on the other CPU. If we unmask the irq line then the
	 * interrupt can come in again and mask the line, leave due
	 * to IRQS_INPROGRESS, and the irq line is masked forever.
	 *
	 * This also serializes the state of shared oneshot handlers
	 * versus "desc->threads_oneshot |= action->thread_mask;" in
	 * irq_wake_thread(). See the comment there which explains the
	 * serialization.
	 */
	if (unlikely(irqd_irq_inprogress(&desc->irq_data))) {
		raw_spin_unlock_irq(&desc->lock);
		chip_bus_sync_unlock(desc);
		cpu_relax();
		goto again;
	}

	/*
	 * Now check again, whether the thread should run. Otherwise
	 * we would clear the threads_oneshot bit of this thread which
	 * was just set.
	 */
	if (test_bit(IRQTF_RUNTHREAD, &action->thread_flags))
		goto out_unlock;

	desc->threads_oneshot &= ~action->thread_mask;

	if (!desc->threads_oneshot && !irqd_irq_disabled(&desc->irq_data) &&
	    irqd_irq_masked(&desc->irq_data))
		unmask_irq(desc);

out_unlock:
	raw_spin_unlock_irq(&desc->lock);
	chip_bus_sync_unlock(desc);
}

#ifdef CONFIG_SMP
/*
 * Check whether we need to change the affinity of the interrupt thread.
 */
static void
irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action)
{
	cpumask_var_t mask;
	bool valid = true;

	if (!test_and_clear_bit(IRQTF_AFFINITY, &action->thread_flags))
		return;

	/*
	 * In case we are out of memory we set IRQTF_AFFINITY again and
	 * try again next time
	 */
	if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
		set_bit(IRQTF_AFFINITY, &action->thread_flags);
		return;
	}

	raw_spin_lock_irq(&desc->lock);
	/*
	 * This code is triggered unconditionally. Check the affinity
	 * mask pointer. For CPU_MASK_OFFSTACK=n this is optimized out.
	 */
	if (desc->irq_data.affinity)
		cpumask_copy(mask, desc->irq_data.affinity);
	else
		valid = false;
	raw_spin_unlock_irq(&desc->lock);

	if (valid)
		set_cpus_allowed_ptr(current, mask);
	free_cpumask_var(mask);
}
#else
static inline void
irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) { }
#endif

/*
 * Interrupts which are not explicitly requested as threaded
 * interrupts rely on the implicit bh/preempt disable of the hard irq
 * context. So we need to disable bh here to avoid deadlocks and other
 * side effects.
 */
static irqreturn_t
irq_forced_thread_fn(struct irq_desc *desc, struct irqaction *action)
{
	irqreturn_t ret;

	local_bh_disable();
	ret = action->thread_fn(action->irq, action->dev_id);
	irq_finalize_oneshot(desc, action);
	local_bh_enable();
	return ret;
}

/*
 * Interrupts explicitly requested as threaded interrupts want to be
 * preemptible - many of them need to sleep and wait for slow buses to
 * complete.
 */
static irqreturn_t irq_thread_fn(struct irq_desc *desc,
		struct irqaction *action)
{
	irqreturn_t ret;

	ret = action->thread_fn(action->irq, action->dev_id);
	irq_finalize_oneshot(desc, action);
	return ret;
}

static void wake_threads_waitq(struct irq_desc *desc)
{
	if (atomic_dec_and_test(&desc->threads_active) &&
	    waitqueue_active(&desc->wait_for_threads))
		wake_up(&desc->wait_for_threads);
}

static void irq_thread_dtor(struct callback_head *unused)
{
	struct task_struct *tsk = current;
	struct irq_desc *desc;
	struct irqaction *action;

	if (WARN_ON_ONCE(!(current->flags & PF_EXITING)))
		return;

	action = kthread_data(tsk);

	pr_err("exiting task \"%s\" (%d) is an active IRQ thread (irq %d)\n",
	       tsk->comm, tsk->pid, action->irq);

	desc = irq_to_desc(action->irq);
	/*
	 * If IRQTF_RUNTHREAD is set, we need to decrement
	 * desc->threads_active and wake possible waiters.
	 */
	if (test_and_clear_bit(IRQTF_RUNTHREAD, &action->thread_flags))
		wake_threads_waitq(desc);

	/* Prevent a stale desc->threads_oneshot */
	irq_finalize_oneshot(desc, action);
}

/*
 * Interrupt handler thread
 */
static int irq_thread(void *data)
{
	struct callback_head on_exit_work;
	struct irqaction *action = data;
	struct irq_desc *desc = irq_to_desc(action->irq);
	irqreturn_t (*handler_fn)(struct irq_desc *desc,
			struct irqaction *action);

	if (force_irqthreads && test_bit(IRQTF_FORCED_THREAD,
					&action->thread_flags))
		handler_fn = irq_forced_thread_fn;
	else
		handler_fn = irq_thread_fn;

	init_task_work(&on_exit_work, irq_thread_dtor);
	task_work_add(current, &on_exit_work, false);

	irq_thread_check_affinity(desc, action);

	while (!irq_wait_for_interrupt(action)) {
		irqreturn_t action_ret;

		irq_thread_check_affinity(desc, action);

		action_ret = handler_fn(desc, action);
		if (!noirqdebug)
			note_interrupt(action->irq, desc, action_ret);

		wake_threads_waitq(desc);
	}

	/*
	 * This is the regular exit path. __free_irq() is stopping the
	 * thread via kthread_stop() after calling
	 * synchronize_irq(). So neither IRQTF_RUNTHREAD nor the
	 * oneshot mask bit can be set. We cannot verify that as we
	 * cannot touch the oneshot mask at this point anymore as
	 * __setup_irq() might have given out current's thread_mask
	 * again.
	 */
	task_work_cancel(current, irq_thread_dtor);
	return 0;
}

static void irq_setup_forced_threading(struct irqaction *new)
{
	if (!force_irqthreads)
		return;
	if (new->flags & (IRQF_NO_THREAD | IRQF_PERCPU | IRQF_ONESHOT))
		return;

	new->flags |= IRQF_ONESHOT;

	if (!new->thread_fn) {
		set_bit(IRQTF_FORCED_THREAD, &new->thread_flags);
		new->thread_fn = new->handler;
		new->handler = irq_default_primary_handler;
	}
}
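
/*
 * Illustrative opt-out: a driver whose handler must run in hard
 * interrupt context even when "threadirqs" is set can pass
 * IRQF_NO_THREAD (my_hard_handler/my_dev are placeholders):
 *
 *	ret = request_irq(irq, my_hard_handler, IRQF_NO_THREAD,
 *			  "my_dev", my_dev);
 */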

/*
 * Internal function to register an irqaction - typically used to
 * allocate special interrupts that are part of the architecture.
 */
static int
__setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
{
	struct irqaction *old, **old_ptr;
	unsigned long flags, thread_mask = 0;
	int ret, nested, shared = 0;
	cpumask_var_t mask;

	if (!desc)
		return -EINVAL;

	if (desc->irq_data.chip == &no_irq_chip)
		return -ENOSYS;
	if (!try_module_get(desc->owner))
		return -ENODEV;

	/*
	 * Check whether the interrupt nests into another interrupt
	 * thread.
	 */
	nested = irq_settings_is_nested_thread(desc);
	if (nested) {
		if (!new->thread_fn) {
			ret = -EINVAL;
			goto out_mput;
		}
		/*
		 * Replace the primary handler which was provided from
		 * the driver for non nested interrupt handling by the
		 * dummy function which warns when called.
		 */
		new->handler = irq_nested_primary_handler;
	} else {
		if (irq_settings_can_thread(desc))
			irq_setup_forced_threading(new);
	}

	/*
	 * Create a handler thread when a thread function is supplied
	 * and the interrupt does not nest into another interrupt
	 * thread.
	 */
	if (new->thread_fn && !nested) {
		struct task_struct *t;
		static const struct sched_param param = {
			.sched_priority = MAX_USER_RT_PRIO/2,
		};

		t = kthread_create(irq_thread, new, "irq/%d-%s", irq,
				   new->name);
		if (IS_ERR(t)) {
			ret = PTR_ERR(t);
			goto out_mput;
		}

		sched_setscheduler(t, SCHED_FIFO, &param);

		/*
		 * We keep the reference to the task struct even if
		 * the thread dies to avoid that the interrupt code
		 * references an already freed task_struct.
		 */
		get_task_struct(t);
		new->thread = t;
		/*
		 * Tell the thread to set its affinity. This is
		 * important for shared interrupt handlers as we do
		 * not invoke setup_affinity() for the secondary
		 * handlers as everything is already set up. Even for
		 * interrupts marked with IRQF_NOBALANCING this is
		 * correct as we want the thread to move to the cpu(s)
		 * on which the requesting code placed the interrupt.
		 */
		set_bit(IRQTF_AFFINITY, &new->thread_flags);
	}

	if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
		ret = -ENOMEM;
		goto out_thread;
	}

	/*
	 * Drivers are often written to work w/o knowledge about the
	 * underlying irq chip implementation, so a request for a
	 * threaded irq without a primary hard irq context handler
	 * requires the ONESHOT flag to be set. Some irq chips like
	 * MSI based interrupts are per se one shot safe. Check the
	 * chip flags, so we can avoid the unmask dance at the end of
	 * the threaded handler for those.
	 */
	if (desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE)
		new->flags &= ~IRQF_ONESHOT;

	/*
	 * The following block of code has to be executed atomically
	 */
	raw_spin_lock_irqsave(&desc->lock, flags);
	old_ptr = &desc->action;
	old = *old_ptr;
	if (old) {
		/*
		 * Can't share interrupts unless both agree to and are
		 * the same type (level, edge, polarity). So both flag
		 * fields must have IRQF_SHARED set and the bits which
		 * set the trigger type must match. Also all must
		 * agree on ONESHOT.
		 */
		if (!((old->flags & new->flags) & IRQF_SHARED) ||
		    ((old->flags ^ new->flags) & IRQF_TRIGGER_MASK) ||
		    ((old->flags ^ new->flags) & IRQF_ONESHOT))
			goto mismatch;

		/* All handlers must agree on per-cpuness */
		if ((old->flags & IRQF_PERCPU) !=
		    (new->flags & IRQF_PERCPU))
			goto mismatch;

		/* add new interrupt at end of irq queue */
		do {
			/*
			 * Or all existing action->thread_mask bits,
			 * so we can find the next zero bit for this
			 * new action.
			 */
			thread_mask |= old->thread_mask;
			old_ptr = &old->next;
			old = *old_ptr;
		} while (old);
		shared = 1;
	}

	/*
	 * Setup the thread mask for this irqaction for ONESHOT. For
	 * !ONESHOT irqs the thread mask is 0 so we can avoid a
	 * conditional in irq_wake_thread().
	 */
	if (new->flags & IRQF_ONESHOT) {
		/*
		 * Unlikely to have 32 resp. 64 irqs sharing one line,
		 * but who knows.
		 */
		if (thread_mask == ~0UL) {
			ret = -EBUSY;
			goto out_mask;
		}
		/*
		 * The thread_mask for the action is or'ed to
		 * desc->threads_active to indicate that the
		 * IRQF_ONESHOT thread handler has been woken, but not
		 * yet finished. The bit is cleared when a thread
		 * completes. When all threads of a shared interrupt
		 * line have completed desc->threads_active becomes
		 * zero and the interrupt line is unmasked. See
		 * handle.c:irq_wake_thread() for further information.
		 *
		 * If no thread is woken by primary (hard irq context)
		 * interrupt handlers, then desc->threads_active is
		 * also checked for zero to unmask the irq line in the
		 * affected hard irq flow handlers
		 * (handle_[fasteoi|level]_irq).
		 *
		 * The new action gets the first zero bit of
		 * thread_mask assigned. See the loop above which or's
		 * all existing action->thread_mask bits.
		 */
		new->thread_mask = 1 << ffz(thread_mask);

	} else if (new->handler == irq_default_primary_handler &&
		   !(desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE)) {
		/*
		 * The interrupt was requested with handler = NULL, so
		 * we use the default primary handler for it. But it
		 * does not have the oneshot flag set. In combination
		 * with level interrupts this is deadly, because the
		 * default primary handler just wakes the thread, then
		 * the irq line is reenabled, but the device still
		 * has the level irq asserted. Rinse and repeat....
		 *
		 * While this works for edge type interrupts, we play
		 * it safe and reject unconditionally because we can't
		 * say for sure which type this interrupt really
		 * has. The type flags are unreliable as the
		 * underlying chip implementation can override them.
		 */
		pr_err("Threaded irq requested with handler=NULL and !ONESHOT for irq %d\n",
		       irq);
		ret = -EINVAL;
		goto out_mask;
	}

	if (!shared) {
		init_waitqueue_head(&desc->wait_for_threads);

		/* Setup the type (level, edge polarity) if configured: */
		if (new->flags & IRQF_TRIGGER_MASK) {
			ret = __irq_set_trigger(desc, irq,
					new->flags & IRQF_TRIGGER_MASK);

			if (ret)
				goto out_mask;
		}

		desc->istate &= ~(IRQS_AUTODETECT | IRQS_SPURIOUS_DISABLED | \
				  IRQS_ONESHOT | IRQS_WAITING);
		irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);

		if (new->flags & IRQF_PERCPU) {
			irqd_set(&desc->irq_data, IRQD_PER_CPU);
			irq_settings_set_per_cpu(desc);
		}

		if (new->flags & IRQF_ONESHOT)
			desc->istate |= IRQS_ONESHOT;

		if (irq_settings_can_autoenable(desc))
			irq_startup(desc, true);
		else
			/* Undo nested disables: */
			desc->depth = 1;

		/* Exclude IRQ from balancing if requested */
		if (new->flags & IRQF_NOBALANCING) {
			irq_settings_set_no_balancing(desc);
			irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
		}

		/* Set default affinity mask once everything is setup */
		setup_affinity(irq, desc, mask);

	} else if (new->flags & IRQF_TRIGGER_MASK) {
		unsigned int nmsk = new->flags & IRQF_TRIGGER_MASK;
		unsigned int omsk = irq_settings_get_trigger_mask(desc);

		if (nmsk != omsk)
			/* hope the handler works with current trigger mode */
			pr_warning("irq %d uses trigger mode %u; requested %u\n",
				   irq, nmsk, omsk);
	}

	new->irq = irq;
	*old_ptr = new;

	/* Reset broken irq detection when installing new handler */
	desc->irq_count = 0;
	desc->irqs_unhandled = 0;

	/*
	 * Check whether we disabled the irq via the spurious handler
	 * before. Reenable it and give it another chance.
	 */
	if (shared && (desc->istate & IRQS_SPURIOUS_DISABLED)) {
		desc->istate &= ~IRQS_SPURIOUS_DISABLED;
		__enable_irq(desc, irq, false);
	}

	raw_spin_unlock_irqrestore(&desc->lock, flags);

	/*
	 * Strictly no need to wake it up, but hung_task complains
	 * when no hard interrupt wakes the thread up.
	 */
	if (new->thread)
		wake_up_process(new->thread);

	register_irq_proc(irq, desc);
	new->dir = NULL;
	register_handler_proc(irq, new);
	free_cpumask_var(mask);

	return 0;

mismatch:
	if (!(new->flags & IRQF_PROBE_SHARED)) {
		pr_err("Flags mismatch irq %d. %08x (%s) vs. %08x (%s)\n",
		       irq, new->flags, new->name, old->flags, old->name);
#ifdef CONFIG_DEBUG_SHIRQ
		dump_stack();
#endif
	}
	ret = -EBUSY;

out_mask:
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	free_cpumask_var(mask);

out_thread:
	if (new->thread) {
		struct task_struct *t = new->thread;

		new->thread = NULL;
		kthread_stop(t);
		put_task_struct(t);
	}
out_mput:
	module_put(desc->owner);
	return ret;
}

/**
 * setup_irq - set up an interrupt
 * @irq: Interrupt line to set up
 * @act: irqaction for the interrupt
 *
 * Used to statically set up interrupts in the early boot process.
 */
int setup_irq(unsigned int irq, struct irqaction *act)
{
	int retval;
	struct irq_desc *desc = irq_to_desc(irq);

	if (WARN_ON(irq_settings_is_per_cpu_devid(desc)))
		return -EINVAL;
	chip_bus_lock(desc);
	retval = __setup_irq(irq, desc, act);
	chip_bus_sync_unlock(desc);

	return retval;
}
EXPORT_SYMBOL_GPL(setup_irq);
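
/*
 * Usage sketch (not part of this file): how architecture code might wire
 * up a timer interrupt at early boot with setup_irq(). The handler, the
 * "example-timer" name and the IRQ number 16 are illustrative assumptions.
 */
static irqreturn_t example_timer_interrupt(int irq, void *dev_id)
{
	/* Acknowledge the timer hardware and run the tick machinery here. */
	return IRQ_HANDLED;
}

static struct irqaction example_timer_irqaction = {
	.handler	= example_timer_interrupt,
	.flags		= IRQF_TIMER,
	.name		= "example-timer",
};

static void __init example_time_init(void)
{
	/* The irqaction has static lifetime, so no allocation is needed. */
	setup_irq(16, &example_timer_irqaction);
}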

/*
 * Internal function to unregister an irqaction - used to free
 * regular and special interrupts that are part of the architecture.
 */
static struct irqaction *__free_irq(unsigned int irq, void *dev_id)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irqaction *action, **action_ptr;
	unsigned long flags;

	WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);

	if (!desc)
		return NULL;

	raw_spin_lock_irqsave(&desc->lock, flags);

	/*
	 * There can be multiple actions per IRQ descriptor, find the right
	 * one based on the dev_id:
	 */
	action_ptr = &desc->action;
	for (;;) {
		action = *action_ptr;

		if (!action) {
			WARN(1, "Trying to free already-free IRQ %d\n", irq);
			raw_spin_unlock_irqrestore(&desc->lock, flags);

			return NULL;
		}

		if (action->dev_id == dev_id)
			break;
		action_ptr = &action->next;
	}

	/* Found it - now remove it from the list of entries: */
	*action_ptr = action->next;

	/* If this was the last handler, shut down the IRQ line: */
	if (!desc->action)
		irq_shutdown(desc);

#ifdef CONFIG_SMP
	/* make sure affinity_hint is cleaned up */
	if (WARN_ON_ONCE(desc->affinity_hint))
		desc->affinity_hint = NULL;
#endif

	raw_spin_unlock_irqrestore(&desc->lock, flags);

	unregister_handler_proc(irq, action);

	/* Make sure it's not being used on another CPU: */
	synchronize_irq(irq);

#ifdef CONFIG_DEBUG_SHIRQ
	/*
	 * It's a shared IRQ -- the driver ought to be prepared for an IRQ
	 * event to happen even now it's being freed, so let's make sure that
	 * is so by doing an extra call to the handler ....
	 *
	 * ( We do this after actually deregistering it, to make sure that a
	 *   'real' IRQ doesn't run in parallel with our fake. )
	 */
	if (action->flags & IRQF_SHARED) {
		local_irq_save(flags);
		action->handler(irq, dev_id);
		local_irq_restore(flags);
	}
#endif

	if (action->thread) {
		kthread_stop(action->thread);
		put_task_struct(action->thread);
	}

	module_put(desc->owner);
	return action;
}

/**
 * remove_irq - free an interrupt
 * @irq: Interrupt line to free
 * @act: irqaction for the interrupt
 *
 * Used to remove interrupts statically set up by the early boot process.
 */
void remove_irq(unsigned int irq, struct irqaction *act)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (desc && !WARN_ON(irq_settings_is_per_cpu_devid(desc)))
		__free_irq(irq, act->dev_id);
}
EXPORT_SYMBOL_GPL(remove_irq);

/**
 * free_irq - free an interrupt allocated with request_irq
 * @irq: Interrupt line to free
 * @dev_id: Device identity to free
 *
 * Remove an interrupt handler. The handler is removed and if the
 * interrupt line is no longer in use by any driver it is disabled.
 * On a shared IRQ the caller must ensure the interrupt is disabled
 * on the card it drives before calling this function. The function
 * does not return until any executing interrupts for this IRQ
 * have completed.
 *
 * This function must not be called from interrupt context.
 */
void free_irq(unsigned int irq, void *dev_id)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc)))
		return;

#ifdef CONFIG_SMP
	if (WARN_ON(desc->affinity_notify))
		desc->affinity_notify = NULL;
#endif

	chip_bus_lock(desc);
	kfree(__free_irq(irq, dev_id));
	chip_bus_sync_unlock(desc);
}
EXPORT_SYMBOL(free_irq);
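
/*
 * Usage sketch (not part of this file): freeing a shared interrupt from a
 * driver's teardown path. The "struct example_dev" and its fields are
 * illustrative assumptions; dev_id must match what was passed to
 * request_irq().
 */
struct example_dev {
	void __iomem *regs;
	int irq;
};

static void example_teardown(struct example_dev *dev)
{
	/*
	 * On a shared line the device must already have been told to stop
	 * raising interrupts; free_irq() then waits for any handlers still
	 * running before returning.
	 */
	free_irq(dev->irq, dev);
}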

/**
 * request_threaded_irq - allocate an interrupt line
 * @irq: Interrupt line to allocate
 * @handler: Function to be called when the IRQ occurs.
 *	     Primary handler for threaded interrupts.
 *	     If NULL and thread_fn != NULL, the default
 *	     primary handler is installed.
 * @thread_fn: Function called from the irq handler thread.
 *	       If NULL, no irq thread is created.
 * @irqflags: Interrupt type flags
 * @devname: An ASCII name for the claiming device
 * @dev_id: A cookie passed back to the handler function
 *
 * This call allocates interrupt resources and enables the
 * interrupt line and IRQ handling. From the point this
 * call is made your handler function may be invoked. Since
 * your handler function must clear any interrupt the board
 * raises, you must take care both to initialise your hardware
 * and to set up the interrupt handler in the right order.
 *
 * If you want to set up a threaded irq handler for your device
 * then you need to supply @handler and @thread_fn. @handler is
 * still called in hard interrupt context and has to check
 * whether the interrupt originates from the device. If yes, it
 * needs to disable the interrupt on the device and return
 * IRQ_WAKE_THREAD, which will wake up the handler thread and run
 * @thread_fn. This split handler design is necessary to support
 * shared interrupts.
 *
 * Dev_id must be globally unique. Normally the address of the
 * device data structure is used as the cookie. Since the handler
 * receives this value it makes sense to use it.
 *
 * If your interrupt is shared you must pass a non NULL dev_id
 * as this is required when freeing the interrupt.
 *
 * Flags:
 *
 *	IRQF_SHARED		Interrupt is shared
 *	IRQF_TRIGGER_*		Specify active edge(s) or level
 */
int request_threaded_irq(unsigned int irq, irq_handler_t handler,
			 irq_handler_t thread_fn, unsigned long irqflags,
			 const char *devname, void *dev_id)
{
	struct irqaction *action;
	struct irq_desc *desc;
	int retval;

	/*
	 * Sanity-check: shared interrupts must pass in a real dev-ID,
	 * otherwise we'll have trouble later trying to figure out
	 * which interrupt is which (messes up the interrupt freeing
	 * logic etc).
	 */
	if ((irqflags & IRQF_SHARED) && !dev_id)
		return -EINVAL;

	desc = irq_to_desc(irq);
	if (!desc)
		return -EINVAL;

	if (!irq_settings_can_request(desc) ||
	    WARN_ON(irq_settings_is_per_cpu_devid(desc)))
		return -EINVAL;

	if (!handler) {
		if (!thread_fn)
			return -EINVAL;
		handler = irq_default_primary_handler;
	}

	action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
	if (!action)
		return -ENOMEM;

	action->handler = handler;
	action->thread_fn = thread_fn;
	action->flags = irqflags;
	action->name = devname;
	action->dev_id = dev_id;

	chip_bus_lock(desc);
	retval = __setup_irq(irq, desc, action);
	chip_bus_sync_unlock(desc);

	if (retval)
		kfree(action);

#ifdef CONFIG_DEBUG_SHIRQ_FIXME
	if (!retval && (irqflags & IRQF_SHARED)) {
		/*
		 * It's a shared IRQ -- the driver ought to be prepared for it
		 * to happen immediately, so let's make sure....
		 * We disable the irq to make sure that a 'real' IRQ doesn't
		 * run in parallel with our fake.
		 */
		unsigned long flags;

		disable_irq(irq);
		local_irq_save(flags);

		handler(irq, dev_id);

		local_irq_restore(flags);
		enable_irq(irq);
	}
#endif
	return retval;
}
EXPORT_SYMBOL(request_threaded_irq);
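
/*
 * Usage sketch (not part of this file): the split primary/threaded handler
 * pattern described above. The device structure, register offsets and
 * accessors are illustrative assumptions (<linux/io.h> provides
 * readl()/writel()).
 */
struct example_chip {
	void __iomem *regs;		/* 0x0: status, 0x4: interrupt enable */
	int irq;
};

static irqreturn_t example_quick_check(int irq, void *dev_id)
{
	struct example_chip *chip = dev_id;

	/* Hard interrupt context: cheap check whether the IRQ is ours. */
	if (!(readl(chip->regs) & 0x1))
		return IRQ_NONE;	/* shared line, not our device */

	/* Quiesce the device, then defer the real work to the thread. */
	writel(0x0, chip->regs + 0x4);
	return IRQ_WAKE_THREAD;
}

static irqreturn_t example_thread_fn(int irq, void *dev_id)
{
	struct example_chip *chip = dev_id;

	/* Sleeping context: do the heavy lifting, then re-enable. */
	writel(0x1, chip->regs + 0x4);
	return IRQ_HANDLED;
}

static int example_probe(struct example_chip *chip)
{
	return request_threaded_irq(chip->irq, example_quick_check,
				    example_thread_fn, IRQF_SHARED,
				    "example-chip", chip);
}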

/**
 * request_any_context_irq - allocate an interrupt line
 * @irq: Interrupt line to allocate
 * @handler: Function to be called when the IRQ occurs.
 *	     Threaded handler for threaded interrupts.
 * @flags: Interrupt type flags
 * @name: An ASCII name for the claiming device
 * @dev_id: A cookie passed back to the handler function
 *
 * This call allocates interrupt resources and enables the
 * interrupt line and IRQ handling. It selects either a
 * hardirq or threaded handling method depending on the
 * context.
 *
 * On failure, it returns a negative value. On success,
 * it returns either IRQC_IS_HARDIRQ or IRQC_IS_NESTED.
 */
int request_any_context_irq(unsigned int irq, irq_handler_t handler,
			    unsigned long flags, const char *name, void *dev_id)
{
	struct irq_desc *desc = irq_to_desc(irq);
	int ret;

	if (!desc)
		return -EINVAL;

	if (irq_settings_is_nested_thread(desc)) {
		ret = request_threaded_irq(irq, NULL, handler,
					   flags, name, dev_id);
		return !ret ? IRQC_IS_NESTED : ret;
	}

	ret = request_irq(irq, handler, flags, name, dev_id);
	return !ret ? IRQC_IS_HARDIRQ : ret;
}
EXPORT_SYMBOL_GPL(request_any_context_irq);
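
/*
 * Usage sketch (not part of this file): requesting a line whose controller
 * may live behind a slow bus (e.g. an I2C GPIO expander) and therefore
 * needs a nested thread. The handler and names are illustrative
 * assumptions.
 */
static irqreturn_t example_any_handler(int irq, void *dev_id)
{
	return IRQ_HANDLED;
}

static int example_request_any(unsigned int irq, void *dev)
{
	int ret;

	ret = request_any_context_irq(irq, example_any_handler,
				      IRQF_TRIGGER_FALLING,
				      "example-any", dev);
	if (ret < 0)
		return ret;

	/*
	 * ret is IRQC_IS_HARDIRQ or IRQC_IS_NESTED here; most callers only
	 * care that the request did not fail.
	 */
	return 0;
}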

void enable_percpu_irq(unsigned int irq, unsigned int type)
{
	unsigned int cpu = smp_processor_id();
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);

	if (!desc)
		return;

	type &= IRQ_TYPE_SENSE_MASK;
	if (type != IRQ_TYPE_NONE) {
		int ret;

		ret = __irq_set_trigger(desc, irq, type);

		if (ret) {
			WARN(1, "failed to set type for IRQ%d\n", irq);
			goto out;
		}
	}

	irq_percpu_enable(desc, cpu);
out:
	irq_put_desc_unlock(desc, flags);
}
EXPORT_SYMBOL_GPL(enable_percpu_irq);

void disable_percpu_irq(unsigned int irq)
{
	unsigned int cpu = smp_processor_id();
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);

	if (!desc)
		return;

	irq_percpu_disable(desc, cpu);
	irq_put_desc_unlock(desc, flags);
}
EXPORT_SYMBOL_GPL(disable_percpu_irq);
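
/*
 * Usage sketch (not part of this file): a per-CPU IRQ is enabled and
 * disabled per CPU, so it is typically toggled from CPU hotplug callbacks
 * or via on_each_cpu() as below. The helper names are illustrative
 * assumptions.
 */
static void example_enable_on_cpu(void *info)
{
	/* Runs on each CPU in turn; keeps the trigger type unchanged. */
	enable_percpu_irq(*(unsigned int *)info, IRQ_TYPE_NONE);
}

static void example_enable_everywhere(unsigned int irq)
{
	on_each_cpu(example_enable_on_cpu, &irq, 1);
}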

/*
 * Internal function to unregister a percpu irqaction.
 */
static struct irqaction *__free_percpu_irq(unsigned int irq, void __percpu *dev_id)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irqaction *action;
	unsigned long flags;

	WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);

	if (!desc)
		return NULL;

	raw_spin_lock_irqsave(&desc->lock, flags);

	action = desc->action;
	if (!action || action->percpu_dev_id != dev_id) {
		WARN(1, "Trying to free already-free IRQ %d\n", irq);
		goto bad;
	}

	if (!cpumask_empty(desc->percpu_enabled)) {
		WARN(1, "percpu IRQ %d still enabled on CPU%d!\n",
		     irq, cpumask_first(desc->percpu_enabled));
		goto bad;
	}

	/* Found it - now remove it from the list of entries: */
	desc->action = NULL;

	raw_spin_unlock_irqrestore(&desc->lock, flags);

	unregister_handler_proc(irq, action);

	module_put(desc->owner);
	return action;

bad:
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	return NULL;
}

/**
 * remove_percpu_irq - free a per-cpu interrupt
 * @irq: Interrupt line to free
 * @act: irqaction for the interrupt
 *
 * Used to remove interrupts statically set up by the early boot process.
 */
void remove_percpu_irq(unsigned int irq, struct irqaction *act)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (desc && irq_settings_is_per_cpu_devid(desc))
		__free_percpu_irq(irq, act->percpu_dev_id);
}

/**
 * free_percpu_irq - free an interrupt allocated with request_percpu_irq
 * @irq: Interrupt line to free
 * @dev_id: Device identity to free
 *
 * Remove a percpu interrupt handler. The handler is removed, but the
 * interrupt line is not disabled; it must already have been disabled
 * on each CPU before this function is called. The function does not
 * return until any executing interrupts for this IRQ have completed.
 *
 * This function must not be called from interrupt context.
 */
void free_percpu_irq(unsigned int irq, void __percpu *dev_id)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (!desc || !irq_settings_is_per_cpu_devid(desc))
		return;

	chip_bus_lock(desc);
	kfree(__free_percpu_irq(irq, dev_id));
	chip_bus_sync_unlock(desc);
}
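
/*
 * Usage sketch (not part of this file): tearing down a per-CPU interrupt.
 * The line must be disabled on every CPU first, then freed exactly once.
 * The helper names are illustrative assumptions.
 */
static void example_disable_on_cpu(void *info)
{
	disable_percpu_irq(*(unsigned int *)info);
}

static void example_percpu_teardown(unsigned int irq, void __percpu *cookie)
{
	on_each_cpu(example_disable_on_cpu, &irq, 1);
	free_percpu_irq(irq, cookie);
}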

/**
 * setup_percpu_irq - set up a per-cpu interrupt
 * @irq: Interrupt line to set up
 * @act: irqaction for the interrupt
 *
 * Used to statically set up per-cpu interrupts in the early boot process.
 */
int setup_percpu_irq(unsigned int irq, struct irqaction *act)
{
	struct irq_desc *desc = irq_to_desc(irq);
	int retval;

	if (!desc || !irq_settings_is_per_cpu_devid(desc))
		return -EINVAL;
	chip_bus_lock(desc);
	retval = __setup_irq(irq, desc, act);
	chip_bus_sync_unlock(desc);

	return retval;
}

/**
 * request_percpu_irq - allocate a percpu interrupt line
 * @irq: Interrupt line to allocate
 * @handler: Function to be called when the IRQ occurs.
 * @devname: An ASCII name for the claiming device
 * @dev_id: A percpu cookie passed back to the handler function
 *
 * This call allocates interrupt resources, but doesn't automatically
 * enable the interrupt. That still has to be done on each CPU using
 * enable_percpu_irq().
 *
 * Dev_id must be globally unique. It is a per-cpu variable, and
 * the handler gets called with the interrupted CPU's instance of
 * that variable.
 */
int request_percpu_irq(unsigned int irq, irq_handler_t handler,
		       const char *devname, void __percpu *dev_id)
{
	struct irqaction *action;
	struct irq_desc *desc;
	int retval;

	if (!dev_id)
		return -EINVAL;

	desc = irq_to_desc(irq);
	if (!desc || !irq_settings_can_request(desc) ||
	    !irq_settings_is_per_cpu_devid(desc))
		return -EINVAL;

	action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
	if (!action)
		return -ENOMEM;

	action->handler = handler;
	action->flags = IRQF_PERCPU | IRQF_NO_SUSPEND;
	action->name = devname;
	action->percpu_dev_id = dev_id;

	chip_bus_lock(desc);
	retval = __setup_irq(irq, desc, action);
	chip_bus_sync_unlock(desc);

	if (retval)
		kfree(action);

	return retval;
}
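
/*
 * Usage sketch (not part of this file): requesting a per-CPU interrupt
 * with a DEFINE_PER_CPU() cookie. The counter, handler and names are
 * illustrative assumptions; the handler receives the interrupted CPU's
 * instance of the per-cpu variable as dev_id.
 */
static DEFINE_PER_CPU(unsigned long, example_percpu_count);

static irqreturn_t example_percpu_handler(int irq, void *dev_id)
{
	unsigned long *count = dev_id;	/* this CPU's counter */

	(*count)++;
	return IRQ_HANDLED;
}

static int example_percpu_request(unsigned int irq)
{
	int ret;

	ret = request_percpu_irq(irq, example_percpu_handler,
				 "example-percpu", &example_percpu_count);
	if (ret)
		return ret;

	/*
	 * The line is still disabled everywhere; each CPU must enable it
	 * itself with enable_percpu_irq(), e.g. as sketched earlier.
	 */
	return 0;
}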