// SPDX-License-Identifier: GPL-2.0

#define pr_fmt(fmt) "irq: " fmt

#include <linux/acpi.h>
#include <linux/debugfs.h>
#include <linux/hardirq.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqdesc.h>
#include <linux/irqdomain.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/topology.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/fs.h>

static LIST_HEAD(irq_domain_list);
static DEFINE_MUTEX(irq_domain_mutex);

static struct irq_domain *irq_default_domain;

static void irq_domain_check_hierarchy(struct irq_domain *domain);

struct irqchip_fwid {
	struct fwnode_handle fwnode;
	unsigned int type;
	char *name;
	phys_addr_t *pa;
};

#ifdef CONFIG_GENERIC_IRQ_DEBUGFS
static void debugfs_add_domain_dir(struct irq_domain *d);
static void debugfs_remove_domain_dir(struct irq_domain *d);
#else
static inline void debugfs_add_domain_dir(struct irq_domain *d) { }
static inline void debugfs_remove_domain_dir(struct irq_domain *d) { }
#endif

static const char *irqchip_fwnode_get_name(const struct fwnode_handle *fwnode)
{
	struct irqchip_fwid *fwid = container_of(fwnode, struct irqchip_fwid, fwnode);

	return fwid->name;
}

const struct fwnode_operations irqchip_fwnode_ops = {
	.get_name = irqchip_fwnode_get_name,
};
EXPORT_SYMBOL_GPL(irqchip_fwnode_ops);

/**
 * __irq_domain_alloc_fwnode - Allocate a fwnode_handle suitable for
 *                             identifying an irq domain
 * @type: Type of irqchip_fwnode. See linux/irqdomain.h
 * @id:   Optional user provided id if name != NULL
 * @name: Optional user provided domain name
 * @pa:   Optional user-provided physical address
 *
 * Allocate a struct irqchip_fwid, and return a pointer to the embedded
 * fwnode_handle (or NULL on failure).
 *
 * Note: The types IRQCHIP_FWNODE_NAMED and IRQCHIP_FWNODE_NAMED_ID are
 * solely to transport name information to irqdomain creation code. The
 * node is not stored. For other types the pointer is kept in the irq
 * domain struct.
 */
struct fwnode_handle *__irq_domain_alloc_fwnode(unsigned int type, int id,
						const char *name,
						phys_addr_t *pa)
{
	struct irqchip_fwid *fwid;
	char *n;

	fwid = kzalloc(sizeof(*fwid), GFP_KERNEL);

	switch (type) {
	case IRQCHIP_FWNODE_NAMED:
		n = kasprintf(GFP_KERNEL, "%s", name);
		break;
	case IRQCHIP_FWNODE_NAMED_ID:
		n = kasprintf(GFP_KERNEL, "%s-%d", name, id);
		break;
	default:
		n = kasprintf(GFP_KERNEL, "irqchip@%pa", pa);
		break;
	}

	if (!fwid || !n) {
		kfree(fwid);
		kfree(n);
		return NULL;
	}

	fwid->type = type;
	fwid->name = n;
	fwid->pa = pa;
	fwid->fwnode.ops = &irqchip_fwnode_ops;
	return &fwid->fwnode;
}
EXPORT_SYMBOL_GPL(__irq_domain_alloc_fwnode);

/**
 * irq_domain_free_fwnode - Free a non-OF-backed fwnode_handle
 *
 * Free a fwnode_handle allocated with irq_domain_alloc_fwnode.
 */
void irq_domain_free_fwnode(struct fwnode_handle *fwnode)
{
	struct irqchip_fwid *fwid;

	if (WARN_ON(!is_fwnode_irqchip(fwnode)))
		return;

	fwid = container_of(fwnode, struct irqchip_fwid, fwnode);
	kfree(fwid->name);
	kfree(fwid);
}
EXPORT_SYMBOL_GPL(irq_domain_free_fwnode);
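
/*
 * Illustrative sketch (not part of the original file): an irqchip driver
 * that has no firmware node of its own can allocate a fwnode handle for
 * its MMIO base address and release it again if domain creation fails.
 * The names "my_domain_ops", "priv" and "regs_pa" are hypothetical.
 *
 *	struct fwnode_handle *fwnode = irq_domain_alloc_fwnode(&regs_pa);
 *	struct irq_domain *d;
 *
 *	if (!fwnode)
 *		return -ENOMEM;
 *	d = irq_domain_create_linear(fwnode, 32, &my_domain_ops, priv);
 *	if (!d) {
 *		irq_domain_free_fwnode(fwnode);
 *		return -ENOMEM;
 *	}
 */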

/**
 * __irq_domain_add() - Allocate a new irq_domain data structure
 * @fwnode: firmware node for the interrupt controller
 * @size: Size of linear map; 0 for radix mapping only
 * @hwirq_max: Maximum number of interrupts supported by controller
 * @direct_max: Maximum value of direct maps; Use ~0 for no limit; 0 for no
 *              direct mapping
 * @ops: domain callbacks
 * @host_data: Controller private data pointer
 *
 * Allocates and initializes an irq_domain structure.
 * Returns pointer to IRQ domain, or NULL on failure.
 */
struct irq_domain *__irq_domain_add(struct fwnode_handle *fwnode, int size,
				    irq_hw_number_t hwirq_max, int direct_max,
				    const struct irq_domain_ops *ops,
				    void *host_data)
{
	struct irqchip_fwid *fwid;
	struct irq_domain *domain;

	static atomic_t unknown_domains;

	domain = kzalloc_node(sizeof(*domain) + (sizeof(unsigned int) * size),
			      GFP_KERNEL, of_node_to_nid(to_of_node(fwnode)));
	if (!domain)
		return NULL;

	if (is_fwnode_irqchip(fwnode)) {
		fwid = container_of(fwnode, struct irqchip_fwid, fwnode);

		switch (fwid->type) {
		case IRQCHIP_FWNODE_NAMED:
		case IRQCHIP_FWNODE_NAMED_ID:
			domain->fwnode = fwnode;
			domain->name = kstrdup(fwid->name, GFP_KERNEL);
			if (!domain->name) {
				kfree(domain);
				return NULL;
			}
			domain->flags |= IRQ_DOMAIN_NAME_ALLOCATED;
			break;
		default:
			domain->fwnode = fwnode;
			domain->name = fwid->name;
			break;
		}
	} else if (is_of_node(fwnode) || is_acpi_device_node(fwnode) ||
		   is_software_node(fwnode)) {
		char *name;

		/*
		 * fwnode paths contain '/', which debugfs is legitimately
		 * unhappy about. Replace them with ':', which does
		 * the trick and is not as offensive as '\'...
		 */
		name = kasprintf(GFP_KERNEL, "%pfw", fwnode);
		if (!name) {
			kfree(domain);
			return NULL;
		}

		strreplace(name, '/', ':');

		domain->name = name;
		domain->fwnode = fwnode;
		domain->flags |= IRQ_DOMAIN_NAME_ALLOCATED;
	}

	if (!domain->name) {
		if (fwnode)
			pr_err("Invalid fwnode type for irqdomain\n");
		domain->name = kasprintf(GFP_KERNEL, "unknown-%d",
					 atomic_inc_return(&unknown_domains));
		if (!domain->name) {
			kfree(domain);
			return NULL;
		}
		domain->flags |= IRQ_DOMAIN_NAME_ALLOCATED;
	}

	fwnode_handle_get(fwnode);

	/* Fill structure */
	INIT_RADIX_TREE(&domain->revmap_tree, GFP_KERNEL);
	mutex_init(&domain->revmap_tree_mutex);
	domain->ops = ops;
	domain->host_data = host_data;
	domain->hwirq_max = hwirq_max;
	domain->revmap_size = size;
	domain->revmap_direct_max_irq = direct_max;
	irq_domain_check_hierarchy(domain);

	mutex_lock(&irq_domain_mutex);
	debugfs_add_domain_dir(domain);
	list_add(&domain->link, &irq_domain_list);
	mutex_unlock(&irq_domain_mutex);

	pr_debug("Added domain %s\n", domain->name);
	return domain;
}
EXPORT_SYMBOL_GPL(__irq_domain_add);
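
/*
 * Illustrative sketch: drivers normally do not call __irq_domain_add()
 * directly but go through one of the wrappers in <linux/irqdomain.h>,
 * e.g. irq_domain_add_linear() for a fixed-size linear revmap
 * ("my_domain_ops" and "priv" are hypothetical):
 *
 *	domain = irq_domain_add_linear(np, 64, &my_domain_ops, priv);
 *	if (!domain)
 *		return -ENOMEM;
 */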

/**
 * irq_domain_remove() - Remove an irq domain.
 * @domain: domain to remove
 *
 * This routine is used to remove an irq domain. The caller must ensure
 * that all mappings within the domain have been disposed of prior to
 * calling this function, depending on the revmap type.
 */
void irq_domain_remove(struct irq_domain *domain)
{
	mutex_lock(&irq_domain_mutex);
	debugfs_remove_domain_dir(domain);

	WARN_ON(!radix_tree_empty(&domain->revmap_tree));

	list_del(&domain->link);

	/*
	 * If the going away domain is the default one, reset it.
	 */
	if (unlikely(irq_default_domain == domain))
		irq_set_default_host(NULL);

	mutex_unlock(&irq_domain_mutex);

	pr_debug("Removed domain %s\n", domain->name);

	fwnode_handle_put(domain->fwnode);
	if (domain->flags & IRQ_DOMAIN_NAME_ALLOCATED)
		kfree(domain->name);
	kfree(domain);
}
EXPORT_SYMBOL_GPL(irq_domain_remove);

void irq_domain_update_bus_token(struct irq_domain *domain,
				 enum irq_domain_bus_token bus_token)
{
	char *name;

	if (domain->bus_token == bus_token)
		return;

	mutex_lock(&irq_domain_mutex);

	domain->bus_token = bus_token;

	name = kasprintf(GFP_KERNEL, "%s-%d", domain->name, bus_token);
	if (!name) {
		mutex_unlock(&irq_domain_mutex);
		return;
	}

	debugfs_remove_domain_dir(domain);

	if (domain->flags & IRQ_DOMAIN_NAME_ALLOCATED)
		kfree(domain->name);
	else
		domain->flags |= IRQ_DOMAIN_NAME_ALLOCATED;

	domain->name = name;
	debugfs_add_domain_dir(domain);

	mutex_unlock(&irq_domain_mutex);
}
EXPORT_SYMBOL_GPL(irq_domain_update_bus_token);

/**
 * irq_domain_add_simple() - Register an irq_domain and optionally map a range of irqs
 * @of_node: pointer to interrupt controller's device tree node.
 * @size: total number of irqs in mapping
 * @first_irq: first number of irq block assigned to the domain,
 *	pass zero to assign irqs on-the-fly. If first_irq is non-zero, then
 *	pre-map all of the irqs in the domain to virqs starting at first_irq.
 * @ops: domain callbacks
 * @host_data: Controller private data pointer
 *
 * Allocates an irq_domain, and optionally if first_irq is positive then also
 * allocate irq_descs and map all of the hwirqs to virqs starting at first_irq.
 *
 * This is intended to implement the expected behaviour for most
 * interrupt controllers. If device tree is used, then first_irq will be 0 and
 * irqs get mapped dynamically on the fly. However, if the controller requires
 * static virq assignments (non-DT boot) then it will set that up correctly.
 */
struct irq_domain *irq_domain_add_simple(struct device_node *of_node,
					 unsigned int size,
					 unsigned int first_irq,
					 const struct irq_domain_ops *ops,
					 void *host_data)
{
	struct irq_domain *domain;

	domain = __irq_domain_add(of_node_to_fwnode(of_node), size, size, 0, ops, host_data);
	if (!domain)
		return NULL;

	if (first_irq > 0) {
		if (IS_ENABLED(CONFIG_SPARSE_IRQ)) {
			/* attempt to allocate irq_descs */
			int rc = irq_alloc_descs(first_irq, first_irq, size,
						 of_node_to_nid(of_node));
			if (rc < 0)
				pr_info("Cannot allocate irq_descs @ IRQ%d, assuming pre-allocated\n",
					first_irq);
		}
		irq_domain_associate_many(domain, first_irq, 0, size);
	}

	return domain;
}
EXPORT_SYMBOL_GPL(irq_domain_add_simple);
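
/*
 * Illustrative sketch of the minimal irq_domain_ops a caller of
 * irq_domain_add_simple() might supply; "my_chip" is a hypothetical
 * struct irq_chip provided by the driver.
 *
 *	static int my_domain_map(struct irq_domain *d, unsigned int virq,
 *				 irq_hw_number_t hwirq)
 *	{
 *		irq_set_chip_and_handler(virq, &my_chip, handle_level_irq);
 *		return 0;
 *	}
 *
 *	static const struct irq_domain_ops my_domain_ops = {
 *		.map	= my_domain_map,
 *		.xlate	= irq_domain_xlate_onetwocell,
 *	};
 */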

/**
 * irq_domain_add_legacy() - Allocate and register a legacy revmap irq_domain.
 * @of_node: pointer to interrupt controller's device tree node.
 * @size: total number of irqs in legacy mapping
 * @first_irq: first number of irq block assigned to the domain
 * @first_hwirq: first hwirq number to use for the translation. Should normally
 *               be '0', but a positive integer can be used if the effective
 *               hwirqs numbering does not begin at zero.
 * @ops: map/unmap domain callbacks
 * @host_data: Controller private data pointer
 *
 * Note: the map() callback will be called before this function returns
 * for all legacy interrupts except 0 (which is always the invalid irq for
 * a legacy controller).
 */
struct irq_domain *irq_domain_add_legacy(struct device_node *of_node,
					 unsigned int size,
					 unsigned int first_irq,
					 irq_hw_number_t first_hwirq,
					 const struct irq_domain_ops *ops,
					 void *host_data)
{
	struct irq_domain *domain;

	domain = __irq_domain_add(of_node_to_fwnode(of_node), first_hwirq + size,
				  first_hwirq + size, 0, ops, host_data);
	if (domain)
		irq_domain_associate_many(domain, first_irq, first_hwirq, size);

	return domain;
}
EXPORT_SYMBOL_GPL(irq_domain_add_legacy);

/**
 * irq_find_matching_fwspec() - Locates a domain for a given fwspec
 * @fwspec: FW specifier for an interrupt
 * @bus_token: domain-specific data
 */
struct irq_domain *irq_find_matching_fwspec(struct irq_fwspec *fwspec,
					    enum irq_domain_bus_token bus_token)
{
	struct irq_domain *h, *found = NULL;
	struct fwnode_handle *fwnode = fwspec->fwnode;
	int rc;

	/* We might want to match the legacy controller last since
	 * it might potentially be set to match all interrupts in
	 * the absence of a device node. This isn't a problem so far,
	 * though...
	 *
	 * bus_token == DOMAIN_BUS_ANY matches any domain, any other
	 * values must generate an exact match for the domain to be
	 * selected.
	 */
	mutex_lock(&irq_domain_mutex);
	list_for_each_entry(h, &irq_domain_list, link) {
		if (h->ops->select && fwspec->param_count)
			rc = h->ops->select(h, fwspec, bus_token);
		else if (h->ops->match)
			rc = h->ops->match(h, to_of_node(fwnode), bus_token);
		else
			rc = ((fwnode != NULL) && (h->fwnode == fwnode) &&
			      ((bus_token == DOMAIN_BUS_ANY) ||
			       (h->bus_token == bus_token)));

		if (rc) {
			found = h;
			break;
		}
	}
	mutex_unlock(&irq_domain_mutex);
	return found;
}
EXPORT_SYMBOL_GPL(irq_find_matching_fwspec);
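
/*
 * Illustrative sketch: a caller that already knows the firmware node and
 * interrupt specifier can look up the owning domain like this ("np" is a
 * hypothetical struct device_node pointer):
 *
 *	struct irq_fwspec fwspec = {
 *		.fwnode		= of_node_to_fwnode(np),
 *		.param_count	= 2,
 *		.param		= { 5, IRQ_TYPE_LEVEL_HIGH },
 *	};
 *	struct irq_domain *d;
 *
 *	d = irq_find_matching_fwspec(&fwspec, DOMAIN_BUS_WIRED);
 *	if (!d)
 *		d = irq_find_matching_fwspec(&fwspec, DOMAIN_BUS_ANY);
 */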

/**
 * irq_domain_check_msi_remap - Check whether all MSI irq domains implement
 * IRQ remapping
 *
 * Return: false if any MSI irq domain does not support IRQ remapping,
 * true otherwise (including if there is no MSI irq domain)
 */
bool irq_domain_check_msi_remap(void)
{
	struct irq_domain *h;
	bool ret = true;

	mutex_lock(&irq_domain_mutex);
	list_for_each_entry(h, &irq_domain_list, link) {
		if (irq_domain_is_msi(h) &&
		    !irq_domain_hierarchical_is_msi_remap(h)) {
			ret = false;
			break;
		}
	}
	mutex_unlock(&irq_domain_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(irq_domain_check_msi_remap);

/**
 * irq_set_default_host() - Set a "default" irq domain
 * @domain: default domain pointer
 *
 * For convenience, it's possible to set a "default" domain that will be used
 * whenever NULL is passed to irq_create_mapping(). It makes life easier for
 * platforms that want to manipulate a few hard coded interrupt numbers that
 * aren't properly represented in the device-tree.
 */
void irq_set_default_host(struct irq_domain *domain)
{
	pr_debug("Default domain set to @0x%p\n", domain);

	irq_default_domain = domain;
}
EXPORT_SYMBOL_GPL(irq_set_default_host);

/**
 * irq_get_default_host() - Retrieve the "default" irq domain
 *
 * Returns: the default domain, if any.
 *
 * Modern code should never use this. This should only be used on
 * systems that cannot implement a firmware->fwnode mapping (which
 * both DT and ACPI provide).
 */
struct irq_domain *irq_get_default_host(void)
{
	return irq_default_domain;
}

static void irq_domain_clear_mapping(struct irq_domain *domain,
				     irq_hw_number_t hwirq)
{
	if (hwirq < domain->revmap_size) {
		domain->linear_revmap[hwirq] = 0;
	} else {
		mutex_lock(&domain->revmap_tree_mutex);
		radix_tree_delete(&domain->revmap_tree, hwirq);
		mutex_unlock(&domain->revmap_tree_mutex);
	}
}

static void irq_domain_set_mapping(struct irq_domain *domain,
				   irq_hw_number_t hwirq,
				   struct irq_data *irq_data)
{
	if (hwirq < domain->revmap_size) {
		domain->linear_revmap[hwirq] = irq_data->irq;
	} else {
		mutex_lock(&domain->revmap_tree_mutex);
		radix_tree_insert(&domain->revmap_tree, hwirq, irq_data);
		mutex_unlock(&domain->revmap_tree_mutex);
	}
}

void irq_domain_disassociate(struct irq_domain *domain, unsigned int irq)
{
	struct irq_data *irq_data = irq_get_irq_data(irq);
	irq_hw_number_t hwirq;

	if (WARN(!irq_data || irq_data->domain != domain,
		 "virq%i doesn't exist; cannot disassociate\n", irq))
		return;

	hwirq = irq_data->hwirq;
	irq_set_status_flags(irq, IRQ_NOREQUEST);

	/* remove chip and handler */
	irq_set_chip_and_handler(irq, NULL, NULL);

	/* Make sure it's completed */
	synchronize_irq(irq);

	/* Tell the PIC about it */
	if (domain->ops->unmap)
		domain->ops->unmap(domain, irq);
	smp_mb();

	irq_data->domain = NULL;
	irq_data->hwirq = 0;
	domain->mapcount--;

	/* Clear reverse map for this hwirq */
	irq_domain_clear_mapping(domain, hwirq);
}

int irq_domain_associate(struct irq_domain *domain, unsigned int virq,
			 irq_hw_number_t hwirq)
{
	struct irq_data *irq_data = irq_get_irq_data(virq);
	int ret;

	if (WARN(hwirq >= domain->hwirq_max,
		 "error: hwirq 0x%x is too large for %s\n", (int)hwirq, domain->name))
		return -EINVAL;
	if (WARN(!irq_data, "error: virq%i is not allocated", virq))
		return -EINVAL;
	if (WARN(irq_data->domain, "error: virq%i is already associated", virq))
		return -EINVAL;

	mutex_lock(&irq_domain_mutex);
	irq_data->hwirq = hwirq;
	irq_data->domain = domain;
	if (domain->ops->map) {
		ret = domain->ops->map(domain, virq, hwirq);
		if (ret != 0) {
			/*
			 * If map() returns -EPERM, this interrupt is protected
			 * by the firmware or some other service and shall not
			 * be mapped. Don't bother telling the user about it.
			 */
			if (ret != -EPERM) {
				pr_info("%s didn't like hwirq-0x%lx to VIRQ%i mapping (rc=%d)\n",
					domain->name, hwirq, virq, ret);
			}
			irq_data->domain = NULL;
			irq_data->hwirq = 0;
			mutex_unlock(&irq_domain_mutex);
			return ret;
		}

		/* If not already assigned, give the domain the chip's name */
		if (!domain->name && irq_data->chip)
			domain->name = irq_data->chip->name;
	}

	domain->mapcount++;
	irq_domain_set_mapping(domain, hwirq, irq_data);
	mutex_unlock(&irq_domain_mutex);

	irq_clear_status_flags(virq, IRQ_NOREQUEST);

	return 0;
}
EXPORT_SYMBOL_GPL(irq_domain_associate);

void irq_domain_associate_many(struct irq_domain *domain, unsigned int irq_base,
			       irq_hw_number_t hwirq_base, int count)
{
	struct device_node *of_node;
	int i;

	of_node = irq_domain_get_of_node(domain);
	pr_debug("%s(%s, irqbase=%i, hwbase=%i, count=%i)\n", __func__,
		 of_node_full_name(of_node), irq_base, (int)hwirq_base, count);

	for (i = 0; i < count; i++) {
		irq_domain_associate(domain, irq_base + i, hwirq_base + i);
	}
}
EXPORT_SYMBOL_GPL(irq_domain_associate_many);

/**
 * irq_create_direct_mapping() - Allocate an irq for direct mapping
 * @domain: domain to allocate the irq for or NULL for default domain
 *
 * This routine is used for irq controllers which can choose the hardware
 * interrupt numbers they generate. In such a case it's simplest to use
 * the linux irq as the hardware interrupt number. It still uses the linear
 * or radix tree to store the mapping, but the irq controller can optimize
 * the revmap path by using the hwirq directly.
 */
unsigned int irq_create_direct_mapping(struct irq_domain *domain)
{
	struct device_node *of_node;
	unsigned int virq;

	if (domain == NULL)
		domain = irq_default_domain;

	of_node = irq_domain_get_of_node(domain);
	virq = irq_alloc_desc_from(1, of_node_to_nid(of_node));
	if (!virq) {
		pr_debug("create_direct virq allocation failed\n");
		return 0;
	}
	if (virq >= domain->revmap_direct_max_irq) {
		pr_err("ERROR: no free irqs available below %i maximum\n",
			domain->revmap_direct_max_irq);
		irq_free_desc(virq);
		return 0;
	}
	pr_debug("create_direct obtained virq %d\n", virq);

	if (irq_domain_associate(domain, virq, virq)) {
		irq_free_desc(virq);
		return 0;
	}

	return virq;
}
EXPORT_SYMBOL_GPL(irq_create_direct_mapping);

/**
 * irq_create_mapping() - Map a hardware interrupt into linux irq space
 * @domain: domain owning this hardware interrupt or NULL for default domain
 * @hwirq: hardware irq number in that domain space
 *
 * Only one mapping per hardware interrupt is permitted. Returns a linux
 * irq number.
 * If the sense/trigger is to be specified, set_irq_type() should be called
 * on the number returned from that call.
 */
unsigned int irq_create_mapping(struct irq_domain *domain,
				irq_hw_number_t hwirq)
{
	struct device_node *of_node;
	int virq;

	pr_debug("irq_create_mapping(0x%p, 0x%lx)\n", domain, hwirq);

	/* Look for default domain if necessary */
	if (domain == NULL)
		domain = irq_default_domain;
	if (domain == NULL) {
		WARN(1, "%s(, %lx) called with NULL domain\n", __func__, hwirq);
		return 0;
	}
	pr_debug("-> using domain @%p\n", domain);

	of_node = irq_domain_get_of_node(domain);

	/* Check if mapping already exists */
	virq = irq_find_mapping(domain, hwirq);
	if (virq) {
		pr_debug("-> existing mapping on virq %d\n", virq);
		return virq;
	}

	/* Allocate a virtual interrupt number */
	virq = irq_domain_alloc_descs(-1, 1, hwirq, of_node_to_nid(of_node), NULL);
	if (virq <= 0) {
		pr_debug("-> virq allocation failed\n");
		return 0;
	}

	if (irq_domain_associate(domain, virq, hwirq)) {
		irq_free_desc(virq);
		return 0;
	}

	pr_debug("irq %lu on domain %s mapped to virtual irq %u\n",
		 hwirq, of_node_full_name(of_node), virq);

	return virq;
}
EXPORT_SYMBOL_GPL(irq_create_mapping);
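
/*
 * Illustrative sketch: a controller driver that learns about hwirq 9 at
 * probe time can create (or look up) the Linux irq and then set the
 * trigger type separately:
 *
 *	unsigned int virq = irq_create_mapping(domain, 9);
 *
 *	if (virq)
 *		irq_set_irq_type(virq, IRQ_TYPE_EDGE_RISING);
 */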

/**
 * irq_create_strict_mappings() - Map a range of hw irqs to fixed linux irqs
 * @domain: domain owning the interrupt range
 * @irq_base: beginning of linux IRQ range
 * @hwirq_base: beginning of hardware IRQ range
 * @count: Number of interrupts to map
 *
 * This routine is used for allocating and mapping a range of hardware
 * irqs to linux irqs where the linux irq numbers are at pre-defined
 * locations. For use by controllers that already have static mappings
 * to insert into the domain.
 *
 * Non-linear users can use irq_create_identity_mapping() for IRQ-at-a-time
 * domain insertion.
 *
 * 0 is returned upon success, while any failure to establish a static
 * mapping is treated as an error.
 */
int irq_create_strict_mappings(struct irq_domain *domain, unsigned int irq_base,
			       irq_hw_number_t hwirq_base, int count)
{
	struct device_node *of_node;
	int ret;

	of_node = irq_domain_get_of_node(domain);
	ret = irq_alloc_descs(irq_base, irq_base, count,
			      of_node_to_nid(of_node));
	if (unlikely(ret < 0))
		return ret;

	irq_domain_associate_many(domain, irq_base, hwirq_base, count);
	return 0;
}
EXPORT_SYMBOL_GPL(irq_create_strict_mappings);

static int irq_domain_translate(struct irq_domain *d,
				struct irq_fwspec *fwspec,
				irq_hw_number_t *hwirq, unsigned int *type)
{
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
	if (d->ops->translate)
		return d->ops->translate(d, fwspec, hwirq, type);
#endif
	if (d->ops->xlate)
		return d->ops->xlate(d, to_of_node(fwspec->fwnode),
				     fwspec->param, fwspec->param_count,
				     hwirq, type);

	/* If domain has no translation, then we assume interrupt line */
	*hwirq = fwspec->param[0];
	return 0;
}

static void of_phandle_args_to_fwspec(struct device_node *np, const u32 *args,
				      unsigned int count,
				      struct irq_fwspec *fwspec)
{
	int i;

	fwspec->fwnode = np ? &np->fwnode : NULL;
	fwspec->param_count = count;

	for (i = 0; i < count; i++)
		fwspec->param[i] = args[i];
}

unsigned int irq_create_fwspec_mapping(struct irq_fwspec *fwspec)
{
	struct irq_domain *domain;
	struct irq_data *irq_data;
	irq_hw_number_t hwirq;
	unsigned int type = IRQ_TYPE_NONE;
	int virq;

	if (fwspec->fwnode) {
		domain = irq_find_matching_fwspec(fwspec, DOMAIN_BUS_WIRED);
		if (!domain)
			domain = irq_find_matching_fwspec(fwspec, DOMAIN_BUS_ANY);
	} else {
		domain = irq_default_domain;
	}

	if (!domain) {
		pr_warn("no irq domain found for %s !\n",
			of_node_full_name(to_of_node(fwspec->fwnode)));
		return 0;
	}

	if (irq_domain_translate(domain, fwspec, &hwirq, &type))
		return 0;

	/*
	 * WARN if the irqchip returns a type with bits
	 * outside the sense mask set and clear these bits.
	 */
	if (WARN_ON(type & ~IRQ_TYPE_SENSE_MASK))
		type &= IRQ_TYPE_SENSE_MASK;

	/*
	 * If we've already configured this interrupt,
	 * don't do it again, or hell will break loose.
	 */
	virq = irq_find_mapping(domain, hwirq);
	if (virq) {
		/*
		 * If the trigger type is not specified or matches the
		 * current trigger type then we are done so return the
		 * interrupt number.
		 */
		if (type == IRQ_TYPE_NONE || type == irq_get_trigger_type(virq))
			return virq;

		/*
		 * If the trigger type has not been set yet, then set
		 * it now and return the interrupt number.
		 */
		if (irq_get_trigger_type(virq) == IRQ_TYPE_NONE) {
			irq_data = irq_get_irq_data(virq);
			if (!irq_data)
				return 0;

			irqd_set_trigger_type(irq_data, type);
			return virq;
		}

		pr_warn("type mismatch, failed to map hwirq-%lu for %s!\n",
			hwirq, of_node_full_name(to_of_node(fwspec->fwnode)));
		return 0;
	}

	if (irq_domain_is_hierarchy(domain)) {
		virq = irq_domain_alloc_irqs(domain, 1, NUMA_NO_NODE, fwspec);
		if (virq <= 0)
			return 0;
	} else {
		/* Create mapping */
		virq = irq_create_mapping(domain, hwirq);
		if (!virq)
			return virq;
	}

	irq_data = irq_get_irq_data(virq);
	if (!irq_data) {
		if (irq_domain_is_hierarchy(domain))
			irq_domain_free_irqs(virq, 1);
		else
			irq_dispose_mapping(virq);
		return 0;
	}

	/* Store trigger type */
	irqd_set_trigger_type(irq_data, type);

	return virq;
}
EXPORT_SYMBOL_GPL(irq_create_fwspec_mapping);

unsigned int irq_create_of_mapping(struct of_phandle_args *irq_data)
{
	struct irq_fwspec fwspec;

	of_phandle_args_to_fwspec(irq_data->np, irq_data->args,
				  irq_data->args_count, &fwspec);

	return irq_create_fwspec_mapping(&fwspec);
}
EXPORT_SYMBOL_GPL(irq_create_of_mapping);
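
/*
 * Illustrative sketch: device drivers rarely build an of_phandle_args by
 * hand; helpers such as irq_of_parse_and_map() extract the specifier from
 * the "interrupts" property and funnel it into the functions above:
 *
 *	int virq = irq_of_parse_and_map(dev->of_node, 0);
 *
 *	if (!virq)
 *		return -EINVAL;
 */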

/**
 * irq_dispose_mapping() - Unmap an interrupt
 * @virq: linux irq number of the interrupt to unmap
 */
void irq_dispose_mapping(unsigned int virq)
{
	struct irq_data *irq_data = irq_get_irq_data(virq);
	struct irq_domain *domain;

	if (!virq || !irq_data)
		return;

	domain = irq_data->domain;
	if (WARN_ON(domain == NULL))
		return;

	if (irq_domain_is_hierarchy(domain)) {
		irq_domain_free_irqs(virq, 1);
	} else {
		irq_domain_disassociate(domain, virq);
		irq_free_desc(virq);
	}
}
EXPORT_SYMBOL_GPL(irq_dispose_mapping);

/**
 * irq_find_mapping() - Find a linux irq from a hw irq number.
 * @domain: domain owning this hardware interrupt
 * @hwirq: hardware irq number in that domain space
 */
unsigned int irq_find_mapping(struct irq_domain *domain,
			      irq_hw_number_t hwirq)
{
	struct irq_data *data;

	/* Look for default domain if necessary */
	if (domain == NULL)
		domain = irq_default_domain;
	if (domain == NULL)
		return 0;

	if (hwirq < domain->revmap_direct_max_irq) {
		data = irq_domain_get_irq_data(domain, hwirq);
		if (data && data->hwirq == hwirq)
			return hwirq;
	}

	/* Check if the hwirq is in the linear revmap. */
	if (hwirq < domain->revmap_size)
		return domain->linear_revmap[hwirq];

	rcu_read_lock();
	data = radix_tree_lookup(&domain->revmap_tree, hwirq);
	rcu_read_unlock();
	return data ? data->irq : 0;
}
EXPORT_SYMBOL_GPL(irq_find_mapping);
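
/*
 * Illustrative sketch: a chained handler typically demultiplexes by
 * reading a hardware status register and translating the pending hwirq
 * through the reverse map ("read_pending_hwirq" is a hypothetical helper):
 *
 *	irq_hw_number_t hwirq = read_pending_hwirq(priv);
 *	unsigned int virq = irq_find_mapping(priv->domain, hwirq);
 *
 *	if (virq)
 *		generic_handle_irq(virq);
 */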

/**
 * irq_domain_xlate_onecell() - Generic xlate for direct one cell bindings
 *
 * Device Tree IRQ specifier translation function which works with one cell
 * bindings where the cell value maps directly to the hwirq number.
 */
int irq_domain_xlate_onecell(struct irq_domain *d, struct device_node *ctrlr,
			     const u32 *intspec, unsigned int intsize,
			     unsigned long *out_hwirq, unsigned int *out_type)
{
	if (WARN_ON(intsize < 1))
		return -EINVAL;
	*out_hwirq = intspec[0];
	*out_type = IRQ_TYPE_NONE;
	return 0;
}
EXPORT_SYMBOL_GPL(irq_domain_xlate_onecell);

/**
 * irq_domain_xlate_twocell() - Generic xlate for direct two cell bindings
 *
 * Device Tree IRQ specifier translation function which works with two cell
 * bindings where the cell values map directly to the hwirq number
 * and linux irq flags.
 */
int irq_domain_xlate_twocell(struct irq_domain *d, struct device_node *ctrlr,
			     const u32 *intspec, unsigned int intsize,
			     irq_hw_number_t *out_hwirq, unsigned int *out_type)
{
	struct irq_fwspec fwspec;

	of_phandle_args_to_fwspec(ctrlr, intspec, intsize, &fwspec);
	return irq_domain_translate_twocell(d, &fwspec, out_hwirq, out_type);
}
EXPORT_SYMBOL_GPL(irq_domain_xlate_twocell);
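
/*
 * Illustrative sketch: a device tree node using a two cell binding, e.g.
 *
 *	interrupts = <34 IRQ_TYPE_LEVEL_HIGH>;
 *
 * is decoded by irq_domain_xlate_twocell() into hwirq 34 with the
 * IRQ_TYPE_LEVEL_HIGH trigger type.
 */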

/**
 * irq_domain_xlate_onetwocell() - Generic xlate for one or two cell bindings
 *
 * Device Tree IRQ specifier translation function which works with either one
 * or two cell bindings where the cell values map directly to the hwirq number
 * and linux irq flags.
 *
 * Note: don't use this function unless your interrupt controller explicitly
 * supports both one and two cell bindings. For the majority of controllers
 * the _onecell() or _twocell() variants above should be used.
 */
int irq_domain_xlate_onetwocell(struct irq_domain *d,
				struct device_node *ctrlr,
				const u32 *intspec, unsigned int intsize,
				unsigned long *out_hwirq, unsigned int *out_type)
{
	if (WARN_ON(intsize < 1))
		return -EINVAL;
	*out_hwirq = intspec[0];
	if (intsize > 1)
		*out_type = intspec[1] & IRQ_TYPE_SENSE_MASK;
	else
		*out_type = IRQ_TYPE_NONE;
	return 0;
}
EXPORT_SYMBOL_GPL(irq_domain_xlate_onetwocell);

const struct irq_domain_ops irq_domain_simple_ops = {
	.xlate = irq_domain_xlate_onetwocell,
};
EXPORT_SYMBOL_GPL(irq_domain_simple_ops);

/**
 * irq_domain_translate_onecell() - Generic translate for direct one cell
 * bindings
 */
int irq_domain_translate_onecell(struct irq_domain *d,
				 struct irq_fwspec *fwspec,
				 unsigned long *out_hwirq,
				 unsigned int *out_type)
{
	if (WARN_ON(fwspec->param_count < 1))
		return -EINVAL;
	*out_hwirq = fwspec->param[0];
	*out_type = IRQ_TYPE_NONE;
	return 0;
}
EXPORT_SYMBOL_GPL(irq_domain_translate_onecell);

/**
 * irq_domain_translate_twocell() - Generic translate for direct two cell
 * bindings
 *
 * Device Tree IRQ specifier translation function which works with two cell
 * bindings where the cell values map directly to the hwirq number
 * and linux irq flags.
 */
int irq_domain_translate_twocell(struct irq_domain *d,
				 struct irq_fwspec *fwspec,
				 unsigned long *out_hwirq,
				 unsigned int *out_type)
{
	if (WARN_ON(fwspec->param_count < 2))
		return -EINVAL;
	*out_hwirq = fwspec->param[0];
	*out_type = fwspec->param[1] & IRQ_TYPE_SENSE_MASK;
	return 0;
}
EXPORT_SYMBOL_GPL(irq_domain_translate_twocell);

int irq_domain_alloc_descs(int virq, unsigned int cnt, irq_hw_number_t hwirq,
			   int node, const struct irq_affinity_desc *affinity)
{
	unsigned int hint;

	if (virq >= 0) {
		virq = __irq_alloc_descs(virq, virq, cnt, node, THIS_MODULE,
					 affinity);
	} else {
		hint = hwirq % nr_irqs;
		if (hint == 0)
			hint++;
		virq = __irq_alloc_descs(-1, hint, cnt, node, THIS_MODULE,
					 affinity);
		if (virq <= 0 && hint > 1) {
			virq = __irq_alloc_descs(-1, 1, cnt, node, THIS_MODULE,
						 affinity);
		}
	}

	return virq;
}

/**
 * irq_domain_reset_irq_data - Clear hwirq, chip and chip_data in @irq_data
 * @irq_data: The pointer to irq_data
 */
void irq_domain_reset_irq_data(struct irq_data *irq_data)
{
	irq_data->hwirq = 0;
	irq_data->chip = &no_irq_chip;
	irq_data->chip_data = NULL;
}
EXPORT_SYMBOL_GPL(irq_domain_reset_irq_data);

#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
/**
 * irq_domain_create_hierarchy - Add an irqdomain into the hierarchy
 * @parent:	Parent irq domain to associate with the new domain
 * @flags:	Irq domain flags associated to the domain
 * @size:	Size of the domain. See below
 * @fwnode:	Optional fwnode of the interrupt controller
 * @ops:	Pointer to the interrupt domain callbacks
 * @host_data:	Controller private data pointer
 *
 * If @size is 0 a tree domain is created, otherwise a linear domain.
 *
 * If successful the parent is associated to the new domain and the
 * domain flags are set.
 * Returns pointer to IRQ domain, or NULL on failure.
 */
struct irq_domain *irq_domain_create_hierarchy(struct irq_domain *parent,
					       unsigned int flags,
					       unsigned int size,
					       struct fwnode_handle *fwnode,
					       const struct irq_domain_ops *ops,
					       void *host_data)
{
	struct irq_domain *domain;

	if (size)
		domain = irq_domain_create_linear(fwnode, size, ops, host_data);
	else
		domain = irq_domain_create_tree(fwnode, ops, host_data);
	if (domain) {
		domain->parent = parent;
		domain->flags |= flags;
	}

	return domain;
}
EXPORT_SYMBOL_GPL(irq_domain_create_hierarchy);
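
/*
 * Illustrative sketch: a stacked irqchip (an MSI or wakeup controller, for
 * instance) creates its domain on top of the parent domain; the names
 * "child_domain_ops" and "priv" are hypothetical.
 *
 *	child = irq_domain_create_hierarchy(parent, 0, nr_irqs,
 *					    of_node_to_fwnode(np),
 *					    &child_domain_ops, priv);
 *	if (!child)
 *		return -ENOMEM;
 */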

static void irq_domain_insert_irq(int virq)
{
	struct irq_data *data;

	for (data = irq_get_irq_data(virq); data; data = data->parent_data) {
		struct irq_domain *domain = data->domain;

		domain->mapcount++;
		irq_domain_set_mapping(domain, data->hwirq, data);

		/* If not already assigned, give the domain the chip's name */
		if (!domain->name && data->chip)
			domain->name = data->chip->name;
	}

	irq_clear_status_flags(virq, IRQ_NOREQUEST);
}

static void irq_domain_remove_irq(int virq)
{
	struct irq_data *data;

	irq_set_status_flags(virq, IRQ_NOREQUEST);
	irq_set_chip_and_handler(virq, NULL, NULL);
	synchronize_irq(virq);
	smp_mb();

	for (data = irq_get_irq_data(virq); data; data = data->parent_data) {
		struct irq_domain *domain = data->domain;
		irq_hw_number_t hwirq = data->hwirq;

		domain->mapcount--;
		irq_domain_clear_mapping(domain, hwirq);
	}
}

static struct irq_data *irq_domain_insert_irq_data(struct irq_domain *domain,
						   struct irq_data *child)
{
	struct irq_data *irq_data;

	irq_data = kzalloc_node(sizeof(*irq_data), GFP_KERNEL,
				irq_data_get_node(child));
	if (irq_data) {
		child->parent_data = irq_data;
		irq_data->irq = child->irq;
		irq_data->common = child->common;
		irq_data->domain = domain;
	}

	return irq_data;
}

static void __irq_domain_free_hierarchy(struct irq_data *irq_data)
{
	struct irq_data *tmp;

	while (irq_data) {
		tmp = irq_data;
		irq_data = irq_data->parent_data;
		kfree(tmp);
	}
}

static void irq_domain_free_irq_data(unsigned int virq, unsigned int nr_irqs)
{
	struct irq_data *irq_data, *tmp;
	int i;

	for (i = 0; i < nr_irqs; i++) {
		irq_data = irq_get_irq_data(virq + i);
		tmp = irq_data->parent_data;
		irq_data->parent_data = NULL;
		irq_data->domain = NULL;

		__irq_domain_free_hierarchy(tmp);
	}
}

/**
 * irq_domain_disconnect_hierarchy - Mark the first unused level of a hierarchy
 * @domain:	IRQ domain from which the hierarchy is to be disconnected
 * @virq:	IRQ number where the hierarchy is to be trimmed
 *
 * Marks the @virq level belonging to @domain as disconnected.
 * Returns -EINVAL if @virq doesn't have a valid irq_data pointing
 * to @domain.
 *
 * Its only use is to be able to trim levels of hierarchy that do not
 * have any real meaning for this interrupt, and that the driver marks
 * as such from its .alloc() callback.
 */
int irq_domain_disconnect_hierarchy(struct irq_domain *domain,
				    unsigned int virq)
{
	struct irq_data *irqd;

	irqd = irq_domain_get_irq_data(domain, virq);
	if (!irqd)
		return -EINVAL;

	irqd->chip = ERR_PTR(-ENOTCONN);
	return 0;
}

static int irq_domain_trim_hierarchy(unsigned int virq)
{
	struct irq_data *tail, *irqd, *irq_data;

	irq_data = irq_get_irq_data(virq);
	tail = NULL;

	/* The first entry must have a valid irqchip */
	if (!irq_data->chip || IS_ERR(irq_data->chip))
		return -EINVAL;

	/*
	 * Validate that the irq_data chain is sane in the presence of
	 * a hierarchy trimming marker.
	 */
	for (irqd = irq_data->parent_data; irqd; irq_data = irqd, irqd = irqd->parent_data) {
		/* Can't have a valid irqchip after a trim marker */
		if (irqd->chip && tail)
			return -EINVAL;

		/* Can't have an empty irqchip before a trim marker */
		if (!irqd->chip && !tail)
			return -EINVAL;

		if (IS_ERR(irqd->chip)) {
			/* Only -ENOTCONN is a valid trim marker */
			if (PTR_ERR(irqd->chip) != -ENOTCONN)
				return -EINVAL;

			tail = irq_data;
		}
	}

	/* No trim marker, nothing to do */
	if (!tail)
		return 0;

	pr_info("IRQ%d: trimming hierarchy from %s\n",
		virq, tail->parent_data->domain->name);

	/* Sever the inner part of the hierarchy... */
	irqd = tail;
	tail = tail->parent_data;
	irqd->parent_data = NULL;
	__irq_domain_free_hierarchy(tail);

	return 0;
}

1249static int irq_domain_alloc_irq_data(struct irq_domain *domain,
1250 unsigned int virq, unsigned int nr_irqs)
1251{
1252 struct irq_data *irq_data;
1253 struct irq_domain *parent;
1254 int i;
1255
1256 /* The outermost irq_data is embedded in struct irq_desc */
1257 for (i = 0; i < nr_irqs; i++) {
1258 irq_data = irq_get_irq_data(virq + i);
1259 irq_data->domain = domain;
1260
1261 for (parent = domain->parent; parent; parent = parent->parent) {
1262 irq_data = irq_domain_insert_irq_data(parent, irq_data);
1263 if (!irq_data) {
1264 irq_domain_free_irq_data(virq, i + 1);
1265 return -ENOMEM;
1266 }
1267 }
1268 }
1269
1270 return 0;
1271}
1272
1273/**
1274 * irq_domain_get_irq_data - Get irq_data associated with @virq and @domain
1275 * @domain: domain to match
1276 * @virq: IRQ number to get irq_data
1277 */
1278struct irq_data *irq_domain_get_irq_data(struct irq_domain *domain,
1279 unsigned int virq)
1280{
1281 struct irq_data *irq_data;
1282
1283 for (irq_data = irq_get_irq_data(virq); irq_data;
1284 irq_data = irq_data->parent_data)
1285 if (irq_data->domain == domain)
1286 return irq_data;
1287
1288 return NULL;
1289}
Jake Oshinsa4289dc2015-12-10 17:52:59 +00001290EXPORT_SYMBOL_GPL(irq_domain_get_irq_data);
Jiang Liuf8264e32014-11-06 22:20:14 +08001291
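/*
 * Illustrative sketch only (#if 0, never built): a driver sitting somewhere
 * in the hierarchy can recover its own view of a Linux IRQ, for instance the
 * hwirq it assigned at allocation time. example_show_mapping() is a made-up
 * helper, not a kernel API.
 */
#if 0
static void example_show_mapping(struct irq_domain *d, unsigned int virq)
{
	struct irq_data *irqd = irq_domain_get_irq_data(d, virq);

	if (irqd)
		pr_debug("virq %u -> hwirq %lu in %s\n", virq,
			 (unsigned long)irqd->hwirq, d->name);
}
#endif
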
1292/**
1293 * irq_domain_set_hwirq_and_chip - Set hwirq and irqchip of @virq at @domain
1294 * @domain: Interrupt domain to match
1295 * @virq: IRQ number
1296 * @hwirq: The hwirq number
1297 * @chip: The associated interrupt chip
1298 * @chip_data: The associated chip data
1299 */
1300int irq_domain_set_hwirq_and_chip(struct irq_domain *domain, unsigned int virq,
1301 irq_hw_number_t hwirq, struct irq_chip *chip,
1302 void *chip_data)
1303{
1304 struct irq_data *irq_data = irq_domain_get_irq_data(domain, virq);
1305
1306 if (!irq_data)
1307 return -ENOENT;
1308
1309 irq_data->hwirq = hwirq;
1310 irq_data->chip = chip ? chip : &no_irq_chip;
1311 irq_data->chip_data = chip_data;
1312
1313 return 0;
1314}
Quan Nguyen52b2a052016-03-03 21:56:52 +07001315EXPORT_SYMBOL_GPL(irq_domain_set_hwirq_and_chip);
Jiang Liuf8264e32014-11-06 22:20:14 +08001316
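/*
 * Illustrative sketch only (#if 0, never built): the usual pattern in a
 * stacked domain's ->alloc() callback is to allocate the parent level first
 * and then bind every virq in the range to this level's hwirq and chip.
 * "example_chip" and the linear hwirq encoding of @arg are assumptions.
 */
#if 0
static struct irq_chip example_chip = {
	.name		= "EXAMPLE",
	.irq_mask	= irq_chip_mask_parent,
	.irq_unmask	= irq_chip_unmask_parent,
	.irq_eoi	= irq_chip_eoi_parent,
};

static int example_stacked_alloc(struct irq_domain *d, unsigned int virq,
				 unsigned int nr_irqs, void *arg)
{
	irq_hw_number_t hwirq = *(irq_hw_number_t *)arg;
	unsigned int i;
	int ret;

	ret = irq_domain_alloc_irqs_parent(d, virq, nr_irqs, arg);
	if (ret)
		return ret;

	for (i = 0; i < nr_irqs; i++)
		irq_domain_set_hwirq_and_chip(d, virq + i, hwirq + i,
					      &example_chip, NULL);
	return 0;
}
#endif
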
1317/**
Jiang Liu1b537702014-11-09 23:10:24 +08001318 * irq_domain_set_info - Set the complete data for a @virq in @domain
1319 * @domain: Interrupt domain to match
1320 * @virq: IRQ number
1321 * @hwirq: The hardware interrupt number
1322 * @chip: The associated interrupt chip
1323 * @chip_data: The associated interrupt chip data
1324 * @handler: The interrupt flow handler
1325 * @handler_data: The interrupt flow handler data
1326 * @handler_name: The interrupt handler name
1327 */
1328void irq_domain_set_info(struct irq_domain *domain, unsigned int virq,
1329 irq_hw_number_t hwirq, struct irq_chip *chip,
1330 void *chip_data, irq_flow_handler_t handler,
1331 void *handler_data, const char *handler_name)
1332{
1333 irq_domain_set_hwirq_and_chip(domain, virq, hwirq, chip, chip_data);
1334 __irq_set_handler(virq, handler, 0, handler_name);
1335 irq_set_handler_data(virq, handler_data);
1336}
Keith Busch64bce3e2016-01-12 13:18:07 -07001337EXPORT_SYMBOL(irq_domain_set_info);
Jiang Liu1b537702014-11-09 23:10:24 +08001338
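/*
 * Illustrative sketch only (#if 0, never built): when the flow handler is
 * already known at allocation time, a single irq_domain_set_info() call
 * replaces the separate hwirq/chip and handler setup. "example_edge_chip"
 * is an assumption; handle_edge_irq is one of the generic flow handlers.
 */
#if 0
static struct irq_chip example_edge_chip = {
	.name		= "EXAMPLE-EDGE",
	.irq_ack	= irq_chip_ack_parent,
	.irq_mask	= irq_chip_mask_parent,
	.irq_unmask	= irq_chip_unmask_parent,
};

static void example_setup_one(struct irq_domain *d, unsigned int virq,
			      irq_hw_number_t hwirq, void *chip_data)
{
	irq_domain_set_info(d, virq, hwirq, &example_edge_chip, chip_data,
			    handle_edge_irq, NULL, NULL);
}
#endif
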
1339/**
Jiang Liuf8264e32014-11-06 22:20:14 +08001340 * irq_domain_free_irqs_common - Clear irq_data and free the parent
1341 * @domain: Interrupt domain to match
1342 * @virq: IRQ number to start with
1343 * @nr_irqs: The number of irqs to free
1344 */
1345void irq_domain_free_irqs_common(struct irq_domain *domain, unsigned int virq,
1346 unsigned int nr_irqs)
1347{
1348 struct irq_data *irq_data;
1349 int i;
1350
1351 for (i = 0; i < nr_irqs; i++) {
1352 irq_data = irq_domain_get_irq_data(domain, virq + i);
1353 if (irq_data)
1354 irq_domain_reset_irq_data(irq_data);
1355 }
1356 irq_domain_free_irqs_parent(domain, virq, nr_irqs);
1357}
Axel Lin63cc7872016-03-17 12:00:31 +08001358EXPORT_SYMBOL_GPL(irq_domain_free_irqs_common);
Jiang Liuf8264e32014-11-06 22:20:14 +08001359
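/*
 * Illustrative sketch only (#if 0, never built): a ->free() callback that has
 * per-interrupt driver state to release before handing the rest of the
 * teardown to the core. example_release() is a made-up helper.
 */
#if 0
static void example_domain_free(struct irq_domain *d, unsigned int virq,
				unsigned int nr_irqs)
{
	unsigned int i;

	for (i = 0; i < nr_irqs; i++)
		example_release(irq_domain_get_irq_data(d, virq + i));

	irq_domain_free_irqs_common(d, virq, nr_irqs);
}
#endif
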
1360/**
1361 * irq_domain_free_irqs_top - Clear handler and handler data, clear irqdata and free parent
1362 * @domain: Interrupt domain to match
1363 * @virq: IRQ number to start with
1364 * @nr_irqs: The number of irqs to free
1365 */
1366void irq_domain_free_irqs_top(struct irq_domain *domain, unsigned int virq,
1367 unsigned int nr_irqs)
1368{
1369 int i;
1370
1371 for (i = 0; i < nr_irqs; i++) {
1372 irq_set_handler_data(virq + i, NULL);
1373 irq_set_handler(virq + i, NULL);
1374 }
1375 irq_domain_free_irqs_common(domain, virq, nr_irqs);
1376}
1377
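/*
 * Illustrative sketch only (#if 0, never built): both helpers above match the
 * ->free() prototype, so a domain that needs no extra teardown of its own can
 * plug one of them straight into its ops. example_domain_translate and
 * example_stacked_alloc are assumptions used for the sketch.
 */
#if 0
static const struct irq_domain_ops example_domain_ops = {
	.translate	= example_domain_translate,
	.alloc		= example_stacked_alloc,
	.free		= irq_domain_free_irqs_top,
};
#endif
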
Marc Zyngier6a6544e2017-06-20 22:17:44 +01001378static void irq_domain_free_irqs_hierarchy(struct irq_domain *domain,
Jiang Liu36d72732014-11-15 22:24:01 +08001379 unsigned int irq_base,
1380 unsigned int nr_irqs)
1381{
David Daney0d12ec02017-08-17 17:53:33 -07001382 if (domain->ops->free)
1383 domain->ops->free(domain, irq_base, nr_irqs);
Jiang Liu36d72732014-11-15 22:24:01 +08001384}
1385
Marc Zyngier6a6544e2017-06-20 22:17:44 +01001386int irq_domain_alloc_irqs_hierarchy(struct irq_domain *domain,
Marc Zyngierc4665952015-11-23 08:26:04 +00001387 unsigned int irq_base,
1388 unsigned int nr_irqs, void *arg)
Jiang Liu36d72732014-11-15 22:24:01 +08001389{
Alexander Sverdlin87f2d1c2020-03-06 18:47:20 +01001390 if (!domain->ops->alloc) {
1391 pr_debug("domain->ops->alloc() is NULL\n");
1392 return -ENOSYS;
1393 }
1394
Marc Zyngier6a6544e2017-06-20 22:17:44 +01001395 return domain->ops->alloc(domain, irq_base, nr_irqs, arg);
Jiang Liu36d72732014-11-15 22:24:01 +08001396}
1397
Jiang Liuf8264e32014-11-06 22:20:14 +08001398/**
1399 * __irq_domain_alloc_irqs - Allocate IRQs from domain
1400 * @domain: domain to allocate from
Julien Grall08970ec2019-04-18 16:54:01 +01001401 * @irq_base: allocate specified IRQ number if irq_base >= 0
Jiang Liuf8264e32014-11-06 22:20:14 +08001402 * @nr_irqs: number of IRQs to allocate
1403 * @node: NUMA node id for memory allocation
1404 * @arg: domain specific argument
1405 * @realloc: IRQ descriptors have already been allocated if true
Thomas Gleixner06ee6d52016-07-04 17:39:24 +09001406 * @affinity: Optional irq affinity mask for multiqueue devices
Jiang Liuf8264e32014-11-06 22:20:14 +08001407 *
 1408 * Allocate IRQ numbers and initialize all data structures needed to
 1409 * support hierarchical IRQ domains.
 1410 * Parameter @realloc is mainly to support legacy IRQs.
 1411 * Returns the allocated IRQ number on success, or a negative error code.
1412 *
 1413 * The whole process of setting up an IRQ has been split into two steps.
 1414 * The first step, __irq_domain_alloc_irqs(), allocates the IRQ
 1415 * descriptors and the required hardware resources. The second step,
 1416 * irq_domain_activate_irq(), programs the hardware with the preallocated
 1417 * resources. This split makes it easier to roll back when resource
 1418 * allocation fails.
1419 */
1420int __irq_domain_alloc_irqs(struct irq_domain *domain, int irq_base,
1421 unsigned int nr_irqs, int node, void *arg,
Dou Liyangbec04032018-12-04 23:51:20 +08001422 bool realloc, const struct irq_affinity_desc *affinity)
Jiang Liuf8264e32014-11-06 22:20:14 +08001423{
1424 int i, ret, virq;
1425
1426 if (domain == NULL) {
1427 domain = irq_default_domain;
1428 if (WARN(!domain, "domain is NULL; cannot allocate IRQ\n"))
1429 return -EINVAL;
1430 }
1431
Jiang Liuf8264e32014-11-06 22:20:14 +08001432 if (realloc && irq_base >= 0) {
1433 virq = irq_base;
1434 } else {
Thomas Gleixner06ee6d52016-07-04 17:39:24 +09001435 virq = irq_domain_alloc_descs(irq_base, nr_irqs, 0, node,
1436 affinity);
Jiang Liuf8264e32014-11-06 22:20:14 +08001437 if (virq < 0) {
1438 pr_debug("cannot allocate IRQ(base %d, count %d)\n",
1439 irq_base, nr_irqs);
1440 return virq;
1441 }
1442 }
1443
1444 if (irq_domain_alloc_irq_data(domain, virq, nr_irqs)) {
1445 pr_debug("cannot allocate memory for IRQ%d\n", virq);
1446 ret = -ENOMEM;
1447 goto out_free_desc;
1448 }
1449
1450 mutex_lock(&irq_domain_mutex);
Marc Zyngier6a6544e2017-06-20 22:17:44 +01001451 ret = irq_domain_alloc_irqs_hierarchy(domain, virq, nr_irqs, arg);
Jiang Liuf8264e32014-11-06 22:20:14 +08001452 if (ret < 0) {
1453 mutex_unlock(&irq_domain_mutex);
1454 goto out_free_irq_data;
1455 }
Marc Zyngier55567972020-10-06 10:10:20 +01001456
1457 for (i = 0; i < nr_irqs; i++) {
1458 ret = irq_domain_trim_hierarchy(virq + i);
1459 if (ret) {
1460 mutex_unlock(&irq_domain_mutex);
1461 goto out_free_irq_data;
1462 }
1463 }
1464
Jiang Liuf8264e32014-11-06 22:20:14 +08001465 for (i = 0; i < nr_irqs; i++)
1466 irq_domain_insert_irq(virq + i);
1467 mutex_unlock(&irq_domain_mutex);
1468
1469 return virq;
1470
1471out_free_irq_data:
1472 irq_domain_free_irq_data(virq, nr_irqs);
1473out_free_desc:
1474 irq_free_descs(virq, nr_irqs);
1475 return ret;
1476}
1477
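/*
 * Illustrative sketch only (#if 0, never built): callers normally go through
 * the irq_domain_alloc_irqs() wrapper from <linux/irqdomain.h>, which passes
 * irq_base = -1, realloc = false and no affinity hint. example_alloc_one()
 * is a made-up helper used only for the sketch.
 */
#if 0
static int example_alloc_one(struct irq_domain *d, struct irq_fwspec *fwspec)
{
	int virq = irq_domain_alloc_irqs(d, 1, NUMA_NO_NODE, fwspec);

	if (virq < 0)
		return virq;

	/* ... request_irq() etc.; release with irq_domain_free_irqs(virq, 1) */
	return virq;
}
#endif
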
David Daney495c38d2017-08-17 17:53:34 -07001478/* The irq_data was moved, fix the revmap to refer to the new location */
1479static void irq_domain_fix_revmap(struct irq_data *d)
1480{
Masahiro Yamadad03cc2d2017-09-22 21:20:41 +09001481 void __rcu **slot;
David Daney495c38d2017-08-17 17:53:34 -07001482
1483 if (d->hwirq < d->domain->revmap_size)
1484 return; /* Not using radix tree. */
1485
1486 /* Fix up the revmap. */
Masahiro Yamadaf1d78352017-10-05 10:44:54 +09001487 mutex_lock(&d->domain->revmap_tree_mutex);
David Daney495c38d2017-08-17 17:53:34 -07001488 slot = radix_tree_lookup_slot(&d->domain->revmap_tree, d->hwirq);
1489 if (slot)
1490 radix_tree_replace_slot(&d->domain->revmap_tree, slot, d);
Masahiro Yamadaf1d78352017-10-05 10:44:54 +09001491 mutex_unlock(&d->domain->revmap_tree_mutex);
David Daney495c38d2017-08-17 17:53:34 -07001492}
1493
1494/**
 1495 * irq_domain_push_irq() - Push a domain into the top of a hierarchy.
 1496 * @domain: Domain to push.
 1497 * @virq: Irq to push the domain into.
1498 * @arg: Passed to the irq_domain_ops alloc() function.
1499 *
1500 * For an already existing irqdomain hierarchy, as might be obtained
1501 * via a call to pci_enable_msix(), add an additional domain to the
1502 * head of the processing chain. Must be called before request_irq()
1503 * has been called.
1504 */
1505int irq_domain_push_irq(struct irq_domain *domain, int virq, void *arg)
1506{
1507 struct irq_data *child_irq_data;
1508 struct irq_data *root_irq_data = irq_get_irq_data(virq);
1509 struct irq_desc *desc;
1510 int rv = 0;
1511
1512 /*
1513 * Check that no action has been set, which indicates the virq
1514 * is in a state where this function doesn't have to deal with
1515 * races between interrupt handling and maintaining the
1516 * hierarchy. This will catch gross misuse. Attempting to
1517 * make the check race free would require holding locks across
1518 * calls to struct irq_domain_ops->alloc(), which could lead
1519 * to deadlock, so we just do a simple check before starting.
1520 */
1521 desc = irq_to_desc(virq);
1522 if (!desc)
1523 return -EINVAL;
1524 if (WARN_ON(desc->action))
1525 return -EBUSY;
1526
1527 if (domain == NULL)
1528 return -EINVAL;
1529
1530 if (WARN_ON(!irq_domain_is_hierarchy(domain)))
1531 return -EINVAL;
1532
Dan Carpenter20c4d492017-08-25 15:14:09 +03001533 if (!root_irq_data)
David Daney495c38d2017-08-17 17:53:34 -07001534 return -EINVAL;
1535
Dan Carpenter20c4d492017-08-25 15:14:09 +03001536 if (domain->parent != root_irq_data->domain)
David Daney495c38d2017-08-17 17:53:34 -07001537 return -EINVAL;
1538
1539 child_irq_data = kzalloc_node(sizeof(*child_irq_data), GFP_KERNEL,
1540 irq_data_get_node(root_irq_data));
1541 if (!child_irq_data)
1542 return -ENOMEM;
1543
1544 mutex_lock(&irq_domain_mutex);
1545
1546 /* Copy the original irq_data. */
1547 *child_irq_data = *root_irq_data;
1548
1549 /*
1550 * Overwrite the root_irq_data, which is embedded in struct
1551 * irq_desc, with values for this domain.
1552 */
1553 root_irq_data->parent_data = child_irq_data;
1554 root_irq_data->domain = domain;
1555 root_irq_data->mask = 0;
1556 root_irq_data->hwirq = 0;
1557 root_irq_data->chip = NULL;
1558 root_irq_data->chip_data = NULL;
1559
1560 /* May (probably does) set hwirq, chip, etc. */
1561 rv = irq_domain_alloc_irqs_hierarchy(domain, virq, 1, arg);
1562 if (rv) {
1563 /* Restore the original irq_data. */
1564 *root_irq_data = *child_irq_data;
Kevin Hao0f394da2020-01-20 12:35:47 +08001565 kfree(child_irq_data);
David Daney495c38d2017-08-17 17:53:34 -07001566 goto error;
1567 }
1568
1569 irq_domain_fix_revmap(child_irq_data);
1570 irq_domain_set_mapping(domain, root_irq_data->hwirq, root_irq_data);
1571
1572error:
1573 mutex_unlock(&irq_domain_mutex);
1574
1575 return rv;
1576}
1577EXPORT_SYMBOL_GPL(irq_domain_push_irq);
1578
1579/**
1580 * irq_domain_pop_irq() - Remove a domain from the top of a hierarchy.
1581 * @domain: Domain to remove.
1582 * @virq: Irq to remove the domain from.
1583 *
1584 * Undo the effects of a call to irq_domain_push_irq(). Must be
1585 * called either before request_irq() or after free_irq().
1586 */
1587int irq_domain_pop_irq(struct irq_domain *domain, int virq)
1588{
1589 struct irq_data *root_irq_data = irq_get_irq_data(virq);
1590 struct irq_data *child_irq_data;
1591 struct irq_data *tmp_irq_data;
1592 struct irq_desc *desc;
1593
1594 /*
1595 * Check that no action is set, which indicates the virq is in
1596 * a state where this function doesn't have to deal with races
1597 * between interrupt handling and maintaining the hierarchy.
1598 * This will catch gross misuse. Attempting to make the check
1599 * race free would require holding locks across calls to
1600 * struct irq_domain_ops->free(), which could lead to
1601 * deadlock, so we just do a simple check before starting.
1602 */
1603 desc = irq_to_desc(virq);
1604 if (!desc)
1605 return -EINVAL;
1606 if (WARN_ON(desc->action))
1607 return -EBUSY;
1608
1609 if (domain == NULL)
1610 return -EINVAL;
1611
1612 if (!root_irq_data)
1613 return -EINVAL;
1614
1615 tmp_irq_data = irq_domain_get_irq_data(domain, virq);
1616
1617 /* We can only "pop" if this domain is at the top of the list */
1618 if (WARN_ON(root_irq_data != tmp_irq_data))
1619 return -EINVAL;
1620
1621 if (WARN_ON(root_irq_data->domain != domain))
1622 return -EINVAL;
1623
1624 child_irq_data = root_irq_data->parent_data;
1625 if (WARN_ON(!child_irq_data))
1626 return -EINVAL;
1627
1628 mutex_lock(&irq_domain_mutex);
1629
1630 root_irq_data->parent_data = NULL;
1631
1632 irq_domain_clear_mapping(domain, root_irq_data->hwirq);
1633 irq_domain_free_irqs_hierarchy(domain, virq, 1);
1634
1635 /* Restore the original irq_data. */
1636 *root_irq_data = *child_irq_data;
1637
1638 irq_domain_fix_revmap(root_irq_data);
1639
1640 mutex_unlock(&irq_domain_mutex);
1641
1642 kfree(child_irq_data);
1643
1644 return 0;
1645}
1646EXPORT_SYMBOL_GPL(irq_domain_pop_irq);
1647
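/*
 * Illustrative sketch only (#if 0, never built): irq_domain_push_irq() and
 * irq_domain_pop_irq() bracket the lifetime of an extra translation layer on
 * an already allocated virq; neither may be called while a handler is
 * requested. "aux_domain" and "arg" are assumptions for the sketch.
 */
#if 0
static int example_wrap_irq(struct irq_domain *aux_domain, int virq, void *arg)
{
	int ret = irq_domain_push_irq(aux_domain, virq, arg);

	if (ret)
		return ret;

	/* ... request_irq()/free_irq() happen in between ... */

	return irq_domain_pop_irq(aux_domain, virq);
}
#endif
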
Jiang Liuf8264e32014-11-06 22:20:14 +08001648/**
1649 * irq_domain_free_irqs - Free IRQ number and associated data structures
1650 * @virq: base IRQ number
1651 * @nr_irqs: number of IRQs to free
1652 */
1653void irq_domain_free_irqs(unsigned int virq, unsigned int nr_irqs)
1654{
1655 struct irq_data *data = irq_get_irq_data(virq);
1656 int i;
1657
1658 if (WARN(!data || !data->domain || !data->domain->ops->free,
1659 "NULL pointer, cannot free irq\n"))
1660 return;
1661
1662 mutex_lock(&irq_domain_mutex);
1663 for (i = 0; i < nr_irqs; i++)
1664 irq_domain_remove_irq(virq + i);
Marc Zyngier6a6544e2017-06-20 22:17:44 +01001665 irq_domain_free_irqs_hierarchy(data->domain, virq, nr_irqs);
Jiang Liuf8264e32014-11-06 22:20:14 +08001666 mutex_unlock(&irq_domain_mutex);
1667
1668 irq_domain_free_irq_data(virq, nr_irqs);
1669 irq_free_descs(virq, nr_irqs);
1670}
1671
1672/**
Jiang Liu36d72732014-11-15 22:24:01 +08001673 * irq_domain_alloc_irqs_parent - Allocate interrupts from parent domain
1674 * @irq_base: Base IRQ number
1675 * @nr_irqs: Number of IRQs to allocate
1676 * @arg: Allocation data (arch/domain specific)
1677 *
 1678 * If the domain has a parent domain, allocate the interrupts through it;
 1679 * otherwise return -ENOSYS.
1680 */
1681int irq_domain_alloc_irqs_parent(struct irq_domain *domain,
1682 unsigned int irq_base, unsigned int nr_irqs,
1683 void *arg)
1684{
Marc Zyngier6a6544e2017-06-20 22:17:44 +01001685 if (!domain->parent)
1686 return -ENOSYS;
Jiang Liu36d72732014-11-15 22:24:01 +08001687
Marc Zyngier6a6544e2017-06-20 22:17:44 +01001688 return irq_domain_alloc_irqs_hierarchy(domain->parent, irq_base,
1689 nr_irqs, arg);
Jiang Liu36d72732014-11-15 22:24:01 +08001690}
Quan Nguyen52b2a052016-03-03 21:56:52 +07001691EXPORT_SYMBOL_GPL(irq_domain_alloc_irqs_parent);
Jiang Liu36d72732014-11-15 22:24:01 +08001692
1693/**
1694 * irq_domain_free_irqs_parent - Free interrupts from parent domain
1695 * @irq_base: Base IRQ number
1696 * @nr_irqs: Number of IRQs to free
1697 *
 1698 * If the domain has a parent domain, free the interrupts through it;
 1699 * otherwise there is nothing to do.
1700 */
1701void irq_domain_free_irqs_parent(struct irq_domain *domain,
1702 unsigned int irq_base, unsigned int nr_irqs)
1703{
Marc Zyngier6a6544e2017-06-20 22:17:44 +01001704 if (!domain->parent)
1705 return;
1706
1707 irq_domain_free_irqs_hierarchy(domain->parent, irq_base, nr_irqs);
Jiang Liu36d72732014-11-15 22:24:01 +08001708}
Quan Nguyen52b2a052016-03-03 21:56:52 +07001709EXPORT_SYMBOL_GPL(irq_domain_free_irqs_parent);
Jiang Liu36d72732014-11-15 22:24:01 +08001710
Marc Zyngier08d85f32017-01-17 16:00:48 +00001711static void __irq_domain_deactivate_irq(struct irq_data *irq_data)
1712{
1713 if (irq_data && irq_data->domain) {
1714 struct irq_domain *domain = irq_data->domain;
1715
1716 if (domain->ops->deactivate)
1717 domain->ops->deactivate(domain, irq_data);
1718 if (irq_data->parent_data)
1719 __irq_domain_deactivate_irq(irq_data->parent_data);
1720 }
1721}
1722
Thomas Gleixner702cb0a2017-12-29 16:59:06 +01001723static int __irq_domain_activate_irq(struct irq_data *irqd, bool reserve)
Thomas Gleixnerbb9b4282017-09-13 23:29:11 +02001724{
1725 int ret = 0;
1726
1727 if (irqd && irqd->domain) {
1728 struct irq_domain *domain = irqd->domain;
1729
1730 if (irqd->parent_data)
Thomas Gleixner42e1cc22017-09-13 23:29:12 +02001731 ret = __irq_domain_activate_irq(irqd->parent_data,
Thomas Gleixner702cb0a2017-12-29 16:59:06 +01001732 reserve);
Thomas Gleixnerbb9b4282017-09-13 23:29:11 +02001733 if (!ret && domain->ops->activate) {
Thomas Gleixner702cb0a2017-12-29 16:59:06 +01001734 ret = domain->ops->activate(domain, irqd, reserve);
Thomas Gleixnerbb9b4282017-09-13 23:29:11 +02001735 /* Rollback in case of error */
1736 if (ret && irqd->parent_data)
1737 __irq_domain_deactivate_irq(irqd->parent_data);
1738 }
1739 }
1740 return ret;
1741}
1742
Jiang Liu36d72732014-11-15 22:24:01 +08001743/**
Jiang Liuf8264e32014-11-06 22:20:14 +08001744 * irq_domain_activate_irq - Call domain_ops->activate recursively to activate
1745 * interrupt
Thomas Gleixner702cb0a2017-12-29 16:59:06 +01001746 * @irq_data: Outermost irq_data associated with interrupt
1747 * @reserve: If set only reserve an interrupt vector instead of assigning one
Jiang Liuf8264e32014-11-06 22:20:14 +08001748 *
1749 * This is the second step to call domain_ops->activate to program interrupt
1750 * controllers, so the interrupt could actually get delivered.
1751 */
Thomas Gleixner702cb0a2017-12-29 16:59:06 +01001752int irq_domain_activate_irq(struct irq_data *irq_data, bool reserve)
Jiang Liuf8264e32014-11-06 22:20:14 +08001753{
Thomas Gleixnerbb9b4282017-09-13 23:29:11 +02001754 int ret = 0;
1755
1756 if (!irqd_is_activated(irq_data))
Thomas Gleixner702cb0a2017-12-29 16:59:06 +01001757 ret = __irq_domain_activate_irq(irq_data, reserve);
Thomas Gleixnerbb9b4282017-09-13 23:29:11 +02001758 if (!ret)
Marc Zyngier08d85f32017-01-17 16:00:48 +00001759 irqd_set_activated(irq_data);
Thomas Gleixnerbb9b4282017-09-13 23:29:11 +02001760 return ret;
Jiang Liuf8264e32014-11-06 22:20:14 +08001761}
1762
1763/**
1764 * irq_domain_deactivate_irq - Call domain_ops->deactivate recursively to
1765 * deactivate interrupt
1766 * @irq_data: outermost irq_data associated with interrupt
1767 *
1768 * It calls domain_ops->deactivate to program interrupt controllers to disable
1769 * interrupt delivery.
1770 */
1771void irq_domain_deactivate_irq(struct irq_data *irq_data)
1772{
Marc Zyngier08d85f32017-01-17 16:00:48 +00001773 if (irqd_is_activated(irq_data)) {
1774 __irq_domain_deactivate_irq(irq_data);
1775 irqd_clr_activated(irq_data);
Jiang Liuf8264e32014-11-06 22:20:14 +08001776 }
1777}
1778
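/*
 * Illustrative sketch only (#if 0, never built): core code pairs the two
 * calls above around starting up and shutting down an interrupt. "irqd" is
 * assumed to be the outermost irq_data of an already allocated interrupt.
 */
#if 0
	if (!irq_domain_activate_irq(irqd, false)) {
		/* hardware is now programmed; undo it later with: */
		irq_domain_deactivate_irq(irqd);
	}
#endif
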
1779static void irq_domain_check_hierarchy(struct irq_domain *domain)
1780{
1781 /* Hierarchy irq_domains must implement callback alloc() */
1782 if (domain->ops->alloc)
1783 domain->flags |= IRQ_DOMAIN_FLAG_HIERARCHY;
1784}
Eric Auger631a9632017-01-19 20:57:57 +00001785
1786/**
1787 * irq_domain_hierarchical_is_msi_remap - Check if the domain or any
1788 * parent has MSI remapping support
1789 * @domain: domain pointer
1790 */
1791bool irq_domain_hierarchical_is_msi_remap(struct irq_domain *domain)
1792{
1793 for (; domain; domain = domain->parent) {
1794 if (irq_domain_is_msi_remap(domain))
1795 return true;
1796 }
1797 return false;
1798}
Jiang Liuf8264e32014-11-06 22:20:14 +08001799#else /* CONFIG_IRQ_DOMAIN_HIERARCHY */
1800/**
1801 * irq_domain_get_irq_data - Get irq_data associated with @virq and @domain
1802 * @domain: domain to match
1803 * @virq: IRQ number to get irq_data
1804 */
1805struct irq_data *irq_domain_get_irq_data(struct irq_domain *domain,
1806 unsigned int virq)
1807{
1808 struct irq_data *irq_data = irq_get_irq_data(virq);
1809
1810 return (irq_data && irq_data->domain == domain) ? irq_data : NULL;
1811}
Jake Oshinsa4289dc2015-12-10 17:52:59 +00001812EXPORT_SYMBOL_GPL(irq_domain_get_irq_data);
Jiang Liuf8264e32014-11-06 22:20:14 +08001813
Stefan Agner5f22f5c2015-05-16 11:44:13 +02001814/**
1815 * irq_domain_set_info - Set the complete data for a @virq in @domain
1816 * @domain: Interrupt domain to match
1817 * @virq: IRQ number
1818 * @hwirq: The hardware interrupt number
1819 * @chip: The associated interrupt chip
1820 * @chip_data: The associated interrupt chip data
1821 * @handler: The interrupt flow handler
1822 * @handler_data: The interrupt flow handler data
1823 * @handler_name: The interrupt handler name
1824 */
1825void irq_domain_set_info(struct irq_domain *domain, unsigned int virq,
1826 irq_hw_number_t hwirq, struct irq_chip *chip,
1827 void *chip_data, irq_flow_handler_t handler,
1828 void *handler_data, const char *handler_name)
1829{
1830 irq_set_chip_and_handler_name(virq, chip, handler, handler_name);
1831 irq_set_chip_data(virq, chip_data);
1832 irq_set_handler_data(virq, handler_data);
1833}
1834
Jiang Liuf8264e32014-11-06 22:20:14 +08001835static void irq_domain_check_hierarchy(struct irq_domain *domain)
1836{
1837}
1838#endif /* CONFIG_IRQ_DOMAIN_HIERARCHY */
Thomas Gleixner087cdfb2017-06-20 01:37:17 +02001839
1840#ifdef CONFIG_GENERIC_IRQ_DEBUGFS
1841static struct dentry *domain_dir;
1842
1843static void
1844irq_domain_debug_show_one(struct seq_file *m, struct irq_domain *d, int ind)
1845{
1846 seq_printf(m, "%*sname: %s\n", ind, "", d->name);
1847 seq_printf(m, "%*ssize: %u\n", ind + 1, "",
1848 d->revmap_size + d->revmap_direct_max_irq);
1849 seq_printf(m, "%*smapped: %u\n", ind + 1, "", d->mapcount);
 1850	seq_printf(m, "%*sflags:  0x%08x\n", ind + 1, "", d->flags);
Thomas Gleixnerc3e72392017-09-13 23:29:06 +02001851 if (d->ops && d->ops->debug_show)
1852 d->ops->debug_show(m, d, NULL, ind + 1);
Thomas Gleixner087cdfb2017-06-20 01:37:17 +02001853#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
1854 if (!d->parent)
1855 return;
1856 seq_printf(m, "%*sparent: %s\n", ind + 1, "", d->parent->name);
1857 irq_domain_debug_show_one(m, d->parent, ind + 4);
1858#endif
1859}
1860
1861static int irq_domain_debug_show(struct seq_file *m, void *p)
1862{
1863 struct irq_domain *d = m->private;
1864
1865 /* Default domain? Might be NULL */
1866 if (!d) {
1867 if (!irq_default_domain)
1868 return 0;
1869 d = irq_default_domain;
1870 }
1871 irq_domain_debug_show_one(m, d, 0);
1872 return 0;
1873}
Andy Shevchenko0b24a0b2018-02-14 17:47:35 +02001874DEFINE_SHOW_ATTRIBUTE(irq_domain_debug);
Thomas Gleixner087cdfb2017-06-20 01:37:17 +02001875
1876static void debugfs_add_domain_dir(struct irq_domain *d)
1877{
1878 if (!d->name || !domain_dir || d->debugfs_file)
1879 return;
1880 d->debugfs_file = debugfs_create_file(d->name, 0444, domain_dir, d,
Andy Shevchenko0b24a0b2018-02-14 17:47:35 +02001881 &irq_domain_debug_fops);
Thomas Gleixner087cdfb2017-06-20 01:37:17 +02001882}
1883
1884static void debugfs_remove_domain_dir(struct irq_domain *d)
1885{
Thomas Gleixnerf610c9d2017-07-07 08:57:57 +02001886 debugfs_remove(d->debugfs_file);
Marc Zyngier513145e2018-10-01 11:05:21 +01001887 d->debugfs_file = NULL;
Thomas Gleixner087cdfb2017-06-20 01:37:17 +02001888}
1889
1890void __init irq_domain_debugfs_init(struct dentry *root)
1891{
1892 struct irq_domain *d;
1893
1894 domain_dir = debugfs_create_dir("domains", root);
Thomas Gleixner087cdfb2017-06-20 01:37:17 +02001895
Andy Shevchenko0b24a0b2018-02-14 17:47:35 +02001896 debugfs_create_file("default", 0444, domain_dir, NULL,
1897 &irq_domain_debug_fops);
Thomas Gleixner087cdfb2017-06-20 01:37:17 +02001898 mutex_lock(&irq_domain_mutex);
1899 list_for_each_entry(d, &irq_domain_list, link)
1900 debugfs_add_domain_dir(d);
1901 mutex_unlock(&irq_domain_mutex);
1902}
1903#endif