// SPDX-License-Identifier: GPL-2.0

#define pr_fmt(fmt) "irq: " fmt

#include <linux/acpi.h>
#include <linux/debugfs.h>
#include <linux/hardirq.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqdesc.h>
#include <linux/irqdomain.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/topology.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/fs.h>

static LIST_HEAD(irq_domain_list);
static DEFINE_MUTEX(irq_domain_mutex);

static struct irq_domain *irq_default_domain;

static void irq_domain_check_hierarchy(struct irq_domain *domain);

struct irqchip_fwid {
	struct fwnode_handle	fwnode;
	unsigned int		type;
	char			*name;
	phys_addr_t		*pa;
};

#ifdef CONFIG_GENERIC_IRQ_DEBUGFS
static void debugfs_add_domain_dir(struct irq_domain *d);
static void debugfs_remove_domain_dir(struct irq_domain *d);
#else
static inline void debugfs_add_domain_dir(struct irq_domain *d) { }
static inline void debugfs_remove_domain_dir(struct irq_domain *d) { }
#endif

const struct fwnode_operations irqchip_fwnode_ops;
EXPORT_SYMBOL_GPL(irqchip_fwnode_ops);

/**
 * __irq_domain_alloc_fwnode - Allocate a fwnode_handle suitable for
 *                             identifying an irq domain
 * @type: Type of irqchip_fwnode. See linux/irqdomain.h
 * @id: Optional user provided id if name != NULL
 * @name: Optional user provided domain name
 * @pa: Optional user-provided physical address
 *
 * Allocate a struct irqchip_fwid, and return a pointer to the embedded
 * fwnode_handle (or NULL on failure).
 *
 * Note: The types IRQCHIP_FWNODE_NAMED and IRQCHIP_FWNODE_NAMED_ID are
 * solely to transport name information to irqdomain creation code. The
 * node is not stored. For other types the pointer is kept in the irq
 * domain struct.
 */
struct fwnode_handle *__irq_domain_alloc_fwnode(unsigned int type, int id,
						const char *name,
						phys_addr_t *pa)
{
	struct irqchip_fwid *fwid;
	char *n;

	fwid = kzalloc(sizeof(*fwid), GFP_KERNEL);

	switch (type) {
	case IRQCHIP_FWNODE_NAMED:
		n = kasprintf(GFP_KERNEL, "%s", name);
		break;
	case IRQCHIP_FWNODE_NAMED_ID:
		n = kasprintf(GFP_KERNEL, "%s-%d", name, id);
		break;
	default:
		n = kasprintf(GFP_KERNEL, "irqchip@%pa", pa);
		break;
	}

	if (!fwid || !n) {
		kfree(fwid);
		kfree(n);
		return NULL;
	}

	fwid->type = type;
	fwid->name = n;
	fwid->pa = pa;
	fwid->fwnode.ops = &irqchip_fwnode_ops;
	return &fwid->fwnode;
}
EXPORT_SYMBOL_GPL(__irq_domain_alloc_fwnode);

/**
 * irq_domain_free_fwnode - Free a non-OF-backed fwnode_handle
 *
 * Free a fwnode_handle allocated with irq_domain_alloc_fwnode.
 */
void irq_domain_free_fwnode(struct fwnode_handle *fwnode)
{
	struct irqchip_fwid *fwid;

	if (WARN_ON(!is_fwnode_irqchip(fwnode)))
		return;

	fwid = container_of(fwnode, struct irqchip_fwid, fwnode);
	kfree(fwid->name);
	kfree(fwid);
}
EXPORT_SYMBOL_GPL(irq_domain_free_fwnode);

/**
 * __irq_domain_add() - Allocate a new irq_domain data structure
 * @fwnode: firmware node for the interrupt controller
 * @size: Size of linear map; 0 for radix mapping only
 * @hwirq_max: Maximum number of interrupts supported by controller
 * @direct_max: Maximum value of direct maps; Use ~0 for no limit; 0 for no
 *              direct mapping
 * @ops: domain callbacks
 * @host_data: Controller private data pointer
 *
 * Allocates and initializes an irq_domain structure.
 * Returns pointer to IRQ domain, or NULL on failure.
 */
struct irq_domain *__irq_domain_add(struct fwnode_handle *fwnode, int size,
				    irq_hw_number_t hwirq_max, int direct_max,
				    const struct irq_domain_ops *ops,
				    void *host_data)
{
	struct irqchip_fwid *fwid;
	struct irq_domain *domain;

	static atomic_t unknown_domains;

	domain = kzalloc_node(sizeof(*domain) + (sizeof(unsigned int) * size),
			      GFP_KERNEL, of_node_to_nid(to_of_node(fwnode)));
	if (!domain)
		return NULL;

	if (fwnode && is_fwnode_irqchip(fwnode)) {
		fwid = container_of(fwnode, struct irqchip_fwid, fwnode);

		switch (fwid->type) {
		case IRQCHIP_FWNODE_NAMED:
		case IRQCHIP_FWNODE_NAMED_ID:
			domain->fwnode = fwnode;
			domain->name = kstrdup(fwid->name, GFP_KERNEL);
			if (!domain->name) {
				kfree(domain);
				return NULL;
			}
			domain->flags |= IRQ_DOMAIN_NAME_ALLOCATED;
			break;
		default:
			domain->fwnode = fwnode;
			domain->name = fwid->name;
			break;
		}
	} else if (is_of_node(fwnode) || is_acpi_device_node(fwnode) ||
		   is_software_node(fwnode)) {
		char *name;

		/*
		 * fwnode paths contain '/', which debugfs is legitimately
		 * unhappy about. Replace them with ':', which does
		 * the trick and is not as offensive as '\'...
		 */
		name = kasprintf(GFP_KERNEL, "%pfw", fwnode);
		if (!name) {
			kfree(domain);
			return NULL;
		}

		strreplace(name, '/', ':');

		domain->name = name;
		domain->fwnode = fwnode;
		domain->flags |= IRQ_DOMAIN_NAME_ALLOCATED;
	}

	if (!domain->name) {
		if (fwnode)
			pr_err("Invalid fwnode type for irqdomain\n");
		domain->name = kasprintf(GFP_KERNEL, "unknown-%d",
					 atomic_inc_return(&unknown_domains));
		if (!domain->name) {
			kfree(domain);
			return NULL;
		}
		domain->flags |= IRQ_DOMAIN_NAME_ALLOCATED;
	}

	fwnode_handle_get(fwnode);

	/* Fill structure */
	INIT_RADIX_TREE(&domain->revmap_tree, GFP_KERNEL);
	mutex_init(&domain->revmap_tree_mutex);
	domain->ops = ops;
	domain->host_data = host_data;
	domain->hwirq_max = hwirq_max;
	domain->revmap_size = size;
	domain->revmap_direct_max_irq = direct_max;
	irq_domain_check_hierarchy(domain);

	mutex_lock(&irq_domain_mutex);
	debugfs_add_domain_dir(domain);
	list_add(&domain->link, &irq_domain_list);
	mutex_unlock(&irq_domain_mutex);

	pr_debug("Added domain %s\n", domain->name);
	return domain;
}
EXPORT_SYMBOL_GPL(__irq_domain_add);

/**
 * irq_domain_remove() - Remove an irq domain.
 * @domain: domain to remove
 *
 * This routine is used to remove an irq domain. The caller must ensure
 * that all mappings within the domain have been disposed of prior to
 * use, depending on the revmap type.
 */
void irq_domain_remove(struct irq_domain *domain)
{
	mutex_lock(&irq_domain_mutex);
	debugfs_remove_domain_dir(domain);

	WARN_ON(!radix_tree_empty(&domain->revmap_tree));

	list_del(&domain->link);

	/*
	 * If the going away domain is the default one, reset it.
	 */
	if (unlikely(irq_default_domain == domain))
		irq_set_default_host(NULL);

	mutex_unlock(&irq_domain_mutex);

	pr_debug("Removed domain %s\n", domain->name);

	fwnode_handle_put(domain->fwnode);
	if (domain->flags & IRQ_DOMAIN_NAME_ALLOCATED)
		kfree(domain->name);
	kfree(domain);
}
EXPORT_SYMBOL_GPL(irq_domain_remove);

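/**
 * irq_domain_update_bus_token() - Update the @bus_token of an irq domain
 * @domain: domain to update
 * @bus_token: new bus token
 *
 * Replaces the domain name (and its debugfs directory) with
 * "<name>-<bus_token>" so that domains which share a firmware node but
 * serve different bus tokens stay distinguishable.
 */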
void irq_domain_update_bus_token(struct irq_domain *domain,
				 enum irq_domain_bus_token bus_token)
{
	char *name;

	if (domain->bus_token == bus_token)
		return;

	mutex_lock(&irq_domain_mutex);

	domain->bus_token = bus_token;

	name = kasprintf(GFP_KERNEL, "%s-%d", domain->name, bus_token);
	if (!name) {
		mutex_unlock(&irq_domain_mutex);
		return;
	}

	debugfs_remove_domain_dir(domain);

	if (domain->flags & IRQ_DOMAIN_NAME_ALLOCATED)
		kfree(domain->name);
	else
		domain->flags |= IRQ_DOMAIN_NAME_ALLOCATED;

	domain->name = name;
	debugfs_add_domain_dir(domain);

	mutex_unlock(&irq_domain_mutex);
}

/**
 * irq_domain_add_simple() - Register an irq_domain and optionally map a range of irqs
 * @of_node: pointer to interrupt controller's device tree node.
 * @size: total number of irqs in mapping
 * @first_irq: first number of irq block assigned to the domain,
 *	pass zero to assign irqs on-the-fly. If first_irq is non-zero, then
 *	pre-map all of the irqs in the domain to virqs starting at first_irq.
 * @ops: domain callbacks
 * @host_data: Controller private data pointer
 *
 * Allocates an irq_domain, and optionally if first_irq is positive then also
 * allocate irq_descs and map all of the hwirqs to virqs starting at first_irq.
 *
 * This is intended to implement the expected behaviour for most
 * interrupt controllers. If device tree is used, then first_irq will be 0 and
 * irqs get mapped dynamically on the fly. However, if the controller requires
 * static virq assignments (non-DT boot) then it will set that up correctly.
 */
struct irq_domain *irq_domain_add_simple(struct device_node *of_node,
					 unsigned int size,
					 unsigned int first_irq,
					 const struct irq_domain_ops *ops,
					 void *host_data)
{
	struct irq_domain *domain;

	domain = __irq_domain_add(of_node_to_fwnode(of_node), size, size, 0, ops, host_data);
	if (!domain)
		return NULL;

	if (first_irq > 0) {
		if (IS_ENABLED(CONFIG_SPARSE_IRQ)) {
			/* attempt to allocate irq_descs */
			int rc = irq_alloc_descs(first_irq, first_irq, size,
						 of_node_to_nid(of_node));
			if (rc < 0)
				pr_info("Cannot allocate irq_descs @ IRQ%d, assuming pre-allocated\n",
					first_irq);
		}
		irq_domain_associate_many(domain, first_irq, 0, size);
	}

	return domain;
}
EXPORT_SYMBOL_GPL(irq_domain_add_simple);

/**
 * irq_domain_add_legacy() - Allocate and register a legacy revmap irq_domain.
 * @of_node: pointer to interrupt controller's device tree node.
 * @size: total number of irqs in legacy mapping
 * @first_irq: first number of irq block assigned to the domain
 * @first_hwirq: first hwirq number to use for the translation. Should normally
 *		 be '0', but a positive integer can be used if the effective
 *		 hwirqs numbering does not begin at zero.
 * @ops: map/unmap domain callbacks
 * @host_data: Controller private data pointer
 *
 * Note: the map() callback will be called before this function returns
 * for all legacy interrupts except 0 (which is always the invalid irq for
 * a legacy controller).
 */
struct irq_domain *irq_domain_add_legacy(struct device_node *of_node,
					 unsigned int size,
					 unsigned int first_irq,
					 irq_hw_number_t first_hwirq,
					 const struct irq_domain_ops *ops,
					 void *host_data)
{
	struct irq_domain *domain;

	domain = __irq_domain_add(of_node_to_fwnode(of_node), first_hwirq + size,
				  first_hwirq + size, 0, ops, host_data);
	if (domain)
		irq_domain_associate_many(domain, first_irq, first_hwirq, size);

	return domain;
}
EXPORT_SYMBOL_GPL(irq_domain_add_legacy);

/**
 * irq_find_matching_fwspec() - Locates a domain for a given fwspec
 * @fwspec: FW specifier for an interrupt
 * @bus_token: domain-specific data
 */
struct irq_domain *irq_find_matching_fwspec(struct irq_fwspec *fwspec,
					    enum irq_domain_bus_token bus_token)
{
	struct irq_domain *h, *found = NULL;
	struct fwnode_handle *fwnode = fwspec->fwnode;
	int rc;

	/* We might want to match the legacy controller last since
	 * it might potentially be set to match all interrupts in
	 * the absence of a device node. This isn't a problem so far
	 * yet though...
	 *
	 * bus_token == DOMAIN_BUS_ANY matches any domain, any other
	 * values must generate an exact match for the domain to be
	 * selected.
	 */
	mutex_lock(&irq_domain_mutex);
	list_for_each_entry(h, &irq_domain_list, link) {
		if (h->ops->select && fwspec->param_count)
			rc = h->ops->select(h, fwspec, bus_token);
		else if (h->ops->match)
			rc = h->ops->match(h, to_of_node(fwnode), bus_token);
		else
			rc = ((fwnode != NULL) && (h->fwnode == fwnode) &&
			      ((bus_token == DOMAIN_BUS_ANY) ||
			       (h->bus_token == bus_token)));

		if (rc) {
			found = h;
			break;
		}
	}
	mutex_unlock(&irq_domain_mutex);
	return found;
}
EXPORT_SYMBOL_GPL(irq_find_matching_fwspec);

/**
 * irq_domain_check_msi_remap - Check whether all MSI irq domains implement
 * IRQ remapping
 *
 * Return: false if any MSI irq domain does not support IRQ remapping,
 * true otherwise (including if there is no MSI irq domain)
 */
bool irq_domain_check_msi_remap(void)
{
	struct irq_domain *h;
	bool ret = true;

	mutex_lock(&irq_domain_mutex);
	list_for_each_entry(h, &irq_domain_list, link) {
		if (irq_domain_is_msi(h) &&
		    !irq_domain_hierarchical_is_msi_remap(h)) {
			ret = false;
			break;
		}
	}
	mutex_unlock(&irq_domain_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(irq_domain_check_msi_remap);

/**
 * irq_set_default_host() - Set a "default" irq domain
 * @domain: default domain pointer
 *
 * For convenience, it's possible to set a "default" domain that will be used
 * whenever NULL is passed to irq_create_mapping(). It makes life easier for
 * platforms that want to manipulate a few hard coded interrupt numbers that
 * aren't properly represented in the device-tree.
 */
void irq_set_default_host(struct irq_domain *domain)
{
	pr_debug("Default domain set to @0x%p\n", domain);

	irq_default_domain = domain;
}
EXPORT_SYMBOL_GPL(irq_set_default_host);

/**
 * irq_get_default_host() - Retrieve the "default" irq domain
 *
 * Returns: the default domain, if any.
 *
 * Modern code should never use this. This should only be used on
 * systems that cannot implement a firmware->fwnode mapping (which
 * both DT and ACPI provide).
 */
struct irq_domain *irq_get_default_host(void)
{
	return irq_default_domain;
}

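/*
 * Reverse-map helpers: hwirqs below revmap_size are kept in the
 * linear_revmap[] array; larger hwirqs live in the radix tree, whose
 * updates are serialized by revmap_tree_mutex (lookups use RCU).
 */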
static void irq_domain_clear_mapping(struct irq_domain *domain,
				     irq_hw_number_t hwirq)
{
	if (hwirq < domain->revmap_size) {
		domain->linear_revmap[hwirq] = 0;
	} else {
		mutex_lock(&domain->revmap_tree_mutex);
		radix_tree_delete(&domain->revmap_tree, hwirq);
		mutex_unlock(&domain->revmap_tree_mutex);
	}
}

static void irq_domain_set_mapping(struct irq_domain *domain,
				   irq_hw_number_t hwirq,
				   struct irq_data *irq_data)
{
	if (hwirq < domain->revmap_size) {
		domain->linear_revmap[hwirq] = irq_data->irq;
	} else {
		mutex_lock(&domain->revmap_tree_mutex);
		radix_tree_insert(&domain->revmap_tree, hwirq, irq_data);
		mutex_unlock(&domain->revmap_tree_mutex);
	}
}

void irq_domain_disassociate(struct irq_domain *domain, unsigned int irq)
{
	struct irq_data *irq_data = irq_get_irq_data(irq);
	irq_hw_number_t hwirq;

	if (WARN(!irq_data || irq_data->domain != domain,
		 "virq%i doesn't exist; cannot disassociate\n", irq))
		return;

	hwirq = irq_data->hwirq;
	irq_set_status_flags(irq, IRQ_NOREQUEST);

	/* remove chip and handler */
	irq_set_chip_and_handler(irq, NULL, NULL);

	/* Make sure it's completed */
	synchronize_irq(irq);

	/* Tell the PIC about it */
	if (domain->ops->unmap)
		domain->ops->unmap(domain, irq);
	smp_mb();

	irq_data->domain = NULL;
	irq_data->hwirq = 0;
	domain->mapcount--;

	/* Clear reverse map for this hwirq */
	irq_domain_clear_mapping(domain, hwirq);
}

int irq_domain_associate(struct irq_domain *domain, unsigned int virq,
			 irq_hw_number_t hwirq)
{
	struct irq_data *irq_data = irq_get_irq_data(virq);
	int ret;

	if (WARN(hwirq >= domain->hwirq_max,
		 "error: hwirq 0x%x is too large for %s\n", (int)hwirq, domain->name))
		return -EINVAL;
	if (WARN(!irq_data, "error: virq%i is not allocated", virq))
		return -EINVAL;
	if (WARN(irq_data->domain, "error: virq%i is already associated", virq))
		return -EINVAL;

	mutex_lock(&irq_domain_mutex);
	irq_data->hwirq = hwirq;
	irq_data->domain = domain;
	if (domain->ops->map) {
		ret = domain->ops->map(domain, virq, hwirq);
		if (ret != 0) {
			/*
			 * If map() returns -EPERM, this interrupt is protected
			 * by the firmware or some other service and shall not
			 * be mapped. Don't bother telling the user about it.
			 */
			if (ret != -EPERM) {
				pr_info("%s didn't like hwirq-0x%lx to VIRQ%i mapping (rc=%d)\n",
					domain->name, hwirq, virq, ret);
			}
			irq_data->domain = NULL;
			irq_data->hwirq = 0;
			mutex_unlock(&irq_domain_mutex);
			return ret;
		}

		/* If not already assigned, give the domain the chip's name */
		if (!domain->name && irq_data->chip)
			domain->name = irq_data->chip->name;
	}

	domain->mapcount++;
	irq_domain_set_mapping(domain, hwirq, irq_data);
	mutex_unlock(&irq_domain_mutex);

	irq_clear_status_flags(virq, IRQ_NOREQUEST);

	return 0;
}
EXPORT_SYMBOL_GPL(irq_domain_associate);

void irq_domain_associate_many(struct irq_domain *domain, unsigned int irq_base,
			       irq_hw_number_t hwirq_base, int count)
{
	struct device_node *of_node;
	int i;

	of_node = irq_domain_get_of_node(domain);
	pr_debug("%s(%s, irqbase=%i, hwbase=%i, count=%i)\n", __func__,
		of_node_full_name(of_node), irq_base, (int)hwirq_base, count);

	for (i = 0; i < count; i++) {
		irq_domain_associate(domain, irq_base + i, hwirq_base + i);
	}
}
EXPORT_SYMBOL_GPL(irq_domain_associate_many);

/**
 * irq_create_direct_mapping() - Allocate an irq for direct mapping
 * @domain: domain to allocate the irq for or NULL for default domain
 *
 * This routine is used for irq controllers which can choose the hardware
 * interrupt numbers they generate. In such a case it's simplest to use
 * the linux irq as the hardware interrupt number. It still uses the linear
 * or radix tree to store the mapping, but the irq controller can optimize
 * the revmap path by using the hwirq directly.
 */
unsigned int irq_create_direct_mapping(struct irq_domain *domain)
{
	struct device_node *of_node;
	unsigned int virq;

	if (domain == NULL)
		domain = irq_default_domain;

	of_node = irq_domain_get_of_node(domain);
	virq = irq_alloc_desc_from(1, of_node_to_nid(of_node));
	if (!virq) {
		pr_debug("create_direct virq allocation failed\n");
		return 0;
	}
	if (virq >= domain->revmap_direct_max_irq) {
		pr_err("ERROR: no free irqs available below %i maximum\n",
			domain->revmap_direct_max_irq);
		irq_free_desc(virq);
		return 0;
	}
	pr_debug("create_direct obtained virq %d\n", virq);

	if (irq_domain_associate(domain, virq, virq)) {
		irq_free_desc(virq);
		return 0;
	}

	return virq;
}
EXPORT_SYMBOL_GPL(irq_create_direct_mapping);

/**
 * irq_create_mapping() - Map a hardware interrupt into linux irq space
 * @domain: domain owning this hardware interrupt or NULL for default domain
 * @hwirq: hardware irq number in that domain space
 *
 * Only one mapping per hardware interrupt is permitted. Returns a linux
 * irq number.
 * If the sense/trigger is to be specified, set_irq_type() should be called
 * on the number returned from that call.
 */
unsigned int irq_create_mapping(struct irq_domain *domain,
				irq_hw_number_t hwirq)
{
	struct device_node *of_node;
	int virq;

	pr_debug("irq_create_mapping(0x%p, 0x%lx)\n", domain, hwirq);

	/* Look for default domain if necessary */
	if (domain == NULL)
		domain = irq_default_domain;
	if (domain == NULL) {
		WARN(1, "%s(, %lx) called with NULL domain\n", __func__, hwirq);
		return 0;
	}
	pr_debug("-> using domain @%p\n", domain);

	of_node = irq_domain_get_of_node(domain);

	/* Check if mapping already exists */
	virq = irq_find_mapping(domain, hwirq);
	if (virq) {
		pr_debug("-> existing mapping on virq %d\n", virq);
		return virq;
	}

	/* Allocate a virtual interrupt number */
	virq = irq_domain_alloc_descs(-1, 1, hwirq, of_node_to_nid(of_node), NULL);
	if (virq <= 0) {
		pr_debug("-> virq allocation failed\n");
		return 0;
	}

	if (irq_domain_associate(domain, virq, hwirq)) {
		irq_free_desc(virq);
		return 0;
	}

	pr_debug("irq %lu on domain %s mapped to virtual irq %u\n",
		hwirq, of_node_full_name(of_node), virq);

	return virq;
}
EXPORT_SYMBOL_GPL(irq_create_mapping);

/**
 * irq_create_strict_mappings() - Map a range of hw irqs to fixed linux irqs
 * @domain: domain owning the interrupt range
 * @irq_base: beginning of linux IRQ range
 * @hwirq_base: beginning of hardware IRQ range
 * @count: Number of interrupts to map
 *
 * This routine is used for allocating and mapping a range of hardware
 * irqs to linux irqs where the linux irq numbers are at pre-defined
 * locations. For use by controllers that already have static mappings
 * to insert in to the domain.
 *
 * Non-linear users can use irq_create_identity_mapping() for IRQ-at-a-time
 * domain insertion.
 *
 * 0 is returned upon success, while any failure to establish a static
 * mapping is treated as an error.
 */
int irq_create_strict_mappings(struct irq_domain *domain, unsigned int irq_base,
			       irq_hw_number_t hwirq_base, int count)
{
	struct device_node *of_node;
	int ret;

	of_node = irq_domain_get_of_node(domain);
	ret = irq_alloc_descs(irq_base, irq_base, count,
			      of_node_to_nid(of_node));
	if (unlikely(ret < 0))
		return ret;

	irq_domain_associate_many(domain, irq_base, hwirq_base, count);
	return 0;
}
EXPORT_SYMBOL_GPL(irq_create_strict_mappings);

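/*
 * Translate an irq_fwspec into a (hwirq, type) pair: prefer the
 * hierarchy-aware ->translate() callback, fall back to the legacy OF
 * ->xlate() callback, and finally treat the first parameter as a plain
 * interrupt line with no trigger information.
 */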
static int irq_domain_translate(struct irq_domain *d,
				struct irq_fwspec *fwspec,
				irq_hw_number_t *hwirq, unsigned int *type)
{
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
	if (d->ops->translate)
		return d->ops->translate(d, fwspec, hwirq, type);
#endif
	if (d->ops->xlate)
		return d->ops->xlate(d, to_of_node(fwspec->fwnode),
				     fwspec->param, fwspec->param_count,
				     hwirq, type);

	/* If domain has no translation, then we assume interrupt line */
	*hwirq = fwspec->param[0];
	return 0;
}

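/*
 * Convert a DT interrupt specifier (phandle plus argument cells) into
 * the firmware-agnostic struct irq_fwspec used by the mapping code.
 */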
static void of_phandle_args_to_fwspec(struct device_node *np, const u32 *args,
				      unsigned int count,
				      struct irq_fwspec *fwspec)
{
	int i;

	fwspec->fwnode = np ? &np->fwnode : NULL;
	fwspec->param_count = count;

	for (i = 0; i < count; i++)
		fwspec->param[i] = args[i];
}

unsigned int irq_create_fwspec_mapping(struct irq_fwspec *fwspec)
{
	struct irq_domain *domain;
	struct irq_data *irq_data;
	irq_hw_number_t hwirq;
	unsigned int type = IRQ_TYPE_NONE;
	int virq;

	if (fwspec->fwnode) {
		domain = irq_find_matching_fwspec(fwspec, DOMAIN_BUS_WIRED);
		if (!domain)
			domain = irq_find_matching_fwspec(fwspec, DOMAIN_BUS_ANY);
	} else {
		domain = irq_default_domain;
	}

	if (!domain) {
		pr_warn("no irq domain found for %s !\n",
			of_node_full_name(to_of_node(fwspec->fwnode)));
		return 0;
	}

	if (irq_domain_translate(domain, fwspec, &hwirq, &type))
		return 0;

	/*
	 * WARN if the irqchip returns a type with bits
	 * outside the sense mask set and clear these bits.
	 */
	if (WARN_ON(type & ~IRQ_TYPE_SENSE_MASK))
		type &= IRQ_TYPE_SENSE_MASK;

	/*
	 * If we've already configured this interrupt,
	 * don't do it again, or hell will break loose.
	 */
	virq = irq_find_mapping(domain, hwirq);
	if (virq) {
		/*
		 * If the trigger type is not specified or matches the
		 * current trigger type then we are done so return the
		 * interrupt number.
		 */
		if (type == IRQ_TYPE_NONE || type == irq_get_trigger_type(virq))
			return virq;

		/*
		 * If the trigger type has not been set yet, then set
		 * it now and return the interrupt number.
		 */
		if (irq_get_trigger_type(virq) == IRQ_TYPE_NONE) {
			irq_data = irq_get_irq_data(virq);
			if (!irq_data)
				return 0;

			irqd_set_trigger_type(irq_data, type);
			return virq;
		}

		pr_warn("type mismatch, failed to map hwirq-%lu for %s!\n",
			hwirq, of_node_full_name(to_of_node(fwspec->fwnode)));
		return 0;
	}

	if (irq_domain_is_hierarchy(domain)) {
		virq = irq_domain_alloc_irqs(domain, 1, NUMA_NO_NODE, fwspec);
		if (virq <= 0)
			return 0;
	} else {
		/* Create mapping */
		virq = irq_create_mapping(domain, hwirq);
		if (!virq)
			return virq;
	}

	irq_data = irq_get_irq_data(virq);
	if (!irq_data) {
		if (irq_domain_is_hierarchy(domain))
			irq_domain_free_irqs(virq, 1);
		else
			irq_dispose_mapping(virq);
		return 0;
	}

	/* Store trigger type */
	irqd_set_trigger_type(irq_data, type);

	return virq;
}
EXPORT_SYMBOL_GPL(irq_create_fwspec_mapping);

unsigned int irq_create_of_mapping(struct of_phandle_args *irq_data)
{
	struct irq_fwspec fwspec;

	of_phandle_args_to_fwspec(irq_data->np, irq_data->args,
				  irq_data->args_count, &fwspec);

	return irq_create_fwspec_mapping(&fwspec);
}
EXPORT_SYMBOL_GPL(irq_create_of_mapping);

/**
 * irq_dispose_mapping() - Unmap an interrupt
 * @virq: linux irq number of the interrupt to unmap
 */
void irq_dispose_mapping(unsigned int virq)
{
	struct irq_data *irq_data = irq_get_irq_data(virq);
	struct irq_domain *domain;

	if (!virq || !irq_data)
		return;

	domain = irq_data->domain;
	if (WARN_ON(domain == NULL))
		return;

	if (irq_domain_is_hierarchy(domain)) {
		irq_domain_free_irqs(virq, 1);
	} else {
		irq_domain_disassociate(domain, virq);
		irq_free_desc(virq);
	}
}
EXPORT_SYMBOL_GPL(irq_dispose_mapping);

/**
 * irq_find_mapping() - Find a linux irq from a hw irq number.
 * @domain: domain owning this hardware interrupt
 * @hwirq: hardware irq number in that domain space
 */
unsigned int irq_find_mapping(struct irq_domain *domain,
			      irq_hw_number_t hwirq)
{
	struct irq_data *data;

	/* Look for default domain if necessary */
	if (domain == NULL)
		domain = irq_default_domain;
	if (domain == NULL)
		return 0;

	if (hwirq < domain->revmap_direct_max_irq) {
		data = irq_domain_get_irq_data(domain, hwirq);
		if (data && data->hwirq == hwirq)
			return hwirq;
	}

	/* Check if the hwirq is in the linear revmap. */
	if (hwirq < domain->revmap_size)
		return domain->linear_revmap[hwirq];

	rcu_read_lock();
	data = radix_tree_lookup(&domain->revmap_tree, hwirq);
	rcu_read_unlock();
	return data ? data->irq : 0;
}
EXPORT_SYMBOL_GPL(irq_find_mapping);

/**
 * irq_domain_xlate_onecell() - Generic xlate for direct one cell bindings
 *
 * Device Tree IRQ specifier translation function which works with one cell
 * bindings where the cell value maps directly to the hwirq number.
 */
int irq_domain_xlate_onecell(struct irq_domain *d, struct device_node *ctrlr,
			     const u32 *intspec, unsigned int intsize,
			     unsigned long *out_hwirq, unsigned int *out_type)
{
	if (WARN_ON(intsize < 1))
		return -EINVAL;
	*out_hwirq = intspec[0];
	*out_type = IRQ_TYPE_NONE;
	return 0;
}
EXPORT_SYMBOL_GPL(irq_domain_xlate_onecell);

/**
 * irq_domain_xlate_twocell() - Generic xlate for direct two cell bindings
 *
 * Device Tree IRQ specifier translation function which works with two cell
 * bindings where the cell values map directly to the hwirq number
 * and linux irq flags.
 */
int irq_domain_xlate_twocell(struct irq_domain *d, struct device_node *ctrlr,
			     const u32 *intspec, unsigned int intsize,
			     irq_hw_number_t *out_hwirq, unsigned int *out_type)
{
	struct irq_fwspec fwspec;

	of_phandle_args_to_fwspec(ctrlr, intspec, intsize, &fwspec);
	return irq_domain_translate_twocell(d, &fwspec, out_hwirq, out_type);
}
EXPORT_SYMBOL_GPL(irq_domain_xlate_twocell);

/**
 * irq_domain_xlate_onetwocell() - Generic xlate for one or two cell bindings
 *
 * Device Tree IRQ specifier translation function which works with either one
 * or two cell bindings where the cell values map directly to the hwirq number
 * and linux irq flags.
 *
 * Note: don't use this function unless your interrupt controller explicitly
 * supports both one and two cell bindings. For the majority of controllers
 * the _onecell() or _twocell() variants above should be used.
 */
int irq_domain_xlate_onetwocell(struct irq_domain *d,
				struct device_node *ctrlr,
				const u32 *intspec, unsigned int intsize,
				unsigned long *out_hwirq, unsigned int *out_type)
{
	if (WARN_ON(intsize < 1))
		return -EINVAL;
	*out_hwirq = intspec[0];
	if (intsize > 1)
		*out_type = intspec[1] & IRQ_TYPE_SENSE_MASK;
	else
		*out_type = IRQ_TYPE_NONE;
	return 0;
}
EXPORT_SYMBOL_GPL(irq_domain_xlate_onetwocell);

const struct irq_domain_ops irq_domain_simple_ops = {
	.xlate = irq_domain_xlate_onetwocell,
};
EXPORT_SYMBOL_GPL(irq_domain_simple_ops);

/**
 * irq_domain_translate_onecell() - Generic translate for direct one cell
 * bindings
 */
int irq_domain_translate_onecell(struct irq_domain *d,
				 struct irq_fwspec *fwspec,
				 unsigned long *out_hwirq,
				 unsigned int *out_type)
{
	if (WARN_ON(fwspec->param_count < 1))
		return -EINVAL;
	*out_hwirq = fwspec->param[0];
	*out_type = IRQ_TYPE_NONE;
	return 0;
}
EXPORT_SYMBOL_GPL(irq_domain_translate_onecell);

/**
 * irq_domain_translate_twocell() - Generic translate for direct two cell
 * bindings
 *
 * Device Tree IRQ specifier translation function which works with two cell
 * bindings where the cell values map directly to the hwirq number
 * and linux irq flags.
 */
int irq_domain_translate_twocell(struct irq_domain *d,
				 struct irq_fwspec *fwspec,
				 unsigned long *out_hwirq,
				 unsigned int *out_type)
{
	if (WARN_ON(fwspec->param_count < 2))
		return -EINVAL;
	*out_hwirq = fwspec->param[0];
	*out_type = fwspec->param[1] & IRQ_TYPE_SENSE_MASK;
	return 0;
}
EXPORT_SYMBOL_GPL(irq_domain_translate_twocell);

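/*
 * Allocate interrupt descriptors: honour an explicit virq request when
 * virq >= 0, otherwise use hwirq % nr_irqs as an allocation hint and
 * fall back to any free range if the hinted area is exhausted.
 */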
int irq_domain_alloc_descs(int virq, unsigned int cnt, irq_hw_number_t hwirq,
			   int node, const struct irq_affinity_desc *affinity)
{
	unsigned int hint;

	if (virq >= 0) {
		virq = __irq_alloc_descs(virq, virq, cnt, node, THIS_MODULE,
					 affinity);
	} else {
		hint = hwirq % nr_irqs;
		if (hint == 0)
			hint++;
		virq = __irq_alloc_descs(-1, hint, cnt, node, THIS_MODULE,
					 affinity);
		if (virq <= 0 && hint > 1) {
			virq = __irq_alloc_descs(-1, 1, cnt, node, THIS_MODULE,
						 affinity);
		}
	}

	return virq;
}

/**
 * irq_domain_reset_irq_data - Clear hwirq, chip and chip_data in @irq_data
 * @irq_data: The pointer to irq_data
 */
void irq_domain_reset_irq_data(struct irq_data *irq_data)
{
	irq_data->hwirq = 0;
	irq_data->chip = &no_irq_chip;
	irq_data->chip_data = NULL;
}
EXPORT_SYMBOL_GPL(irq_domain_reset_irq_data);

#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
/**
 * irq_domain_create_hierarchy - Add an irqdomain into the hierarchy
 * @parent: Parent irq domain to associate with the new domain
 * @flags: Irq domain flags associated to the domain
 * @size: Size of the domain. See below
 * @fwnode: Optional fwnode of the interrupt controller
 * @ops: Pointer to the interrupt domain callbacks
 * @host_data: Controller private data pointer
 *
 * If @size is 0 a tree domain is created, otherwise a linear domain.
 *
 * If successful the parent is associated to the new domain and the
 * domain flags are set.
 * Returns pointer to IRQ domain, or NULL on failure.
 */
struct irq_domain *irq_domain_create_hierarchy(struct irq_domain *parent,
					       unsigned int flags,
					       unsigned int size,
					       struct fwnode_handle *fwnode,
					       const struct irq_domain_ops *ops,
					       void *host_data)
{
	struct irq_domain *domain;

	if (size)
		domain = irq_domain_create_linear(fwnode, size, ops, host_data);
	else
		domain = irq_domain_create_tree(fwnode, ops, host_data);
	if (domain) {
		domain->parent = parent;
		domain->flags |= flags;
	}

	return domain;
}
EXPORT_SYMBOL_GPL(irq_domain_create_hierarchy);

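/*
 * Hierarchy bookkeeping: when a virq is inserted into or removed from a
 * hierarchical domain, walk the irq_data parent chain and update every
 * level's mapcount and reverse map (and its name, if still unset).
 */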
Jiang Liuf8264e32014-11-06 22:20:14 +08001085static void irq_domain_insert_irq(int virq)
1086{
1087 struct irq_data *data;
1088
1089 for (data = irq_get_irq_data(virq); data; data = data->parent_data) {
1090 struct irq_domain *domain = data->domain;
Jiang Liuf8264e32014-11-06 22:20:14 +08001091
Thomas Gleixner9dc6be32017-06-20 01:37:16 +02001092 domain->mapcount++;
David Daneyb526adf2017-08-17 17:53:32 -07001093 irq_domain_set_mapping(domain, data->hwirq, data);
Jiang Liuf8264e32014-11-06 22:20:14 +08001094
1095 /* If not already assigned, give the domain the chip's name */
1096 if (!domain->name && data->chip)
1097 domain->name = data->chip->name;
1098 }
1099
1100 irq_clear_status_flags(virq, IRQ_NOREQUEST);
1101}
1102
1103static void irq_domain_remove_irq(int virq)
1104{
1105 struct irq_data *data;
1106
1107 irq_set_status_flags(virq, IRQ_NOREQUEST);
1108 irq_set_chip_and_handler(virq, NULL, NULL);
1109 synchronize_irq(virq);
1110 smp_mb();
1111
1112 for (data = irq_get_irq_data(virq); data; data = data->parent_data) {
1113 struct irq_domain *domain = data->domain;
1114 irq_hw_number_t hwirq = data->hwirq;
1115
Thomas Gleixner9dc6be32017-06-20 01:37:16 +02001116 domain->mapcount--;
David Daneyb526adf2017-08-17 17:53:32 -07001117 irq_domain_clear_mapping(domain, hwirq);
Jiang Liuf8264e32014-11-06 22:20:14 +08001118 }
1119}
1120
1121static struct irq_data *irq_domain_insert_irq_data(struct irq_domain *domain,
1122 struct irq_data *child)
1123{
1124 struct irq_data *irq_data;
1125
Jiang Liu67830112015-06-01 16:05:13 +08001126 irq_data = kzalloc_node(sizeof(*irq_data), GFP_KERNEL,
1127 irq_data_get_node(child));
Jiang Liuf8264e32014-11-06 22:20:14 +08001128 if (irq_data) {
1129 child->parent_data = irq_data;
1130 irq_data->irq = child->irq;
Jiang Liu0d0b4c82015-06-01 16:05:12 +08001131 irq_data->common = child->common;
Jiang Liuf8264e32014-11-06 22:20:14 +08001132 irq_data->domain = domain;
1133 }
1134
1135 return irq_data;
1136}
1137
1138static void irq_domain_free_irq_data(unsigned int virq, unsigned int nr_irqs)
1139{
1140 struct irq_data *irq_data, *tmp;
1141 int i;
1142
1143 for (i = 0; i < nr_irqs; i++) {
1144 irq_data = irq_get_irq_data(virq + i);
1145 tmp = irq_data->parent_data;
1146 irq_data->parent_data = NULL;
1147 irq_data->domain = NULL;
1148
1149 while (tmp) {
1150 irq_data = tmp;
1151 tmp = tmp->parent_data;
1152 kfree(irq_data);
1153 }
1154 }
1155}
1156
1157static int irq_domain_alloc_irq_data(struct irq_domain *domain,
1158 unsigned int virq, unsigned int nr_irqs)
1159{
1160 struct irq_data *irq_data;
1161 struct irq_domain *parent;
1162 int i;
1163
1164 /* The outermost irq_data is embedded in struct irq_desc */
1165 for (i = 0; i < nr_irqs; i++) {
1166 irq_data = irq_get_irq_data(virq + i);
1167 irq_data->domain = domain;
1168
1169 for (parent = domain->parent; parent; parent = parent->parent) {
1170 irq_data = irq_domain_insert_irq_data(parent, irq_data);
1171 if (!irq_data) {
1172 irq_domain_free_irq_data(virq, i + 1);
1173 return -ENOMEM;
1174 }
1175 }
1176 }
1177
1178 return 0;
1179}
1180
1181/**
1182 * irq_domain_get_irq_data - Get irq_data associated with @virq and @domain
1183 * @domain: domain to match
1184 * @virq: IRQ number to get irq_data
1185 */
1186struct irq_data *irq_domain_get_irq_data(struct irq_domain *domain,
1187 unsigned int virq)
1188{
1189 struct irq_data *irq_data;
1190
1191 for (irq_data = irq_get_irq_data(virq); irq_data;
1192 irq_data = irq_data->parent_data)
1193 if (irq_data->domain == domain)
1194 return irq_data;
1195
1196 return NULL;
1197}
Jake Oshinsa4289dc2015-12-10 17:52:59 +00001198EXPORT_SYMBOL_GPL(irq_domain_get_irq_data);
Jiang Liuf8264e32014-11-06 22:20:14 +08001199
1200/**
1201 * irq_domain_set_hwirq_and_chip - Set hwirq and irqchip of @virq at @domain
1202 * @domain: Interrupt domain to match
1203 * @virq: IRQ number
1204 * @hwirq: The hwirq number
1205 * @chip: The associated interrupt chip
1206 * @chip_data: The associated chip data
1207 */
1208int irq_domain_set_hwirq_and_chip(struct irq_domain *domain, unsigned int virq,
1209 irq_hw_number_t hwirq, struct irq_chip *chip,
1210 void *chip_data)
1211{
1212 struct irq_data *irq_data = irq_domain_get_irq_data(domain, virq);
1213
1214 if (!irq_data)
1215 return -ENOENT;
1216
1217 irq_data->hwirq = hwirq;
1218 irq_data->chip = chip ? chip : &no_irq_chip;
1219 irq_data->chip_data = chip_data;
1220
1221 return 0;
1222}
Quan Nguyen52b2a052016-03-03 21:56:52 +07001223EXPORT_SYMBOL_GPL(irq_domain_set_hwirq_and_chip);
Jiang Liuf8264e32014-11-06 22:20:14 +08001224
1225/**
Jiang Liu1b537702014-11-09 23:10:24 +08001226 * irq_domain_set_info - Set the complete data for a @virq in @domain
1227 * @domain: Interrupt domain to match
1228 * @virq: IRQ number
1229 * @hwirq: The hardware interrupt number
1230 * @chip: The associated interrupt chip
1231 * @chip_data: The associated interrupt chip data
1232 * @handler: The interrupt flow handler
1233 * @handler_data: The interrupt flow handler data
1234 * @handler_name: The interrupt handler name
1235 */
1236void irq_domain_set_info(struct irq_domain *domain, unsigned int virq,
1237 irq_hw_number_t hwirq, struct irq_chip *chip,
1238 void *chip_data, irq_flow_handler_t handler,
1239 void *handler_data, const char *handler_name)
1240{
1241 irq_domain_set_hwirq_and_chip(domain, virq, hwirq, chip, chip_data);
1242 __irq_set_handler(virq, handler, 0, handler_name);
1243 irq_set_handler_data(virq, handler_data);
1244}
Keith Busch64bce3e2016-01-12 13:18:07 -07001245EXPORT_SYMBOL(irq_domain_set_info);
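/*
 * Illustrative sketch (not part of the original file): for the outermost
 * domain of a hierarchy, .alloc() commonly uses irq_domain_set_info() so the
 * flow handler is installed in the same step. "my_gpio_chip" and the choice
 * of handle_level_irq are hypothetical.
 */
static struct irq_chip my_gpio_chip = {
        .name = "my-gpio",
};

static int my_gpio_domain_alloc(struct irq_domain *domain, unsigned int virq,
                                unsigned int nr_irqs, void *arg)
{
        struct irq_fwspec *fwspec = arg;
        irq_hw_number_t hwirq = fwspec->param[0];
        unsigned int i;

        for (i = 0; i < nr_irqs; i++)
                irq_domain_set_info(domain, virq + i, hwirq + i, &my_gpio_chip,
                                    NULL, handle_level_irq, NULL, NULL);

        return irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, arg);
}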
Jiang Liu1b537702014-11-09 23:10:24 +08001246
1247/**
Jiang Liuf8264e32014-11-06 22:20:14 +08001248 * irq_domain_free_irqs_common - Clear irq_data and free the parent
1249 * @domain: Interrupt domain to match
1250 * @virq: IRQ number to start with
1251 * @nr_irqs: The number of irqs to free
1252 */
1253void irq_domain_free_irqs_common(struct irq_domain *domain, unsigned int virq,
1254 unsigned int nr_irqs)
1255{
1256 struct irq_data *irq_data;
1257 int i;
1258
1259 for (i = 0; i < nr_irqs; i++) {
1260 irq_data = irq_domain_get_irq_data(domain, virq + i);
1261 if (irq_data)
1262 irq_domain_reset_irq_data(irq_data);
1263 }
1264 irq_domain_free_irqs_parent(domain, virq, nr_irqs);
1265}
Axel Lin63cc7872016-03-17 12:00:31 +08001266EXPORT_SYMBOL_GPL(irq_domain_free_irqs_common);
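/*
 * Illustrative sketch (not part of the original file): a matching .free()
 * callback typically releases whatever chip_data the .alloc() attached and
 * then lets irq_domain_free_irqs_common() reset the irq_data and forward the
 * free to the parent domain. The kfree() of chip_data is only an assumption
 * about how the hypothetical driver allocated it.
 */
static void my_domain_free(struct irq_domain *domain, unsigned int virq,
                           unsigned int nr_irqs)
{
        unsigned int i;

        for (i = 0; i < nr_irqs; i++) {
                struct irq_data *irqd = irq_domain_get_irq_data(domain,
                                                                virq + i);

                if (irqd)
                        kfree(irqd->chip_data);
        }

        irq_domain_free_irqs_common(domain, virq, nr_irqs);
}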
Jiang Liuf8264e32014-11-06 22:20:14 +08001267
1268/**
 1269 * irq_domain_free_irqs_top - Clear handler and handler data, clear irq_data and free parent
1270 * @domain: Interrupt domain to match
1271 * @virq: IRQ number to start with
1272 * @nr_irqs: The number of irqs to free
1273 */
1274void irq_domain_free_irqs_top(struct irq_domain *domain, unsigned int virq,
1275 unsigned int nr_irqs)
1276{
1277 int i;
1278
1279 for (i = 0; i < nr_irqs; i++) {
1280 irq_set_handler_data(virq + i, NULL);
1281 irq_set_handler(virq + i, NULL);
1282 }
1283 irq_domain_free_irqs_common(domain, virq, nr_irqs);
1284}
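/*
 * Illustrative sketch (not part of the original file): when the outermost
 * .alloc() installed a flow handler (e.g. via irq_domain_set_info()), the
 * matching .free() can simply be irq_domain_free_irqs_top(), which clears
 * the handler and handler data before doing the common cleanup.
 */
static void my_gpio_domain_free(struct irq_domain *domain, unsigned int virq,
                                unsigned int nr_irqs)
{
        irq_domain_free_irqs_top(domain, virq, nr_irqs);
}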
1285
Marc Zyngier6a6544e2017-06-20 22:17:44 +01001286static void irq_domain_free_irqs_hierarchy(struct irq_domain *domain,
Jiang Liu36d72732014-11-15 22:24:01 +08001287 unsigned int irq_base,
1288 unsigned int nr_irqs)
1289{
David Daney0d12ec02017-08-17 17:53:33 -07001290 if (domain->ops->free)
1291 domain->ops->free(domain, irq_base, nr_irqs);
Jiang Liu36d72732014-11-15 22:24:01 +08001292}
1293
Marc Zyngier6a6544e2017-06-20 22:17:44 +01001294int irq_domain_alloc_irqs_hierarchy(struct irq_domain *domain,
Marc Zyngierc4665952015-11-23 08:26:04 +00001295 unsigned int irq_base,
1296 unsigned int nr_irqs, void *arg)
Jiang Liu36d72732014-11-15 22:24:01 +08001297{
Alexander Sverdlin87f2d1c2020-03-06 18:47:20 +01001298 if (!domain->ops->alloc) {
1299 pr_debug("domain->ops->alloc() is NULL\n");
1300 return -ENOSYS;
1301 }
1302
Marc Zyngier6a6544e2017-06-20 22:17:44 +01001303 return domain->ops->alloc(domain, irq_base, nr_irqs, arg);
Jiang Liu36d72732014-11-15 22:24:01 +08001304}
1305
Jiang Liuf8264e32014-11-06 22:20:14 +08001306/**
1307 * __irq_domain_alloc_irqs - Allocate IRQs from domain
1308 * @domain: domain to allocate from
Julien Grall08970ec2019-04-18 16:54:01 +01001309 * @irq_base: allocate specified IRQ number if irq_base >= 0
Jiang Liuf8264e32014-11-06 22:20:14 +08001310 * @nr_irqs: number of IRQs to allocate
1311 * @node: NUMA node id for memory allocation
1312 * @arg: domain specific argument
1313 * @realloc: IRQ descriptors have already been allocated if true
Thomas Gleixner06ee6d52016-07-04 17:39:24 +09001314 * @affinity: Optional irq affinity mask for multiqueue devices
Jiang Liuf8264e32014-11-06 22:20:14 +08001315 *
 1316 * Allocate IRQ numbers and initialize all data structures needed to support
 1317 * hierarchical IRQ domains.
 1318 * Parameter @realloc is mainly to support legacy IRQs.
 1319 * Returns the allocated IRQ number on success or a negative error code.
1320 *
 1321 * The whole process to set up an IRQ has been split into two steps.
 1322 * The first step, __irq_domain_alloc_irqs(), allocates the IRQ
 1323 * descriptors and the required hardware resources. The second step,
 1324 * irq_domain_activate_irq(), programs the hardware with the preallocated
 1325 * resources. In this way, it's easier to roll back when resource
 1326 * allocation fails.
1327 */
1328int __irq_domain_alloc_irqs(struct irq_domain *domain, int irq_base,
1329 unsigned int nr_irqs, int node, void *arg,
Dou Liyangbec04032018-12-04 23:51:20 +08001330 bool realloc, const struct irq_affinity_desc *affinity)
Jiang Liuf8264e32014-11-06 22:20:14 +08001331{
1332 int i, ret, virq;
1333
1334 if (domain == NULL) {
1335 domain = irq_default_domain;
1336 if (WARN(!domain, "domain is NULL; cannot allocate IRQ\n"))
1337 return -EINVAL;
1338 }
1339
Jiang Liuf8264e32014-11-06 22:20:14 +08001340 if (realloc && irq_base >= 0) {
1341 virq = irq_base;
1342 } else {
Thomas Gleixner06ee6d52016-07-04 17:39:24 +09001343 virq = irq_domain_alloc_descs(irq_base, nr_irqs, 0, node,
1344 affinity);
Jiang Liuf8264e32014-11-06 22:20:14 +08001345 if (virq < 0) {
1346 pr_debug("cannot allocate IRQ(base %d, count %d)\n",
1347 irq_base, nr_irqs);
1348 return virq;
1349 }
1350 }
1351
1352 if (irq_domain_alloc_irq_data(domain, virq, nr_irqs)) {
1353 pr_debug("cannot allocate memory for IRQ%d\n", virq);
1354 ret = -ENOMEM;
1355 goto out_free_desc;
1356 }
1357
1358 mutex_lock(&irq_domain_mutex);
Marc Zyngier6a6544e2017-06-20 22:17:44 +01001359 ret = irq_domain_alloc_irqs_hierarchy(domain, virq, nr_irqs, arg);
Jiang Liuf8264e32014-11-06 22:20:14 +08001360 if (ret < 0) {
1361 mutex_unlock(&irq_domain_mutex);
1362 goto out_free_irq_data;
1363 }
1364 for (i = 0; i < nr_irqs; i++)
1365 irq_domain_insert_irq(virq + i);
1366 mutex_unlock(&irq_domain_mutex);
1367
1368 return virq;
1369
1370out_free_irq_data:
1371 irq_domain_free_irq_data(virq, nr_irqs);
1372out_free_desc:
1373 irq_free_descs(virq, nr_irqs);
1374 return ret;
1375}
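/*
 * Illustrative sketch (not part of the original file): the two-step setup
 * described above, seen from the consumer side. irq_domain_alloc_irqs() is
 * the convenience wrapper around __irq_domain_alloc_irqs() declared in
 * <linux/irqdomain.h>. Activation is normally driven by the core when the
 * interrupt is requested/started up; it is spelled out here only to show the
 * second step. "my_domain" and the fwspec argument are hypothetical.
 */
static int my_setup_irq(struct irq_domain *my_domain, struct irq_fwspec *fwspec)
{
        int virq, ret;

        /* Step 1: allocate descriptors, irq_data and hardware resources. */
        virq = irq_domain_alloc_irqs(my_domain, 1, NUMA_NO_NODE, fwspec);
        if (virq < 0)
                return virq;

        /* Step 2: program the hardware with the preallocated resources. */
        ret = irq_domain_activate_irq(irq_get_irq_data(virq), false);
        if (ret) {
                irq_domain_free_irqs(virq, 1);
                return ret;
        }

        return virq;
}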
1376
David Daney495c38d2017-08-17 17:53:34 -07001377/* The irq_data was moved, fix the revmap to refer to the new location */
1378static void irq_domain_fix_revmap(struct irq_data *d)
1379{
Masahiro Yamadad03cc2d2017-09-22 21:20:41 +09001380 void __rcu **slot;
David Daney495c38d2017-08-17 17:53:34 -07001381
1382 if (d->hwirq < d->domain->revmap_size)
1383 return; /* Not using radix tree. */
1384
1385 /* Fix up the revmap. */
Masahiro Yamadaf1d78352017-10-05 10:44:54 +09001386 mutex_lock(&d->domain->revmap_tree_mutex);
David Daney495c38d2017-08-17 17:53:34 -07001387 slot = radix_tree_lookup_slot(&d->domain->revmap_tree, d->hwirq);
1388 if (slot)
1389 radix_tree_replace_slot(&d->domain->revmap_tree, slot, d);
Masahiro Yamadaf1d78352017-10-05 10:44:54 +09001390 mutex_unlock(&d->domain->revmap_tree_mutex);
David Daney495c38d2017-08-17 17:53:34 -07001391}
1392
1393/**
1394 * irq_domain_push_irq() - Push a domain in to the top of a hierarchy.
1395 * @domain: Domain to push.
1396 * @virq: Irq to push the domain in to.
1397 * @arg: Passed to the irq_domain_ops alloc() function.
1398 *
1399 * For an already existing irqdomain hierarchy, as might be obtained
1400 * via a call to pci_enable_msix(), add an additional domain to the
1401 * head of the processing chain. Must be called before request_irq()
1402 * has been called.
1403 */
1404int irq_domain_push_irq(struct irq_domain *domain, int virq, void *arg)
1405{
1406 struct irq_data *child_irq_data;
1407 struct irq_data *root_irq_data = irq_get_irq_data(virq);
1408 struct irq_desc *desc;
1409 int rv = 0;
1410
1411 /*
1412 * Check that no action has been set, which indicates the virq
1413 * is in a state where this function doesn't have to deal with
1414 * races between interrupt handling and maintaining the
1415 * hierarchy. This will catch gross misuse. Attempting to
1416 * make the check race free would require holding locks across
1417 * calls to struct irq_domain_ops->alloc(), which could lead
1418 * to deadlock, so we just do a simple check before starting.
1419 */
1420 desc = irq_to_desc(virq);
1421 if (!desc)
1422 return -EINVAL;
1423 if (WARN_ON(desc->action))
1424 return -EBUSY;
1425
1426 if (domain == NULL)
1427 return -EINVAL;
1428
1429 if (WARN_ON(!irq_domain_is_hierarchy(domain)))
1430 return -EINVAL;
1431
Dan Carpenter20c4d492017-08-25 15:14:09 +03001432 if (!root_irq_data)
David Daney495c38d2017-08-17 17:53:34 -07001433 return -EINVAL;
1434
Dan Carpenter20c4d492017-08-25 15:14:09 +03001435 if (domain->parent != root_irq_data->domain)
David Daney495c38d2017-08-17 17:53:34 -07001436 return -EINVAL;
1437
1438 child_irq_data = kzalloc_node(sizeof(*child_irq_data), GFP_KERNEL,
1439 irq_data_get_node(root_irq_data));
1440 if (!child_irq_data)
1441 return -ENOMEM;
1442
1443 mutex_lock(&irq_domain_mutex);
1444
1445 /* Copy the original irq_data. */
1446 *child_irq_data = *root_irq_data;
1447
1448 /*
1449 * Overwrite the root_irq_data, which is embedded in struct
1450 * irq_desc, with values for this domain.
1451 */
1452 root_irq_data->parent_data = child_irq_data;
1453 root_irq_data->domain = domain;
1454 root_irq_data->mask = 0;
1455 root_irq_data->hwirq = 0;
1456 root_irq_data->chip = NULL;
1457 root_irq_data->chip_data = NULL;
1458
1459 /* May (probably does) set hwirq, chip, etc. */
1460 rv = irq_domain_alloc_irqs_hierarchy(domain, virq, 1, arg);
1461 if (rv) {
1462 /* Restore the original irq_data. */
1463 *root_irq_data = *child_irq_data;
Kevin Hao0f394da2020-01-20 12:35:47 +08001464 kfree(child_irq_data);
David Daney495c38d2017-08-17 17:53:34 -07001465 goto error;
1466 }
1467
1468 irq_domain_fix_revmap(child_irq_data);
1469 irq_domain_set_mapping(domain, root_irq_data->hwirq, root_irq_data);
1470
1471error:
1472 mutex_unlock(&irq_domain_mutex);
1473
1474 return rv;
1475}
1476EXPORT_SYMBOL_GPL(irq_domain_push_irq);
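/*
 * Illustrative sketch (not part of the original file): stacking an extra
 * domain on top of an interrupt that was already allocated lower in the
 * hierarchy (e.g. an MSI-X vector). The names and the NULL alloc argument
 * are hypothetical; the key constraint shown is that this must run before
 * request_irq().
 */
static int my_attach_extra_domain(struct irq_domain *my_extra_domain, int virq)
{
        /* Must run while no handler is installed on @virq. */
        return irq_domain_push_irq(my_extra_domain, virq, NULL);
}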
1477
1478/**
1479 * irq_domain_pop_irq() - Remove a domain from the top of a hierarchy.
1480 * @domain: Domain to remove.
1481 * @virq: Irq to remove the domain from.
1482 *
1483 * Undo the effects of a call to irq_domain_push_irq(). Must be
1484 * called either before request_irq() or after free_irq().
1485 */
1486int irq_domain_pop_irq(struct irq_domain *domain, int virq)
1487{
1488 struct irq_data *root_irq_data = irq_get_irq_data(virq);
1489 struct irq_data *child_irq_data;
1490 struct irq_data *tmp_irq_data;
1491 struct irq_desc *desc;
1492
1493 /*
1494 * Check that no action is set, which indicates the virq is in
1495 * a state where this function doesn't have to deal with races
1496 * between interrupt handling and maintaining the hierarchy.
1497 * This will catch gross misuse. Attempting to make the check
1498 * race free would require holding locks across calls to
1499 * struct irq_domain_ops->free(), which could lead to
1500 * deadlock, so we just do a simple check before starting.
1501 */
1502 desc = irq_to_desc(virq);
1503 if (!desc)
1504 return -EINVAL;
1505 if (WARN_ON(desc->action))
1506 return -EBUSY;
1507
1508 if (domain == NULL)
1509 return -EINVAL;
1510
1511 if (!root_irq_data)
1512 return -EINVAL;
1513
1514 tmp_irq_data = irq_domain_get_irq_data(domain, virq);
1515
1516 /* We can only "pop" if this domain is at the top of the list */
1517 if (WARN_ON(root_irq_data != tmp_irq_data))
1518 return -EINVAL;
1519
1520 if (WARN_ON(root_irq_data->domain != domain))
1521 return -EINVAL;
1522
1523 child_irq_data = root_irq_data->parent_data;
1524 if (WARN_ON(!child_irq_data))
1525 return -EINVAL;
1526
1527 mutex_lock(&irq_domain_mutex);
1528
1529 root_irq_data->parent_data = NULL;
1530
1531 irq_domain_clear_mapping(domain, root_irq_data->hwirq);
1532 irq_domain_free_irqs_hierarchy(domain, virq, 1);
1533
1534 /* Restore the original irq_data. */
1535 *root_irq_data = *child_irq_data;
1536
1537 irq_domain_fix_revmap(root_irq_data);
1538
1539 mutex_unlock(&irq_domain_mutex);
1540
1541 kfree(child_irq_data);
1542
1543 return 0;
1544}
1545EXPORT_SYMBOL_GPL(irq_domain_pop_irq);
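/*
 * Illustrative sketch (not part of the original file): the counterpart to
 * the irq_domain_push_irq() example above, run either before request_irq()
 * or after free_irq() for @virq.
 */
static void my_detach_extra_domain(struct irq_domain *my_extra_domain, int virq)
{
        WARN_ON(irq_domain_pop_irq(my_extra_domain, virq));
}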
1546
Jiang Liuf8264e32014-11-06 22:20:14 +08001547/**
1548 * irq_domain_free_irqs - Free IRQ number and associated data structures
1549 * @virq: base IRQ number
1550 * @nr_irqs: number of IRQs to free
1551 */
1552void irq_domain_free_irqs(unsigned int virq, unsigned int nr_irqs)
1553{
1554 struct irq_data *data = irq_get_irq_data(virq);
1555 int i;
1556
1557 if (WARN(!data || !data->domain || !data->domain->ops->free,
1558 "NULL pointer, cannot free irq\n"))
1559 return;
1560
1561 mutex_lock(&irq_domain_mutex);
1562 for (i = 0; i < nr_irqs; i++)
1563 irq_domain_remove_irq(virq + i);
Marc Zyngier6a6544e2017-06-20 22:17:44 +01001564 irq_domain_free_irqs_hierarchy(data->domain, virq, nr_irqs);
Jiang Liuf8264e32014-11-06 22:20:14 +08001565 mutex_unlock(&irq_domain_mutex);
1566
1567 irq_domain_free_irq_data(virq, nr_irqs);
1568 irq_free_descs(virq, nr_irqs);
1569}
1570
1571/**
Jiang Liu36d72732014-11-15 22:24:01 +08001572 * irq_domain_alloc_irqs_parent - Allocate interrupts from parent domain
1573 * @irq_base: Base IRQ number
1574 * @nr_irqs: Number of IRQs to allocate
1575 * @arg: Allocation data (arch/domain specific)
1576 *
 1577 * Check whether the domain has a parent. If so, allocate the interrupts
 1578 * through the parent domain.
1579 */
1580int irq_domain_alloc_irqs_parent(struct irq_domain *domain,
1581 unsigned int irq_base, unsigned int nr_irqs,
1582 void *arg)
1583{
Marc Zyngier6a6544e2017-06-20 22:17:44 +01001584 if (!domain->parent)
1585 return -ENOSYS;
Jiang Liu36d72732014-11-15 22:24:01 +08001586
Marc Zyngier6a6544e2017-06-20 22:17:44 +01001587 return irq_domain_alloc_irqs_hierarchy(domain->parent, irq_base,
1588 nr_irqs, arg);
Jiang Liu36d72732014-11-15 22:24:01 +08001589}
Quan Nguyen52b2a052016-03-03 21:56:52 +07001590EXPORT_SYMBOL_GPL(irq_domain_alloc_irqs_parent);
Jiang Liu36d72732014-11-15 22:24:01 +08001591
1592/**
1593 * irq_domain_free_irqs_parent - Free interrupts from parent domain
1594 * @irq_base: Base IRQ number
1595 * @nr_irqs: Number of IRQs to free
1596 *
 1597 * Check whether the domain has a parent. If so, free the interrupts
 1598 * through the parent domain.
1599 */
1600void irq_domain_free_irqs_parent(struct irq_domain *domain,
1601 unsigned int irq_base, unsigned int nr_irqs)
1602{
Marc Zyngier6a6544e2017-06-20 22:17:44 +01001603 if (!domain->parent)
1604 return;
1605
1606 irq_domain_free_irqs_hierarchy(domain->parent, irq_base, nr_irqs);
Jiang Liu36d72732014-11-15 22:24:01 +08001607}
Quan Nguyen52b2a052016-03-03 21:56:52 +07001608EXPORT_SYMBOL_GPL(irq_domain_free_irqs_parent);
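/*
 * Illustrative sketch (not part of the original file): a middle domain that
 * keeps no per-interrupt state of its own can forward allocation to its
 * parent with irq_domain_alloc_irqs_parent() and use the common free helper,
 * which resets this level's irq_data and then frees through the parent via
 * irq_domain_free_irqs_parent(). The "my_mid_*" names are hypothetical.
 */
static int my_mid_domain_alloc(struct irq_domain *domain, unsigned int virq,
                               unsigned int nr_irqs, void *arg)
{
        /* No per-interrupt state at this level: just ask the parent. */
        return irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, arg);
}

static const struct irq_domain_ops my_mid_domain_ops = {
        .alloc  = my_mid_domain_alloc,
        .free   = irq_domain_free_irqs_common,
};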
Jiang Liu36d72732014-11-15 22:24:01 +08001609
Marc Zyngier08d85f32017-01-17 16:00:48 +00001610static void __irq_domain_deactivate_irq(struct irq_data *irq_data)
1611{
1612 if (irq_data && irq_data->domain) {
1613 struct irq_domain *domain = irq_data->domain;
1614
1615 if (domain->ops->deactivate)
1616 domain->ops->deactivate(domain, irq_data);
1617 if (irq_data->parent_data)
1618 __irq_domain_deactivate_irq(irq_data->parent_data);
1619 }
1620}
1621
Thomas Gleixner702cb0a2017-12-29 16:59:06 +01001622static int __irq_domain_activate_irq(struct irq_data *irqd, bool reserve)
Thomas Gleixnerbb9b4282017-09-13 23:29:11 +02001623{
1624 int ret = 0;
1625
1626 if (irqd && irqd->domain) {
1627 struct irq_domain *domain = irqd->domain;
1628
1629 if (irqd->parent_data)
Thomas Gleixner42e1cc22017-09-13 23:29:12 +02001630 ret = __irq_domain_activate_irq(irqd->parent_data,
Thomas Gleixner702cb0a2017-12-29 16:59:06 +01001631 reserve);
Thomas Gleixnerbb9b4282017-09-13 23:29:11 +02001632 if (!ret && domain->ops->activate) {
Thomas Gleixner702cb0a2017-12-29 16:59:06 +01001633 ret = domain->ops->activate(domain, irqd, reserve);
Thomas Gleixnerbb9b4282017-09-13 23:29:11 +02001634 /* Rollback in case of error */
1635 if (ret && irqd->parent_data)
1636 __irq_domain_deactivate_irq(irqd->parent_data);
1637 }
1638 }
1639 return ret;
1640}
1641
Jiang Liu36d72732014-11-15 22:24:01 +08001642/**
Jiang Liuf8264e32014-11-06 22:20:14 +08001643 * irq_domain_activate_irq - Call domain_ops->activate recursively to activate
1644 * interrupt
Thomas Gleixner702cb0a2017-12-29 16:59:06 +01001645 * @irq_data: Outermost irq_data associated with interrupt
1646 * @reserve: If set only reserve an interrupt vector instead of assigning one
Jiang Liuf8264e32014-11-06 22:20:14 +08001647 *
 1648 * This is the second step to call domain_ops->activate to program the
 1649 * interrupt controller, so that the interrupt can actually be delivered.
1650 */
Thomas Gleixner702cb0a2017-12-29 16:59:06 +01001651int irq_domain_activate_irq(struct irq_data *irq_data, bool reserve)
Jiang Liuf8264e32014-11-06 22:20:14 +08001652{
Thomas Gleixnerbb9b4282017-09-13 23:29:11 +02001653 int ret = 0;
1654
1655 if (!irqd_is_activated(irq_data))
Thomas Gleixner702cb0a2017-12-29 16:59:06 +01001656 ret = __irq_domain_activate_irq(irq_data, reserve);
Thomas Gleixnerbb9b4282017-09-13 23:29:11 +02001657 if (!ret)
Marc Zyngier08d85f32017-01-17 16:00:48 +00001658 irqd_set_activated(irq_data);
Thomas Gleixnerbb9b4282017-09-13 23:29:11 +02001659 return ret;
Jiang Liuf8264e32014-11-06 22:20:14 +08001660}
1661
1662/**
1663 * irq_domain_deactivate_irq - Call domain_ops->deactivate recursively to
1664 * deactivate interrupt
1665 * @irq_data: outermost irq_data associated with interrupt
1666 *
 1667 * It calls domain_ops->deactivate to program the interrupt controller to
 1668 * disable interrupt delivery.
1669 */
1670void irq_domain_deactivate_irq(struct irq_data *irq_data)
1671{
Marc Zyngier08d85f32017-01-17 16:00:48 +00001672 if (irqd_is_activated(irq_data)) {
1673 __irq_domain_deactivate_irq(irq_data);
1674 irqd_clr_activated(irq_data);
Jiang Liuf8264e32014-11-06 22:20:14 +08001675 }
1676}
1677
1678static void irq_domain_check_hierarchy(struct irq_domain *domain)
1679{
1680 /* Hierarchy irq_domains must implement callback alloc() */
1681 if (domain->ops->alloc)
1682 domain->flags |= IRQ_DOMAIN_FLAG_HIERARCHY;
1683}
Eric Auger631a9632017-01-19 20:57:57 +00001684
1685/**
1686 * irq_domain_hierarchical_is_msi_remap - Check if the domain or any
1687 * parent has MSI remapping support
1688 * @domain: domain pointer
1689 */
1690bool irq_domain_hierarchical_is_msi_remap(struct irq_domain *domain)
1691{
1692 for (; domain; domain = domain->parent) {
1693 if (irq_domain_is_msi_remap(domain))
1694 return true;
1695 }
1696 return false;
1697}
Jiang Liuf8264e32014-11-06 22:20:14 +08001698#else /* CONFIG_IRQ_DOMAIN_HIERARCHY */
1699/**
1700 * irq_domain_get_irq_data - Get irq_data associated with @virq and @domain
1701 * @domain: domain to match
1702 * @virq: IRQ number to get irq_data
1703 */
1704struct irq_data *irq_domain_get_irq_data(struct irq_domain *domain,
1705 unsigned int virq)
1706{
1707 struct irq_data *irq_data = irq_get_irq_data(virq);
1708
1709 return (irq_data && irq_data->domain == domain) ? irq_data : NULL;
1710}
Jake Oshinsa4289dc2015-12-10 17:52:59 +00001711EXPORT_SYMBOL_GPL(irq_domain_get_irq_data);
Jiang Liuf8264e32014-11-06 22:20:14 +08001712
Stefan Agner5f22f5c2015-05-16 11:44:13 +02001713/**
1714 * irq_domain_set_info - Set the complete data for a @virq in @domain
1715 * @domain: Interrupt domain to match
1716 * @virq: IRQ number
1717 * @hwirq: The hardware interrupt number
1718 * @chip: The associated interrupt chip
1719 * @chip_data: The associated interrupt chip data
1720 * @handler: The interrupt flow handler
1721 * @handler_data: The interrupt flow handler data
1722 * @handler_name: The interrupt handler name
1723 */
1724void irq_domain_set_info(struct irq_domain *domain, unsigned int virq,
1725 irq_hw_number_t hwirq, struct irq_chip *chip,
1726 void *chip_data, irq_flow_handler_t handler,
1727 void *handler_data, const char *handler_name)
1728{
1729 irq_set_chip_and_handler_name(virq, chip, handler, handler_name);
1730 irq_set_chip_data(virq, chip_data);
1731 irq_set_handler_data(virq, handler_data);
1732}
1733
Jiang Liuf8264e32014-11-06 22:20:14 +08001734static void irq_domain_check_hierarchy(struct irq_domain *domain)
1735{
1736}
1737#endif /* CONFIG_IRQ_DOMAIN_HIERARCHY */
Thomas Gleixner087cdfb2017-06-20 01:37:17 +02001738
1739#ifdef CONFIG_GENERIC_IRQ_DEBUGFS
1740static struct dentry *domain_dir;
1741
1742static void
1743irq_domain_debug_show_one(struct seq_file *m, struct irq_domain *d, int ind)
1744{
1745 seq_printf(m, "%*sname: %s\n", ind, "", d->name);
1746 seq_printf(m, "%*ssize: %u\n", ind + 1, "",
1747 d->revmap_size + d->revmap_direct_max_irq);
1748 seq_printf(m, "%*smapped: %u\n", ind + 1, "", d->mapcount);
 1749	seq_printf(m, "%*sflags: 0x%08x\n", ind + 1, "", d->flags);
Thomas Gleixnerc3e72392017-09-13 23:29:06 +02001750 if (d->ops && d->ops->debug_show)
1751 d->ops->debug_show(m, d, NULL, ind + 1);
Thomas Gleixner087cdfb2017-06-20 01:37:17 +02001752#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
1753 if (!d->parent)
1754 return;
1755 seq_printf(m, "%*sparent: %s\n", ind + 1, "", d->parent->name);
1756 irq_domain_debug_show_one(m, d->parent, ind + 4);
1757#endif
1758}
1759
1760static int irq_domain_debug_show(struct seq_file *m, void *p)
1761{
1762 struct irq_domain *d = m->private;
1763
1764 /* Default domain? Might be NULL */
1765 if (!d) {
1766 if (!irq_default_domain)
1767 return 0;
1768 d = irq_default_domain;
1769 }
1770 irq_domain_debug_show_one(m, d, 0);
1771 return 0;
1772}
Andy Shevchenko0b24a0b2018-02-14 17:47:35 +02001773DEFINE_SHOW_ATTRIBUTE(irq_domain_debug);
Thomas Gleixner087cdfb2017-06-20 01:37:17 +02001774
1775static void debugfs_add_domain_dir(struct irq_domain *d)
1776{
1777 if (!d->name || !domain_dir || d->debugfs_file)
1778 return;
1779 d->debugfs_file = debugfs_create_file(d->name, 0444, domain_dir, d,
Andy Shevchenko0b24a0b2018-02-14 17:47:35 +02001780 &irq_domain_debug_fops);
Thomas Gleixner087cdfb2017-06-20 01:37:17 +02001781}
1782
1783static void debugfs_remove_domain_dir(struct irq_domain *d)
1784{
Thomas Gleixnerf610c9d2017-07-07 08:57:57 +02001785 debugfs_remove(d->debugfs_file);
Marc Zyngier513145e2018-10-01 11:05:21 +01001786 d->debugfs_file = NULL;
Thomas Gleixner087cdfb2017-06-20 01:37:17 +02001787}
1788
1789void __init irq_domain_debugfs_init(struct dentry *root)
1790{
1791 struct irq_domain *d;
1792
1793 domain_dir = debugfs_create_dir("domains", root);
Thomas Gleixner087cdfb2017-06-20 01:37:17 +02001794
Andy Shevchenko0b24a0b2018-02-14 17:47:35 +02001795 debugfs_create_file("default", 0444, domain_dir, NULL,
1796 &irq_domain_debug_fops);
Thomas Gleixner087cdfb2017-06-20 01:37:17 +02001797 mutex_lock(&irq_domain_mutex);
1798 list_for_each_entry(d, &irq_domain_list, link)
1799 debugfs_add_domain_dir(d);
1800 mutex_unlock(&irq_domain_mutex);
1801}
1802#endif