#define pr_fmt(fmt) "irq: " fmt

#include <linux/acpi.h>
#include <linux/debugfs.h>
#include <linux/hardirq.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqdesc.h>
#include <linux/irqdomain.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/topology.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/fs.h>

static LIST_HEAD(irq_domain_list);
static DEFINE_MUTEX(irq_domain_mutex);

static struct irq_domain *irq_default_domain;

static void irq_domain_check_hierarchy(struct irq_domain *domain);

struct irqchip_fwid {
	struct fwnode_handle	fwnode;
	unsigned int		type;
	char			*name;
	void			*data;
};

#ifdef CONFIG_GENERIC_IRQ_DEBUGFS
static void debugfs_add_domain_dir(struct irq_domain *d);
static void debugfs_remove_domain_dir(struct irq_domain *d);
#else
static inline void debugfs_add_domain_dir(struct irq_domain *d) { }
static inline void debugfs_remove_domain_dir(struct irq_domain *d) { }
#endif

const struct fwnode_operations irqchip_fwnode_ops;
EXPORT_SYMBOL_GPL(irqchip_fwnode_ops);

/**
 * irq_domain_alloc_fwnode - Allocate a fwnode_handle suitable for
 *                           identifying an irq domain
 * @type: Type of irqchip_fwnode. See linux/irqdomain.h
 * @name: Optional user provided domain name
 * @id: Optional user provided id if name != NULL
 * @data: Optional user-provided data
 *
 * Allocate a struct irqchip_fwid, and return a pointer to the embedded
 * fwnode_handle (or NULL on failure).
 *
 * Note: The types IRQCHIP_FWNODE_NAMED and IRQCHIP_FWNODE_NAMED_ID are
 * solely to transport name information to irqdomain creation code. The
 * node is not stored. For other types the pointer is kept in the irq
 * domain struct.
 */
struct fwnode_handle *__irq_domain_alloc_fwnode(unsigned int type, int id,
						const char *name, void *data)
{
	struct irqchip_fwid *fwid;
	char *n;

	fwid = kzalloc(sizeof(*fwid), GFP_KERNEL);

	switch (type) {
	case IRQCHIP_FWNODE_NAMED:
		n = kasprintf(GFP_KERNEL, "%s", name);
		break;
	case IRQCHIP_FWNODE_NAMED_ID:
		n = kasprintf(GFP_KERNEL, "%s-%d", name, id);
		break;
	default:
		n = kasprintf(GFP_KERNEL, "irqchip@%p", data);
		break;
	}

	if (!fwid || !n) {
		kfree(fwid);
		kfree(n);
		return NULL;
	}

	fwid->type = type;
	fwid->name = n;
	fwid->data = data;
	fwid->fwnode.ops = &irqchip_fwnode_ops;
	return &fwid->fwnode;
}
EXPORT_SYMBOL_GPL(__irq_domain_alloc_fwnode);
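
/*
 * Usage sketch (illustrative only, not part of this file): an irqchip with
 * no firmware node of its own would typically allocate a named fwnode and
 * release it again with irq_domain_free_fwnode(). The name "my-irqchip"
 * and the error handling below are assumptions.
 *
 *	struct fwnode_handle *fwn;
 *
 *	fwn = __irq_domain_alloc_fwnode(IRQCHIP_FWNODE_NAMED, 0,
 *					"my-irqchip", NULL);
 *	if (!fwn)
 *		return -ENOMEM;
 *	...
 *	irq_domain_free_fwnode(fwn);
 */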

/**
 * irq_domain_free_fwnode - Free a non-OF-backed fwnode_handle
 *
 * Free a fwnode_handle allocated with irq_domain_alloc_fwnode.
 */
void irq_domain_free_fwnode(struct fwnode_handle *fwnode)
{
	struct irqchip_fwid *fwid;

	if (WARN_ON(!is_fwnode_irqchip(fwnode)))
		return;

	fwid = container_of(fwnode, struct irqchip_fwid, fwnode);
	kfree(fwid->name);
	kfree(fwid);
}
EXPORT_SYMBOL_GPL(irq_domain_free_fwnode);

/**
 * __irq_domain_add() - Allocate a new irq_domain data structure
 * @fwnode: firmware node for the interrupt controller
 * @size: Size of linear map; 0 for radix mapping only
 * @hwirq_max: Maximum number of interrupts supported by controller
 * @direct_max: Maximum value of direct maps; Use ~0 for no limit; 0 for no
 *              direct mapping
 * @ops: domain callbacks
 * @host_data: Controller private data pointer
 *
 * Allocates and initializes an irq_domain structure.
 * Returns pointer to IRQ domain, or NULL on failure.
 */
struct irq_domain *__irq_domain_add(struct fwnode_handle *fwnode, int size,
				    irq_hw_number_t hwirq_max, int direct_max,
				    const struct irq_domain_ops *ops,
				    void *host_data)
{
	struct device_node *of_node = to_of_node(fwnode);
	struct irqchip_fwid *fwid;
	struct irq_domain *domain;

	static atomic_t unknown_domains;

	domain = kzalloc_node(sizeof(*domain) + (sizeof(unsigned int) * size),
			      GFP_KERNEL, of_node_to_nid(of_node));
	if (WARN_ON(!domain))
		return NULL;

	if (fwnode && is_fwnode_irqchip(fwnode)) {
		fwid = container_of(fwnode, struct irqchip_fwid, fwnode);

		switch (fwid->type) {
		case IRQCHIP_FWNODE_NAMED:
		case IRQCHIP_FWNODE_NAMED_ID:
			domain->name = kstrdup(fwid->name, GFP_KERNEL);
			if (!domain->name) {
				kfree(domain);
				return NULL;
			}
			domain->flags |= IRQ_DOMAIN_NAME_ALLOCATED;
			break;
		default:
			domain->fwnode = fwnode;
			domain->name = fwid->name;
			break;
		}
#ifdef CONFIG_ACPI
	} else if (is_acpi_device_node(fwnode)) {
		struct acpi_buffer buf = {
			.length = ACPI_ALLOCATE_BUFFER,
		};
		acpi_handle handle;

		handle = acpi_device_handle(to_acpi_device_node(fwnode));
		if (acpi_get_name(handle, ACPI_FULL_PATHNAME, &buf) == AE_OK) {
			domain->name = buf.pointer;
			domain->flags |= IRQ_DOMAIN_NAME_ALLOCATED;
		}

		domain->fwnode = fwnode;
#endif
	} else if (of_node) {
		char *name;

		/*
		 * DT paths contain '/', which debugfs is legitimately
		 * unhappy about. Replace them with ':', which does
		 * the trick and is not as offensive as '\'...
		 */
		name = kstrdup(of_node_full_name(of_node), GFP_KERNEL);
		if (!name) {
			kfree(domain);
			return NULL;
		}

		strreplace(name, '/', ':');

		domain->name = name;
		domain->fwnode = fwnode;
		domain->flags |= IRQ_DOMAIN_NAME_ALLOCATED;
	}

	if (!domain->name) {
		if (fwnode)
			pr_err("Invalid fwnode type for irqdomain\n");
		domain->name = kasprintf(GFP_KERNEL, "unknown-%d",
					 atomic_inc_return(&unknown_domains));
		if (!domain->name) {
			kfree(domain);
			return NULL;
		}
		domain->flags |= IRQ_DOMAIN_NAME_ALLOCATED;
	}

	of_node_get(of_node);

	/* Fill structure */
	INIT_RADIX_TREE(&domain->revmap_tree, GFP_KERNEL);
	mutex_init(&domain->revmap_tree_mutex);
	domain->ops = ops;
	domain->host_data = host_data;
	domain->hwirq_max = hwirq_max;
	domain->revmap_size = size;
	domain->revmap_direct_max_irq = direct_max;
	irq_domain_check_hierarchy(domain);

	mutex_lock(&irq_domain_mutex);
	debugfs_add_domain_dir(domain);
	list_add(&domain->link, &irq_domain_list);
	mutex_unlock(&irq_domain_mutex);

	pr_debug("Added domain %s\n", domain->name);
	return domain;
}
EXPORT_SYMBOL_GPL(__irq_domain_add);
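
/*
 * Usage sketch (illustrative only): most drivers reach __irq_domain_add()
 * through one of the irq_domain_add_*() / irq_domain_create_*() wrappers in
 * <linux/irqdomain.h>, but a direct call looks like the following.
 * "my_domain_ops" and "priv" are hypothetical driver objects; the example
 * assumes a 32-entry linear map (size == hwirq_max == 32, no direct map).
 *
 *	struct irq_domain *d;
 *
 *	d = __irq_domain_add(fwnode, 32, 32, 0, &my_domain_ops, priv);
 *	if (!d)
 *		return -ENOMEM;
 */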

/**
 * irq_domain_remove() - Remove an irq domain.
 * @domain: domain to remove
 *
 * This routine is used to remove an irq domain. The caller must ensure
 * that all mappings within the domain have been disposed of prior to
 * use, depending on the revmap type.
 */
void irq_domain_remove(struct irq_domain *domain)
{
	mutex_lock(&irq_domain_mutex);
	debugfs_remove_domain_dir(domain);

	WARN_ON(!radix_tree_empty(&domain->revmap_tree));

	list_del(&domain->link);

	/*
	 * If the going away domain is the default one, reset it.
	 */
	if (unlikely(irq_default_domain == domain))
		irq_set_default_host(NULL);

	mutex_unlock(&irq_domain_mutex);

	pr_debug("Removed domain %s\n", domain->name);

	of_node_put(irq_domain_get_of_node(domain));
	if (domain->flags & IRQ_DOMAIN_NAME_ALLOCATED)
		kfree(domain->name);
	kfree(domain);
}
EXPORT_SYMBOL_GPL(irq_domain_remove);

void irq_domain_update_bus_token(struct irq_domain *domain,
				 enum irq_domain_bus_token bus_token)
{
	char *name;

	if (domain->bus_token == bus_token)
		return;

	mutex_lock(&irq_domain_mutex);

	domain->bus_token = bus_token;

	name = kasprintf(GFP_KERNEL, "%s-%d", domain->name, bus_token);
	if (!name) {
		mutex_unlock(&irq_domain_mutex);
		return;
	}

	debugfs_remove_domain_dir(domain);

	if (domain->flags & IRQ_DOMAIN_NAME_ALLOCATED)
		kfree(domain->name);
	else
		domain->flags |= IRQ_DOMAIN_NAME_ALLOCATED;

	domain->name = name;
	debugfs_add_domain_dir(domain);

	mutex_unlock(&irq_domain_mutex);
}

/**
 * irq_domain_add_simple() - Register an irq_domain and optionally map a range of irqs
 * @of_node: pointer to interrupt controller's device tree node.
 * @size: total number of irqs in mapping
 * @first_irq: first number of irq block assigned to the domain,
 *	pass zero to assign irqs on-the-fly. If first_irq is non-zero, then
 *	pre-map all of the irqs in the domain to virqs starting at first_irq.
 * @ops: domain callbacks
 * @host_data: Controller private data pointer
 *
 * Allocates an irq_domain, and optionally if first_irq is positive then also
 * allocate irq_descs and map all of the hwirqs to virqs starting at first_irq.
 *
 * This is intended to implement the expected behaviour for most
 * interrupt controllers. If device tree is used, then first_irq will be 0 and
 * irqs get mapped dynamically on the fly. However, if the controller requires
 * static virq assignments (non-DT boot) then it will set that up correctly.
 */
struct irq_domain *irq_domain_add_simple(struct device_node *of_node,
					 unsigned int size,
					 unsigned int first_irq,
					 const struct irq_domain_ops *ops,
					 void *host_data)
{
	struct irq_domain *domain;

	domain = __irq_domain_add(of_node_to_fwnode(of_node), size, size, 0, ops, host_data);
	if (!domain)
		return NULL;

	if (first_irq > 0) {
		if (IS_ENABLED(CONFIG_SPARSE_IRQ)) {
			/* attempt to allocate irq_descs */
			int rc = irq_alloc_descs(first_irq, first_irq, size,
						 of_node_to_nid(of_node));
			if (rc < 0)
				pr_info("Cannot allocate irq_descs @ IRQ%d, assuming pre-allocated\n",
					first_irq);
		}
		irq_domain_associate_many(domain, first_irq, 0, size);
	}

	return domain;
}
EXPORT_SYMBOL_GPL(irq_domain_add_simple);
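
/*
 * Usage sketch (illustrative only): a DT-probed controller with 32 lines
 * typically passes first_irq == 0 and lets virqs be assigned on demand.
 * "my_irq_map", "node" and "priv" are hypothetical driver objects.
 *
 *	static const struct irq_domain_ops my_domain_ops = {
 *		.map	= my_irq_map,
 *		.xlate	= irq_domain_xlate_onecell,
 *	};
 *
 *	domain = irq_domain_add_simple(node, 32, 0, &my_domain_ops, priv);
 *	if (!domain)
 *		return -ENOMEM;
 */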

/**
 * irq_domain_add_legacy() - Allocate and register a legacy revmap irq_domain.
 * @of_node: pointer to interrupt controller's device tree node.
 * @size: total number of irqs in legacy mapping
 * @first_irq: first number of irq block assigned to the domain
 * @first_hwirq: first hwirq number to use for the translation. Should normally
 *               be '0', but a positive integer can be used if the effective
 *               hwirqs numbering does not begin at zero.
 * @ops: map/unmap domain callbacks
 * @host_data: Controller private data pointer
 *
 * Note: the map() callback will be called before this function returns
 * for all legacy interrupts except 0 (which is always the invalid irq for
 * a legacy controller).
 */
struct irq_domain *irq_domain_add_legacy(struct device_node *of_node,
					 unsigned int size,
					 unsigned int first_irq,
					 irq_hw_number_t first_hwirq,
					 const struct irq_domain_ops *ops,
					 void *host_data)
{
	struct irq_domain *domain;

	domain = __irq_domain_add(of_node_to_fwnode(of_node), first_hwirq + size,
				  first_hwirq + size, 0, ops, host_data);
	if (domain)
		irq_domain_associate_many(domain, first_irq, first_hwirq, size);

	return domain;
}
EXPORT_SYMBOL_GPL(irq_domain_add_legacy);
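
/*
 * Usage sketch (illustrative only): a PC-style controller whose 16 hwirqs
 * must land on pre-allocated Linux IRQs 16..31 would register as below.
 * "np" and "my_domain_ops" (providing at least ->map()) are hypothetical.
 *
 *	domain = irq_domain_add_legacy(np, 16, 16, 0, &my_domain_ops, NULL);
 *	if (!domain)
 *		return -ENOMEM;
 */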

/**
 * irq_find_matching_fwspec() - Locates a domain for a given fwspec
 * @fwspec: FW specifier for an interrupt
 * @bus_token: domain-specific data
 */
struct irq_domain *irq_find_matching_fwspec(struct irq_fwspec *fwspec,
					    enum irq_domain_bus_token bus_token)
{
	struct irq_domain *h, *found = NULL;
	struct fwnode_handle *fwnode = fwspec->fwnode;
	int rc;

	/* We might want to match the legacy controller last since
	 * it might potentially be set to match all interrupts in
	 * the absence of a device node. This isn't a problem so far,
	 * though...
	 *
	 * bus_token == DOMAIN_BUS_ANY matches any domain, any other
	 * values must generate an exact match for the domain to be
	 * selected.
	 */
	mutex_lock(&irq_domain_mutex);
	list_for_each_entry(h, &irq_domain_list, link) {
		if (h->ops->select && fwspec->param_count)
			rc = h->ops->select(h, fwspec, bus_token);
		else if (h->ops->match)
			rc = h->ops->match(h, to_of_node(fwnode), bus_token);
		else
			rc = ((fwnode != NULL) && (h->fwnode == fwnode) &&
			      ((bus_token == DOMAIN_BUS_ANY) ||
			       (h->bus_token == bus_token)));

		if (rc) {
			found = h;
			break;
		}
	}
	mutex_unlock(&irq_domain_mutex);
	return found;
}
EXPORT_SYMBOL_GPL(irq_find_matching_fwspec);

/**
 * irq_domain_check_msi_remap - Check whether all MSI irq domains implement
 *                              IRQ remapping
 *
 * Return: false if any MSI irq domain does not support IRQ remapping,
 * true otherwise (including if there is no MSI irq domain)
 */
bool irq_domain_check_msi_remap(void)
{
	struct irq_domain *h;
	bool ret = true;

	mutex_lock(&irq_domain_mutex);
	list_for_each_entry(h, &irq_domain_list, link) {
		if (irq_domain_is_msi(h) &&
		    !irq_domain_hierarchical_is_msi_remap(h)) {
			ret = false;
			break;
		}
	}
	mutex_unlock(&irq_domain_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(irq_domain_check_msi_remap);

/**
 * irq_set_default_host() - Set a "default" irq domain
 * @domain: default domain pointer
 *
 * For convenience, it's possible to set a "default" domain that will be used
 * whenever NULL is passed to irq_create_mapping(). It makes life easier for
 * platforms that want to manipulate a few hard coded interrupt numbers that
 * aren't properly represented in the device-tree.
 */
void irq_set_default_host(struct irq_domain *domain)
{
	pr_debug("Default domain set to @0x%p\n", domain);

	irq_default_domain = domain;
}
EXPORT_SYMBOL_GPL(irq_set_default_host);

static void irq_domain_clear_mapping(struct irq_domain *domain,
				     irq_hw_number_t hwirq)
{
	if (hwirq < domain->revmap_size) {
		domain->linear_revmap[hwirq] = 0;
	} else {
		mutex_lock(&domain->revmap_tree_mutex);
		radix_tree_delete(&domain->revmap_tree, hwirq);
		mutex_unlock(&domain->revmap_tree_mutex);
	}
}

static void irq_domain_set_mapping(struct irq_domain *domain,
				   irq_hw_number_t hwirq,
				   struct irq_data *irq_data)
{
	if (hwirq < domain->revmap_size) {
		domain->linear_revmap[hwirq] = irq_data->irq;
	} else {
		mutex_lock(&domain->revmap_tree_mutex);
		radix_tree_insert(&domain->revmap_tree, hwirq, irq_data);
		mutex_unlock(&domain->revmap_tree_mutex);
	}
}

void irq_domain_disassociate(struct irq_domain *domain, unsigned int irq)
{
	struct irq_data *irq_data = irq_get_irq_data(irq);
	irq_hw_number_t hwirq;

	if (WARN(!irq_data || irq_data->domain != domain,
		 "virq%i doesn't exist; cannot disassociate\n", irq))
		return;

	hwirq = irq_data->hwirq;
	irq_set_status_flags(irq, IRQ_NOREQUEST);

	/* remove chip and handler */
	irq_set_chip_and_handler(irq, NULL, NULL);

	/* Make sure it's completed */
	synchronize_irq(irq);

	/* Tell the PIC about it */
	if (domain->ops->unmap)
		domain->ops->unmap(domain, irq);
	smp_mb();

	irq_data->domain = NULL;
	irq_data->hwirq = 0;
	domain->mapcount--;

	/* Clear reverse map for this hwirq */
	irq_domain_clear_mapping(domain, hwirq);
}

int irq_domain_associate(struct irq_domain *domain, unsigned int virq,
			 irq_hw_number_t hwirq)
{
	struct irq_data *irq_data = irq_get_irq_data(virq);
	int ret;

	if (WARN(hwirq >= domain->hwirq_max,
		 "error: hwirq 0x%x is too large for %s\n", (int)hwirq, domain->name))
		return -EINVAL;
	if (WARN(!irq_data, "error: virq%i is not allocated", virq))
		return -EINVAL;
	if (WARN(irq_data->domain, "error: virq%i is already associated", virq))
		return -EINVAL;

	mutex_lock(&irq_domain_mutex);
	irq_data->hwirq = hwirq;
	irq_data->domain = domain;
	if (domain->ops->map) {
		ret = domain->ops->map(domain, virq, hwirq);
		if (ret != 0) {
			/*
			 * If map() returns -EPERM, this interrupt is protected
			 * by the firmware or some other service and shall not
			 * be mapped. Don't bother telling the user about it.
			 */
			if (ret != -EPERM) {
				pr_info("%s didn't like hwirq-0x%lx to VIRQ%i mapping (rc=%d)\n",
					domain->name, hwirq, virq, ret);
			}
			irq_data->domain = NULL;
			irq_data->hwirq = 0;
			mutex_unlock(&irq_domain_mutex);
			return ret;
		}

		/* If not already assigned, give the domain the chip's name */
		if (!domain->name && irq_data->chip)
			domain->name = irq_data->chip->name;
	}

	domain->mapcount++;
	irq_domain_set_mapping(domain, hwirq, irq_data);
	mutex_unlock(&irq_domain_mutex);

	irq_clear_status_flags(virq, IRQ_NOREQUEST);

	return 0;
}
EXPORT_SYMBOL_GPL(irq_domain_associate);

void irq_domain_associate_many(struct irq_domain *domain, unsigned int irq_base,
			       irq_hw_number_t hwirq_base, int count)
{
	struct device_node *of_node;
	int i;

	of_node = irq_domain_get_of_node(domain);
	pr_debug("%s(%s, irqbase=%i, hwbase=%i, count=%i)\n", __func__,
		of_node_full_name(of_node), irq_base, (int)hwirq_base, count);

	for (i = 0; i < count; i++) {
		irq_domain_associate(domain, irq_base + i, hwirq_base + i);
	}
}
EXPORT_SYMBOL_GPL(irq_domain_associate_many);

/**
 * irq_create_direct_mapping() - Allocate an irq for direct mapping
 * @domain: domain to allocate the irq for or NULL for default domain
 *
 * This routine is used for irq controllers which can choose the hardware
 * interrupt numbers they generate. In such a case it's simplest to use
 * the linux irq as the hardware interrupt number. It still uses the linear
 * or radix tree to store the mapping, but the irq controller can optimize
 * the revmap path by using the hwirq directly.
 */
unsigned int irq_create_direct_mapping(struct irq_domain *domain)
{
	struct device_node *of_node;
	unsigned int virq;

	if (domain == NULL)
		domain = irq_default_domain;

	of_node = irq_domain_get_of_node(domain);
	virq = irq_alloc_desc_from(1, of_node_to_nid(of_node));
	if (!virq) {
		pr_debug("create_direct virq allocation failed\n");
		return 0;
	}
	if (virq >= domain->revmap_direct_max_irq) {
		pr_err("ERROR: no free irqs available below %i maximum\n",
			domain->revmap_direct_max_irq);
		irq_free_desc(virq);
		return 0;
	}
	pr_debug("create_direct obtained virq %d\n", virq);

	if (irq_domain_associate(domain, virq, virq)) {
		irq_free_desc(virq);
		return 0;
	}

	return virq;
}
EXPORT_SYMBOL_GPL(irq_create_direct_mapping);

/**
 * irq_create_mapping() - Map a hardware interrupt into linux irq space
 * @domain: domain owning this hardware interrupt or NULL for default domain
 * @hwirq: hardware irq number in that domain space
 *
 * Only one mapping per hardware interrupt is permitted. Returns a linux
 * irq number.
 * If the sense/trigger is to be specified, set_irq_type() should be called
 * on the number returned from that call.
 */
unsigned int irq_create_mapping(struct irq_domain *domain,
				irq_hw_number_t hwirq)
{
	struct device_node *of_node;
	int virq;

	pr_debug("irq_create_mapping(0x%p, 0x%lx)\n", domain, hwirq);

	/* Look for default domain if necessary */
	if (domain == NULL)
		domain = irq_default_domain;
	if (domain == NULL) {
		WARN(1, "%s(, %lx) called with NULL domain\n", __func__, hwirq);
		return 0;
	}
	pr_debug("-> using domain @%p\n", domain);

	of_node = irq_domain_get_of_node(domain);

	/* Check if mapping already exists */
	virq = irq_find_mapping(domain, hwirq);
	if (virq) {
		pr_debug("-> existing mapping on virq %d\n", virq);
		return virq;
	}

	/* Allocate a virtual interrupt number */
	virq = irq_domain_alloc_descs(-1, 1, hwirq, of_node_to_nid(of_node), NULL);
	if (virq <= 0) {
		pr_debug("-> virq allocation failed\n");
		return 0;
	}

	if (irq_domain_associate(domain, virq, hwirq)) {
		irq_free_desc(virq);
		return 0;
	}

	pr_debug("irq %lu on domain %s mapped to virtual irq %u\n",
		hwirq, of_node_full_name(of_node), virq);

	return virq;
}
EXPORT_SYMBOL_GPL(irq_create_mapping);
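
/*
 * Usage sketch (illustrative only): a consumer that knows the hwirq number
 * maps it first and then requests the resulting Linux irq. "my_handler",
 * "my-dev" and "priv" are hypothetical.
 *
 *	unsigned int virq = irq_create_mapping(domain, hwirq);
 *
 *	if (!virq)
 *		return -EINVAL;
 *	ret = request_irq(virq, my_handler, 0, "my-dev", priv);
 */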

/**
 * irq_create_strict_mappings() - Map a range of hw irqs to fixed linux irqs
 * @domain: domain owning the interrupt range
 * @irq_base: beginning of linux IRQ range
 * @hwirq_base: beginning of hardware IRQ range
 * @count: Number of interrupts to map
 *
 * This routine is used for allocating and mapping a range of hardware
 * irqs to linux irqs where the linux irq numbers are at pre-defined
 * locations. For use by controllers that already have static mappings
 * to insert in to the domain.
 *
 * Non-linear users can use irq_create_identity_mapping() for IRQ-at-a-time
 * domain insertion.
 *
 * 0 is returned upon success, while any failure to establish a static
 * mapping is treated as an error.
 */
int irq_create_strict_mappings(struct irq_domain *domain, unsigned int irq_base,
			       irq_hw_number_t hwirq_base, int count)
{
	struct device_node *of_node;
	int ret;

	of_node = irq_domain_get_of_node(domain);
	ret = irq_alloc_descs(irq_base, irq_base, count,
			      of_node_to_nid(of_node));
	if (unlikely(ret < 0))
		return ret;

	irq_domain_associate_many(domain, irq_base, hwirq_base, count);
	return 0;
}
EXPORT_SYMBOL_GPL(irq_create_strict_mappings);

static int irq_domain_translate(struct irq_domain *d,
				struct irq_fwspec *fwspec,
				irq_hw_number_t *hwirq, unsigned int *type)
{
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
	if (d->ops->translate)
		return d->ops->translate(d, fwspec, hwirq, type);
#endif
	if (d->ops->xlate)
		return d->ops->xlate(d, to_of_node(fwspec->fwnode),
				     fwspec->param, fwspec->param_count,
				     hwirq, type);

	/* If domain has no translation, then we assume interrupt line */
	*hwirq = fwspec->param[0];
	return 0;
}

static void of_phandle_args_to_fwspec(struct of_phandle_args *irq_data,
				      struct irq_fwspec *fwspec)
{
	int i;

	fwspec->fwnode = irq_data->np ? &irq_data->np->fwnode : NULL;
	fwspec->param_count = irq_data->args_count;

	for (i = 0; i < irq_data->args_count; i++)
		fwspec->param[i] = irq_data->args[i];
}

unsigned int irq_create_fwspec_mapping(struct irq_fwspec *fwspec)
{
	struct irq_domain *domain;
	struct irq_data *irq_data;
	irq_hw_number_t hwirq;
	unsigned int type = IRQ_TYPE_NONE;
	int virq;

	if (fwspec->fwnode) {
		domain = irq_find_matching_fwspec(fwspec, DOMAIN_BUS_WIRED);
		if (!domain)
			domain = irq_find_matching_fwspec(fwspec, DOMAIN_BUS_ANY);
	} else {
		domain = irq_default_domain;
	}

	if (!domain) {
		pr_warn("no irq domain found for %s !\n",
			of_node_full_name(to_of_node(fwspec->fwnode)));
		return 0;
	}

	if (irq_domain_translate(domain, fwspec, &hwirq, &type))
		return 0;

	/*
	 * WARN if the irqchip returns a type with bits
	 * outside the sense mask set and clear these bits.
	 */
	if (WARN_ON(type & ~IRQ_TYPE_SENSE_MASK))
		type &= IRQ_TYPE_SENSE_MASK;

	/*
	 * If we've already configured this interrupt,
	 * don't do it again, or hell will break loose.
	 */
	virq = irq_find_mapping(domain, hwirq);
	if (virq) {
		/*
		 * If the trigger type is not specified or matches the
		 * current trigger type then we are done so return the
		 * interrupt number.
		 */
		if (type == IRQ_TYPE_NONE || type == irq_get_trigger_type(virq))
			return virq;

		/*
		 * If the trigger type has not been set yet, then set
		 * it now and return the interrupt number.
		 */
		if (irq_get_trigger_type(virq) == IRQ_TYPE_NONE) {
			irq_data = irq_get_irq_data(virq);
			if (!irq_data)
				return 0;

			irqd_set_trigger_type(irq_data, type);
			return virq;
		}

		pr_warn("type mismatch, failed to map hwirq-%lu for %s!\n",
			hwirq, of_node_full_name(to_of_node(fwspec->fwnode)));
		return 0;
	}

	if (irq_domain_is_hierarchy(domain)) {
		virq = irq_domain_alloc_irqs(domain, 1, NUMA_NO_NODE, fwspec);
		if (virq <= 0)
			return 0;
	} else {
		/* Create mapping */
		virq = irq_create_mapping(domain, hwirq);
		if (!virq)
			return virq;
	}

	irq_data = irq_get_irq_data(virq);
	if (!irq_data) {
		if (irq_domain_is_hierarchy(domain))
			irq_domain_free_irqs(virq, 1);
		else
			irq_dispose_mapping(virq);
		return 0;
	}

	/* Store trigger type */
	irqd_set_trigger_type(irq_data, type);

	return virq;
}
EXPORT_SYMBOL_GPL(irq_create_fwspec_mapping);
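
/*
 * Usage sketch (illustrative only): callers that are not device-tree based
 * can build an irq_fwspec by hand; a two-cell specifier carrying the hwirq
 * and trigger type might look like this. "fwnode" and "hwirq" are assumed
 * to come from the caller.
 *
 *	struct irq_fwspec fwspec = {
 *		.fwnode		= fwnode,
 *		.param_count	= 2,
 *		.param		= { hwirq, IRQ_TYPE_LEVEL_HIGH },
 *	};
 *
 *	virq = irq_create_fwspec_mapping(&fwspec);
 */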

unsigned int irq_create_of_mapping(struct of_phandle_args *irq_data)
{
	struct irq_fwspec fwspec;

	of_phandle_args_to_fwspec(irq_data, &fwspec);
	return irq_create_fwspec_mapping(&fwspec);
}
EXPORT_SYMBOL_GPL(irq_create_of_mapping);

/**
 * irq_dispose_mapping() - Unmap an interrupt
 * @virq: linux irq number of the interrupt to unmap
 */
void irq_dispose_mapping(unsigned int virq)
{
	struct irq_data *irq_data = irq_get_irq_data(virq);
	struct irq_domain *domain;

	if (!virq || !irq_data)
		return;

	domain = irq_data->domain;
	if (WARN_ON(domain == NULL))
		return;

	if (irq_domain_is_hierarchy(domain)) {
		irq_domain_free_irqs(virq, 1);
	} else {
		irq_domain_disassociate(domain, virq);
		irq_free_desc(virq);
	}
}
EXPORT_SYMBOL_GPL(irq_dispose_mapping);

/**
 * irq_find_mapping() - Find a linux irq from an hw irq number.
 * @domain: domain owning this hardware interrupt
 * @hwirq: hardware irq number in that domain space
 */
unsigned int irq_find_mapping(struct irq_domain *domain,
			      irq_hw_number_t hwirq)
{
	struct irq_data *data;

	/* Look for default domain if necessary */
	if (domain == NULL)
		domain = irq_default_domain;
	if (domain == NULL)
		return 0;

	if (hwirq < domain->revmap_direct_max_irq) {
		data = irq_domain_get_irq_data(domain, hwirq);
		if (data && data->hwirq == hwirq)
			return hwirq;
	}

	/* Check if the hwirq is in the linear revmap. */
	if (hwirq < domain->revmap_size)
		return domain->linear_revmap[hwirq];

	rcu_read_lock();
	data = radix_tree_lookup(&domain->revmap_tree, hwirq);
	rcu_read_unlock();
	return data ? data->irq : 0;
}
EXPORT_SYMBOL_GPL(irq_find_mapping);
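
/*
 * Usage sketch (illustrative only): the classic consumer of the reverse map
 * is a chained flow handler that turns a pending hwirq bit back into the
 * Linux irq before handing it to the core. "my_domain" and
 * my_read_pending() are hypothetical.
 *
 *	static void my_chained_handler(struct irq_desc *desc)
 *	{
 *		unsigned long pending = my_read_pending();
 *		int hwirq;
 *
 *		for_each_set_bit(hwirq, &pending, 32)
 *			generic_handle_irq(irq_find_mapping(my_domain, hwirq));
 *	}
 */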

/**
 * irq_domain_xlate_onecell() - Generic xlate for direct one cell bindings
 *
 * Device Tree IRQ specifier translation function which works with one cell
 * bindings where the cell value maps directly to the hwirq number.
 */
int irq_domain_xlate_onecell(struct irq_domain *d, struct device_node *ctrlr,
			     const u32 *intspec, unsigned int intsize,
			     unsigned long *out_hwirq, unsigned int *out_type)
{
	if (WARN_ON(intsize < 1))
		return -EINVAL;
	*out_hwirq = intspec[0];
	*out_type = IRQ_TYPE_NONE;
	return 0;
}
EXPORT_SYMBOL_GPL(irq_domain_xlate_onecell);

/**
 * irq_domain_xlate_twocell() - Generic xlate for direct two cell bindings
 *
 * Device Tree IRQ specifier translation function which works with two cell
 * bindings where the cell values map directly to the hwirq number
 * and linux irq flags.
 */
int irq_domain_xlate_twocell(struct irq_domain *d, struct device_node *ctrlr,
			const u32 *intspec, unsigned int intsize,
			irq_hw_number_t *out_hwirq, unsigned int *out_type)
{
	if (WARN_ON(intsize < 2))
		return -EINVAL;
	*out_hwirq = intspec[0];
	*out_type = intspec[1] & IRQ_TYPE_SENSE_MASK;
	return 0;
}
EXPORT_SYMBOL_GPL(irq_domain_xlate_twocell);

/**
 * irq_domain_xlate_onetwocell() - Generic xlate for one or two cell bindings
 *
 * Device Tree IRQ specifier translation function which works with either one
 * or two cell bindings where the cell values map directly to the hwirq number
 * and linux irq flags.
 *
 * Note: don't use this function unless your interrupt controller explicitly
 * supports both one and two cell bindings. For the majority of controllers
 * the _onecell() or _twocell() variants above should be used.
 */
int irq_domain_xlate_onetwocell(struct irq_domain *d,
				struct device_node *ctrlr,
				const u32 *intspec, unsigned int intsize,
				unsigned long *out_hwirq, unsigned int *out_type)
{
	if (WARN_ON(intsize < 1))
		return -EINVAL;
	*out_hwirq = intspec[0];
	if (intsize > 1)
		*out_type = intspec[1] & IRQ_TYPE_SENSE_MASK;
	else
		*out_type = IRQ_TYPE_NONE;
	return 0;
}
EXPORT_SYMBOL_GPL(irq_domain_xlate_onetwocell);
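
/*
 * Usage sketch (illustrative only): a controller whose DT binding uses two
 * interrupt cells (hwirq plus trigger flags) can plug the generic helper
 * straight into its ops. "my_irq_map" is a hypothetical ->map() callback.
 *
 *	static const struct irq_domain_ops my_two_cell_ops = {
 *		.map	= my_irq_map,
 *		.xlate	= irq_domain_xlate_twocell,
 *	};
 */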

const struct irq_domain_ops irq_domain_simple_ops = {
	.xlate = irq_domain_xlate_onetwocell,
};
EXPORT_SYMBOL_GPL(irq_domain_simple_ops);

int irq_domain_alloc_descs(int virq, unsigned int cnt, irq_hw_number_t hwirq,
			   int node, const struct cpumask *affinity)
{
	unsigned int hint;

	if (virq >= 0) {
		virq = __irq_alloc_descs(virq, virq, cnt, node, THIS_MODULE,
					 affinity);
	} else {
		hint = hwirq % nr_irqs;
		if (hint == 0)
			hint++;
		virq = __irq_alloc_descs(-1, hint, cnt, node, THIS_MODULE,
					 affinity);
		if (virq <= 0 && hint > 1) {
			virq = __irq_alloc_descs(-1, 1, cnt, node, THIS_MODULE,
						 affinity);
		}
	}

	return virq;
}

#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
/**
 * irq_domain_create_hierarchy - Add an irqdomain into the hierarchy
 * @parent: Parent irq domain to associate with the new domain
 * @flags: Irq domain flags associated to the domain
 * @size: Size of the domain. See below
 * @fwnode: Optional fwnode of the interrupt controller
 * @ops: Pointer to the interrupt domain callbacks
 * @host_data: Controller private data pointer
 *
 * If @size is 0 a tree domain is created, otherwise a linear domain.
 *
 * If successful the parent is associated to the new domain and the
 * domain flags are set.
 * Returns pointer to IRQ domain, or NULL on failure.
 */
struct irq_domain *irq_domain_create_hierarchy(struct irq_domain *parent,
					       unsigned int flags,
					       unsigned int size,
					       struct fwnode_handle *fwnode,
					       const struct irq_domain_ops *ops,
					       void *host_data)
{
	struct irq_domain *domain;

	if (size)
		domain = irq_domain_create_linear(fwnode, size, ops, host_data);
	else
		domain = irq_domain_create_tree(fwnode, ops, host_data);
	if (domain) {
		domain->parent = parent;
		domain->flags |= flags;
	}

	return domain;
}
EXPORT_SYMBOL_GPL(irq_domain_create_hierarchy);
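
/*
 * Usage sketch (illustrative only): a stacked irqchip (an MSI controller,
 * for instance) creates its domain on top of "parent" and supplies
 * hierarchy-aware ops with ->alloc()/->free() (and usually ->translate()).
 * "my_hier_ops", "fwnode" and "priv" are hypothetical; size 0 selects a
 * tree domain.
 *
 *	domain = irq_domain_create_hierarchy(parent, 0, 0, fwnode,
 *					     &my_hier_ops, priv);
 *	if (!domain)
 *		return -ENOMEM;
 */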

static void irq_domain_insert_irq(int virq)
{
	struct irq_data *data;

	for (data = irq_get_irq_data(virq); data; data = data->parent_data) {
		struct irq_domain *domain = data->domain;

		domain->mapcount++;
		irq_domain_set_mapping(domain, data->hwirq, data);

		/* If not already assigned, give the domain the chip's name */
		if (!domain->name && data->chip)
			domain->name = data->chip->name;
	}

	irq_clear_status_flags(virq, IRQ_NOREQUEST);
}

static void irq_domain_remove_irq(int virq)
{
	struct irq_data *data;

	irq_set_status_flags(virq, IRQ_NOREQUEST);
	irq_set_chip_and_handler(virq, NULL, NULL);
	synchronize_irq(virq);
	smp_mb();

	for (data = irq_get_irq_data(virq); data; data = data->parent_data) {
		struct irq_domain *domain = data->domain;
		irq_hw_number_t hwirq = data->hwirq;

		domain->mapcount--;
		irq_domain_clear_mapping(domain, hwirq);
	}
}

static struct irq_data *irq_domain_insert_irq_data(struct irq_domain *domain,
						   struct irq_data *child)
{
	struct irq_data *irq_data;

	irq_data = kzalloc_node(sizeof(*irq_data), GFP_KERNEL,
				irq_data_get_node(child));
	if (irq_data) {
		child->parent_data = irq_data;
		irq_data->irq = child->irq;
		irq_data->common = child->common;
		irq_data->domain = domain;
	}

	return irq_data;
}

static void irq_domain_free_irq_data(unsigned int virq, unsigned int nr_irqs)
{
	struct irq_data *irq_data, *tmp;
	int i;

	for (i = 0; i < nr_irqs; i++) {
		irq_data = irq_get_irq_data(virq + i);
		tmp = irq_data->parent_data;
		irq_data->parent_data = NULL;
		irq_data->domain = NULL;

		while (tmp) {
			irq_data = tmp;
			tmp = tmp->parent_data;
			kfree(irq_data);
		}
	}
}

static int irq_domain_alloc_irq_data(struct irq_domain *domain,
				     unsigned int virq, unsigned int nr_irqs)
{
	struct irq_data *irq_data;
	struct irq_domain *parent;
	int i;

	/* The outermost irq_data is embedded in struct irq_desc */
	for (i = 0; i < nr_irqs; i++) {
		irq_data = irq_get_irq_data(virq + i);
		irq_data->domain = domain;

		for (parent = domain->parent; parent; parent = parent->parent) {
			irq_data = irq_domain_insert_irq_data(parent, irq_data);
			if (!irq_data) {
				irq_domain_free_irq_data(virq, i + 1);
				return -ENOMEM;
			}
		}
	}

	return 0;
}

/**
 * irq_domain_get_irq_data - Get irq_data associated with @virq and @domain
 * @domain: domain to match
 * @virq: IRQ number to get irq_data
 */
struct irq_data *irq_domain_get_irq_data(struct irq_domain *domain,
					 unsigned int virq)
{
	struct irq_data *irq_data;

	for (irq_data = irq_get_irq_data(virq); irq_data;
	     irq_data = irq_data->parent_data)
		if (irq_data->domain == domain)
			return irq_data;

	return NULL;
}
EXPORT_SYMBOL_GPL(irq_domain_get_irq_data);

/**
 * irq_domain_set_hwirq_and_chip - Set hwirq and irqchip of @virq at @domain
 * @domain: Interrupt domain to match
 * @virq: IRQ number
 * @hwirq: The hwirq number
 * @chip: The associated interrupt chip
 * @chip_data: The associated chip data
 */
int irq_domain_set_hwirq_and_chip(struct irq_domain *domain, unsigned int virq,
				  irq_hw_number_t hwirq, struct irq_chip *chip,
				  void *chip_data)
{
	struct irq_data *irq_data = irq_domain_get_irq_data(domain, virq);

	if (!irq_data)
		return -ENOENT;

	irq_data->hwirq = hwirq;
	irq_data->chip = chip ? chip : &no_irq_chip;
	irq_data->chip_data = chip_data;

	return 0;
}
EXPORT_SYMBOL_GPL(irq_domain_set_hwirq_and_chip);

/**
 * irq_domain_set_info - Set the complete data for a @virq in @domain
 * @domain: Interrupt domain to match
 * @virq: IRQ number
 * @hwirq: The hardware interrupt number
 * @chip: The associated interrupt chip
 * @chip_data: The associated interrupt chip data
 * @handler: The interrupt flow handler
 * @handler_data: The interrupt flow handler data
 * @handler_name: The interrupt handler name
 */
void irq_domain_set_info(struct irq_domain *domain, unsigned int virq,
			 irq_hw_number_t hwirq, struct irq_chip *chip,
			 void *chip_data, irq_flow_handler_t handler,
			 void *handler_data, const char *handler_name)
{
	irq_domain_set_hwirq_and_chip(domain, virq, hwirq, chip, chip_data);
	__irq_set_handler(virq, handler, 0, handler_name);
	irq_set_handler_data(virq, handler_data);
}
EXPORT_SYMBOL(irq_domain_set_info);

/**
 * irq_domain_reset_irq_data - Clear hwirq, chip and chip_data in @irq_data
 * @irq_data: The pointer to irq_data
 */
void irq_domain_reset_irq_data(struct irq_data *irq_data)
{
	irq_data->hwirq = 0;
	irq_data->chip = &no_irq_chip;
	irq_data->chip_data = NULL;
}
EXPORT_SYMBOL_GPL(irq_domain_reset_irq_data);

/**
 * irq_domain_free_irqs_common - Clear irq_data and free the parent
 * @domain: Interrupt domain to match
 * @virq: IRQ number to start with
 * @nr_irqs: The number of irqs to free
 */
void irq_domain_free_irqs_common(struct irq_domain *domain, unsigned int virq,
				 unsigned int nr_irqs)
{
	struct irq_data *irq_data;
	int i;

	for (i = 0; i < nr_irqs; i++) {
		irq_data = irq_domain_get_irq_data(domain, virq + i);
		if (irq_data)
			irq_domain_reset_irq_data(irq_data);
	}
	irq_domain_free_irqs_parent(domain, virq, nr_irqs);
}
EXPORT_SYMBOL_GPL(irq_domain_free_irqs_common);

/**
 * irq_domain_free_irqs_top - Clear handler and handler data, clear irqdata and free parent
 * @domain: Interrupt domain to match
 * @virq: IRQ number to start with
 * @nr_irqs: The number of irqs to free
 */
void irq_domain_free_irqs_top(struct irq_domain *domain, unsigned int virq,
			      unsigned int nr_irqs)
{
	int i;

	for (i = 0; i < nr_irqs; i++) {
		irq_set_handler_data(virq + i, NULL);
		irq_set_handler(virq + i, NULL);
	}
	irq_domain_free_irqs_common(domain, virq, nr_irqs);
}

static void irq_domain_free_irqs_hierarchy(struct irq_domain *domain,
					   unsigned int irq_base,
					   unsigned int nr_irqs)
{
	if (domain->ops->free)
		domain->ops->free(domain, irq_base, nr_irqs);
}

int irq_domain_alloc_irqs_hierarchy(struct irq_domain *domain,
				    unsigned int irq_base,
				    unsigned int nr_irqs, void *arg)
{
	return domain->ops->alloc(domain, irq_base, nr_irqs, arg);
}

Jiang Liuf8264e32014-11-06 22:20:14 +08001258/**
1259 * __irq_domain_alloc_irqs - Allocate IRQs from domain
1260 * @domain: domain to allocate from
1261 * @irq_base: allocate the specified IRQ number if irq_base >= 0
1262 * @nr_irqs: number of IRQs to allocate
1263 * @node: NUMA node id for memory allocation
1264 * @arg: domain specific argument
1265 * @realloc: IRQ descriptors have already been allocated if true
Thomas Gleixner06ee6d52016-07-04 17:39:24 +09001266 * @affinity: Optional irq affinity mask for multiqueue devices
Jiang Liuf8264e32014-11-06 22:20:14 +08001267 *
1268 * Allocate IRQ numbers and initialize all data structures to support
1269 * hierarchy IRQ domains.
1270 * Parameter @realloc is mainly to support legacy IRQs.
1271 * Returns the allocated IRQ number on success or a negative error code.
1272 *
1273 * The whole process of setting up an IRQ has been split into two steps.
1274 * The first step, __irq_domain_alloc_irqs(), allocates the IRQ
1275 * descriptors and the required hardware resources. The second step,
1276 * irq_domain_activate_irq(), programs the hardware with the preallocated
1277 * resources. This split makes it easier to roll back when resource
1278 * allocation fails.
1279 */
1280int __irq_domain_alloc_irqs(struct irq_domain *domain, int irq_base,
1281 unsigned int nr_irqs, int node, void *arg,
Thomas Gleixner06ee6d52016-07-04 17:39:24 +09001282 bool realloc, const struct cpumask *affinity)
Jiang Liuf8264e32014-11-06 22:20:14 +08001283{
1284 int i, ret, virq;
1285
1286 if (domain == NULL) {
1287 domain = irq_default_domain;
1288 if (WARN(!domain, "domain is NULL; cannot allocate IRQ\n"))
1289 return -EINVAL;
1290 }
1291
1292 if (!domain->ops->alloc) {
1293 pr_debug("domain->ops->alloc() is NULL\n");
1294 return -ENOSYS;
1295 }
1296
1297 if (realloc && irq_base >= 0) {
1298 virq = irq_base;
1299 } else {
Thomas Gleixner06ee6d52016-07-04 17:39:24 +09001300 virq = irq_domain_alloc_descs(irq_base, nr_irqs, 0, node,
1301 affinity);
Jiang Liuf8264e32014-11-06 22:20:14 +08001302 if (virq < 0) {
1303 pr_debug("cannot allocate IRQ(base %d, count %d)\n",
1304 irq_base, nr_irqs);
1305 return virq;
1306 }
1307 }
1308
1309 if (irq_domain_alloc_irq_data(domain, virq, nr_irqs)) {
1310 pr_debug("cannot allocate memory for IRQ%d\n", virq);
1311 ret = -ENOMEM;
1312 goto out_free_desc;
1313 }
1314
1315 mutex_lock(&irq_domain_mutex);
Marc Zyngier6a6544e2017-06-20 22:17:44 +01001316 ret = irq_domain_alloc_irqs_hierarchy(domain, virq, nr_irqs, arg);
Jiang Liuf8264e32014-11-06 22:20:14 +08001317 if (ret < 0) {
1318 mutex_unlock(&irq_domain_mutex);
1319 goto out_free_irq_data;
1320 }
1321 for (i = 0; i < nr_irqs; i++)
1322 irq_domain_insert_irq(virq + i);
1323 mutex_unlock(&irq_domain_mutex);
1324
1325 return virq;
1326
1327out_free_irq_data:
1328 irq_domain_free_irq_data(virq, nr_irqs);
1329out_free_desc:
1330 irq_free_descs(virq, nr_irqs);
1331 return ret;
1332}
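
/*
 * Illustrative sketch (not part of this file): a hypothetical consumer
 * allocating two Linux interrupts from a hierarchical domain.  The
 * @foo_domain pointer and the firmware-specific @fwspec argument are
 * assumptions of the example.
 */
static int foo_setup_irqs(struct irq_domain *foo_domain, void *fwspec)
{
	int virq;

	/* Step one: allocate descriptors, irq_data and hardware resources. */
	virq = __irq_domain_alloc_irqs(foo_domain, -1, 2, NUMA_NO_NODE,
				       fwspec, false, NULL);
	if (virq < 0)
		return virq;

	/*
	 * Step two, programming the hardware, happens later through
	 * irq_domain_activate_irq(), normally from irq_startup() when the
	 * interrupt is requested.
	 */
	return virq;
}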
1333
David Daney495c38d2017-08-17 17:53:34 -07001334/* The irq_data was moved, fix the revmap to refer to the new location */
1335static void irq_domain_fix_revmap(struct irq_data *d)
1336{
Masahiro Yamadad03cc2d2017-09-22 21:20:41 +09001337 void __rcu **slot;
David Daney495c38d2017-08-17 17:53:34 -07001338
1339 if (d->hwirq < d->domain->revmap_size)
1340 return; /* Not using radix tree. */
1341
1342 /* Fix up the revmap. */
Masahiro Yamadaf1d78352017-10-05 10:44:54 +09001343 mutex_lock(&d->domain->revmap_tree_mutex);
David Daney495c38d2017-08-17 17:53:34 -07001344 slot = radix_tree_lookup_slot(&d->domain->revmap_tree, d->hwirq);
1345 if (slot)
1346 radix_tree_replace_slot(&d->domain->revmap_tree, slot, d);
Masahiro Yamadaf1d78352017-10-05 10:44:54 +09001347 mutex_unlock(&d->domain->revmap_tree_mutex);
David Daney495c38d2017-08-17 17:53:34 -07001348}
1349
1350/**
1351 * irq_domain_push_irq() - Push a domain into the top of a hierarchy.
1352 * @domain: Domain to push.
1353 * @virq: Irq to push the domain into.
1354 * @arg: Passed to the irq_domain_ops alloc() function.
1355 *
1356 * For an already existing irqdomain hierarchy, as might be obtained
1357 * via a call to pci_enable_msix(), add an additional domain to the
1358 * head of the processing chain. Must be called before request_irq()
1359 * has been called.
1360 */
1361int irq_domain_push_irq(struct irq_domain *domain, int virq, void *arg)
1362{
1363 struct irq_data *child_irq_data;
1364 struct irq_data *root_irq_data = irq_get_irq_data(virq);
1365 struct irq_desc *desc;
1366 int rv = 0;
1367
1368 /*
1369 * Check that no action has been set, which indicates the virq
1370 * is in a state where this function doesn't have to deal with
1371 * races between interrupt handling and maintaining the
1372 * hierarchy. This will catch gross misuse. Attempting to
1373 * make the check race free would require holding locks across
1374 * calls to struct irq_domain_ops->alloc(), which could lead
1375 * to deadlock, so we just do a simple check before starting.
1376 */
1377 desc = irq_to_desc(virq);
1378 if (!desc)
1379 return -EINVAL;
1380 if (WARN_ON(desc->action))
1381 return -EBUSY;
1382
1383 if (domain == NULL)
1384 return -EINVAL;
1385
1386 if (WARN_ON(!irq_domain_is_hierarchy(domain)))
1387 return -EINVAL;
1388
Dan Carpenter20c4d492017-08-25 15:14:09 +03001389 if (!root_irq_data)
David Daney495c38d2017-08-17 17:53:34 -07001390 return -EINVAL;
1391
Dan Carpenter20c4d492017-08-25 15:14:09 +03001392 if (domain->parent != root_irq_data->domain)
David Daney495c38d2017-08-17 17:53:34 -07001393 return -EINVAL;
1394
1395 child_irq_data = kzalloc_node(sizeof(*child_irq_data), GFP_KERNEL,
1396 irq_data_get_node(root_irq_data));
1397 if (!child_irq_data)
1398 return -ENOMEM;
1399
1400 mutex_lock(&irq_domain_mutex);
1401
1402 /* Copy the original irq_data. */
1403 *child_irq_data = *root_irq_data;
1404
1405 /*
1406 * Overwrite the root_irq_data, which is embedded in struct
1407 * irq_desc, with values for this domain.
1408 */
1409 root_irq_data->parent_data = child_irq_data;
1410 root_irq_data->domain = domain;
1411 root_irq_data->mask = 0;
1412 root_irq_data->hwirq = 0;
1413 root_irq_data->chip = NULL;
1414 root_irq_data->chip_data = NULL;
1415
1416 /* May (probably does) set hwirq, chip, etc. */
1417 rv = irq_domain_alloc_irqs_hierarchy(domain, virq, 1, arg);
1418 if (rv) {
1419 /* Restore the original irq_data. */
1420 *root_irq_data = *child_irq_data;
1421 goto error;
1422 }
1423
1424 irq_domain_fix_revmap(child_irq_data);
1425 irq_domain_set_mapping(domain, root_irq_data->hwirq, root_irq_data);
1426
1427error:
1428 mutex_unlock(&irq_domain_mutex);
1429
1430 return rv;
1431}
1432EXPORT_SYMBOL_GPL(irq_domain_push_irq);
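
/*
 * Illustrative sketch (not part of this file): inserting a hypothetical
 * "foo" domain above an interrupt that was already allocated, e.g. an
 * MSI-X vector.  @foo_domain and @alloc_arg are assumptions.
 */
static int foo_push_on_irq(struct irq_domain *foo_domain, int virq,
			   void *alloc_arg)
{
	/* Must happen before request_irq() is called on @virq. */
	return irq_domain_push_irq(foo_domain, virq, alloc_arg);
}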
1433
1434/**
1435 * irq_domain_pop_irq() - Remove a domain from the top of a hierarchy.
1436 * @domain: Domain to remove.
1437 * @virq: Irq to remove the domain from.
1438 *
1439 * Undo the effects of a call to irq_domain_push_irq(). Must be
1440 * called either before request_irq() or after free_irq().
1441 */
1442int irq_domain_pop_irq(struct irq_domain *domain, int virq)
1443{
1444 struct irq_data *root_irq_data = irq_get_irq_data(virq);
1445 struct irq_data *child_irq_data;
1446 struct irq_data *tmp_irq_data;
1447 struct irq_desc *desc;
1448
1449 /*
1450 * Check that no action is set, which indicates the virq is in
1451 * a state where this function doesn't have to deal with races
1452 * between interrupt handling and maintaining the hierarchy.
1453 * This will catch gross misuse. Attempting to make the check
1454 * race free would require holding locks across calls to
1455 * struct irq_domain_ops->free(), which could lead to
1456 * deadlock, so we just do a simple check before starting.
1457 */
1458 desc = irq_to_desc(virq);
1459 if (!desc)
1460 return -EINVAL;
1461 if (WARN_ON(desc->action))
1462 return -EBUSY;
1463
1464 if (domain == NULL)
1465 return -EINVAL;
1466
1467 if (!root_irq_data)
1468 return -EINVAL;
1469
1470 tmp_irq_data = irq_domain_get_irq_data(domain, virq);
1471
1472 /* We can only "pop" if this domain is at the top of the list */
1473 if (WARN_ON(root_irq_data != tmp_irq_data))
1474 return -EINVAL;
1475
1476 if (WARN_ON(root_irq_data->domain != domain))
1477 return -EINVAL;
1478
1479 child_irq_data = root_irq_data->parent_data;
1480 if (WARN_ON(!child_irq_data))
1481 return -EINVAL;
1482
1483 mutex_lock(&irq_domain_mutex);
1484
1485 root_irq_data->parent_data = NULL;
1486
1487 irq_domain_clear_mapping(domain, root_irq_data->hwirq);
1488 irq_domain_free_irqs_hierarchy(domain, virq, 1);
1489
1490 /* Restore the original irq_data. */
1491 *root_irq_data = *child_irq_data;
1492
1493 irq_domain_fix_revmap(root_irq_data);
1494
1495 mutex_unlock(&irq_domain_mutex);
1496
1497 kfree(child_irq_data);
1498
1499 return 0;
1500}
1501EXPORT_SYMBOL_GPL(irq_domain_pop_irq);
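
/*
 * Illustrative sketch (not part of this file): the counterpart of the
 * push sketch above, run after free_irq() on @virq.
 */
static int foo_pop_from_irq(struct irq_domain *foo_domain, int virq)
{
	return irq_domain_pop_irq(foo_domain, virq);
}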
1502
Jiang Liuf8264e32014-11-06 22:20:14 +08001503/**
1504 * irq_domain_free_irqs - Free IRQ number and associated data structures
1505 * @virq: base IRQ number
1506 * @nr_irqs: number of IRQs to free
1507 */
1508void irq_domain_free_irqs(unsigned int virq, unsigned int nr_irqs)
1509{
1510 struct irq_data *data = irq_get_irq_data(virq);
1511 int i;
1512
1513 if (WARN(!data || !data->domain || !data->domain->ops->free,
1514 "NULL pointer, cannot free irq\n"))
1515 return;
1516
1517 mutex_lock(&irq_domain_mutex);
1518 for (i = 0; i < nr_irqs; i++)
1519 irq_domain_remove_irq(virq + i);
Marc Zyngier6a6544e2017-06-20 22:17:44 +01001520 irq_domain_free_irqs_hierarchy(data->domain, virq, nr_irqs);
Jiang Liuf8264e32014-11-06 22:20:14 +08001521 mutex_unlock(&irq_domain_mutex);
1522
1523 irq_domain_free_irq_data(virq, nr_irqs);
1524 irq_free_descs(virq, nr_irqs);
1525}
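
/*
 * Illustrative sketch (not part of this file): tearing down the two
 * interrupts obtained in the allocation sketch further above.
 */
static void foo_teardown_irqs(int virq)
{
	irq_domain_free_irqs(virq, 2);
}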
1526
1527/**
Jiang Liu36d72732014-11-15 22:24:01 +08001528 * irq_domain_alloc_irqs_parent - Allocate interrupts from parent domain
1529 * @irq_base: Base IRQ number
1530 * @nr_irqs: Number of IRQs to allocate
1531 * @arg: Allocation data (arch/domain specific)
1532 *
1533 * Check whether the domain has been set up recursively. If not, allocate
1534 * through the parent domain.
1535 */
1536int irq_domain_alloc_irqs_parent(struct irq_domain *domain,
1537 unsigned int irq_base, unsigned int nr_irqs,
1538 void *arg)
1539{
Marc Zyngier6a6544e2017-06-20 22:17:44 +01001540 if (!domain->parent)
1541 return -ENOSYS;
Jiang Liu36d72732014-11-15 22:24:01 +08001542
Marc Zyngier6a6544e2017-06-20 22:17:44 +01001543 return irq_domain_alloc_irqs_hierarchy(domain->parent, irq_base,
1544 nr_irqs, arg);
Jiang Liu36d72732014-11-15 22:24:01 +08001545}
Quan Nguyen52b2a052016-03-03 21:56:52 +07001546EXPORT_SYMBOL_GPL(irq_domain_alloc_irqs_parent);
Jiang Liu36d72732014-11-15 22:24:01 +08001547
1548/**
1549 * irq_domain_free_irqs_parent - Free interrupts from parent domain
1550 * @irq_base: Base IRQ number
1551 * @nr_irqs: Number of IRQs to free
1552 *
1553 * Check whether the domain has been set up recursively. If not, free
1554 * through the parent domain.
1555 */
1556void irq_domain_free_irqs_parent(struct irq_domain *domain,
1557 unsigned int irq_base, unsigned int nr_irqs)
1558{
Marc Zyngier6a6544e2017-06-20 22:17:44 +01001559 if (!domain->parent)
1560 return;
1561
1562 irq_domain_free_irqs_hierarchy(domain->parent, irq_base, nr_irqs);
Jiang Liu36d72732014-11-15 22:24:01 +08001563}
Quan Nguyen52b2a052016-03-03 21:56:52 +07001564EXPORT_SYMBOL_GPL(irq_domain_free_irqs_parent);
Jiang Liu36d72732014-11-15 22:24:01 +08001565
Marc Zyngier08d85f32017-01-17 16:00:48 +00001566static void __irq_domain_deactivate_irq(struct irq_data *irq_data)
1567{
1568 if (irq_data && irq_data->domain) {
1569 struct irq_domain *domain = irq_data->domain;
1570
1571 if (domain->ops->deactivate)
1572 domain->ops->deactivate(domain, irq_data);
1573 if (irq_data->parent_data)
1574 __irq_domain_deactivate_irq(irq_data->parent_data);
1575 }
1576}
1577
Thomas Gleixner702cb0a2017-12-29 16:59:06 +01001578static int __irq_domain_activate_irq(struct irq_data *irqd, bool reserve)
Thomas Gleixnerbb9b4282017-09-13 23:29:11 +02001579{
1580 int ret = 0;
1581
1582 if (irqd && irqd->domain) {
1583 struct irq_domain *domain = irqd->domain;
1584
1585 if (irqd->parent_data)
Thomas Gleixner42e1cc22017-09-13 23:29:12 +02001586 ret = __irq_domain_activate_irq(irqd->parent_data,
Thomas Gleixner702cb0a2017-12-29 16:59:06 +01001587 reserve);
Thomas Gleixnerbb9b4282017-09-13 23:29:11 +02001588 if (!ret && domain->ops->activate) {
Thomas Gleixner702cb0a2017-12-29 16:59:06 +01001589 ret = domain->ops->activate(domain, irqd, reserve);
Thomas Gleixnerbb9b4282017-09-13 23:29:11 +02001590 /* Rollback in case of error */
1591 if (ret && irqd->parent_data)
1592 __irq_domain_deactivate_irq(irqd->parent_data);
1593 }
1594 }
1595 return ret;
1596}
1597
Jiang Liu36d72732014-11-15 22:24:01 +08001598/**
Jiang Liuf8264e32014-11-06 22:20:14 +08001599 * irq_domain_activate_irq - Call domain_ops->activate recursively to activate
1600 * interrupt
Thomas Gleixner702cb0a2017-12-29 16:59:06 +01001601 * @irq_data: Outermost irq_data associated with interrupt
1602 * @reserve: If set only reserve an interrupt vector instead of assigning one
Jiang Liuf8264e32014-11-06 22:20:14 +08001603 *
1604 * This is the second step: it calls domain_ops->activate to program the
1605 * interrupt controllers, so the interrupt can actually be delivered.
1606 */
Thomas Gleixner702cb0a2017-12-29 16:59:06 +01001607int irq_domain_activate_irq(struct irq_data *irq_data, bool reserve)
Jiang Liuf8264e32014-11-06 22:20:14 +08001608{
Thomas Gleixnerbb9b4282017-09-13 23:29:11 +02001609 int ret = 0;
1610
1611 if (!irqd_is_activated(irq_data))
Thomas Gleixner702cb0a2017-12-29 16:59:06 +01001612 ret = __irq_domain_activate_irq(irq_data, reserve);
Thomas Gleixnerbb9b4282017-09-13 23:29:11 +02001613 if (!ret)
Marc Zyngier08d85f32017-01-17 16:00:48 +00001614 irqd_set_activated(irq_data);
Thomas Gleixnerbb9b4282017-09-13 23:29:11 +02001615 return ret;
Jiang Liuf8264e32014-11-06 22:20:14 +08001616}
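
/*
 * Illustrative sketch (not part of this file): an optional .activate()
 * callback, invoked parent-first by __irq_domain_activate_irq().  The
 * actual hardware programming is elided; a non-zero return value would
 * trigger the rollback path above.
 */
static int foo_irq_domain_activate(struct irq_domain *domain,
				   struct irq_data *irqd, bool reserve)
{
	/* Program the route for irqd->hwirq here, or only reserve it. */
	return 0;
}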
1617
1618/**
1619 * irq_domain_deactivate_irq - Call domain_ops->deactivate recursively to
1620 * deactivate interrupt
1621 * @irq_data: outermost irq_data associated with interrupt
1622 *
1623 * It calls domain_ops->deactivate to program interrupt controllers to disable
1624 * interrupt delivery.
1625 */
1626void irq_domain_deactivate_irq(struct irq_data *irq_data)
1627{
Marc Zyngier08d85f32017-01-17 16:00:48 +00001628 if (irqd_is_activated(irq_data)) {
1629 __irq_domain_deactivate_irq(irq_data);
1630 irqd_clr_activated(irq_data);
Jiang Liuf8264e32014-11-06 22:20:14 +08001631 }
1632}
1633
1634static void irq_domain_check_hierarchy(struct irq_domain *domain)
1635{
1636 /* Hierarchy irq_domains must implement callback alloc() */
1637 if (domain->ops->alloc)
1638 domain->flags |= IRQ_DOMAIN_FLAG_HIERARCHY;
1639}
Eric Auger631a9632017-01-19 20:57:57 +00001640
1641/**
1642 * irq_domain_hierarchical_is_msi_remap - Check if the domain or any
1643 * parent has MSI remapping support
1644 * @domain: domain pointer
1645 */
1646bool irq_domain_hierarchical_is_msi_remap(struct irq_domain *domain)
1647{
1648 for (; domain; domain = domain->parent) {
1649 if (irq_domain_is_msi_remap(domain))
1650 return true;
1651 }
1652 return false;
1653}
Jiang Liuf8264e32014-11-06 22:20:14 +08001654#else /* CONFIG_IRQ_DOMAIN_HIERARCHY */
1655/**
1656 * irq_domain_get_irq_data - Get irq_data associated with @virq and @domain
1657 * @domain: domain to match
1658 * @virq: IRQ number to get irq_data
1659 */
1660struct irq_data *irq_domain_get_irq_data(struct irq_domain *domain,
1661 unsigned int virq)
1662{
1663 struct irq_data *irq_data = irq_get_irq_data(virq);
1664
1665 return (irq_data && irq_data->domain == domain) ? irq_data : NULL;
1666}
Jake Oshinsa4289dc2015-12-10 17:52:59 +00001667EXPORT_SYMBOL_GPL(irq_domain_get_irq_data);
Jiang Liuf8264e32014-11-06 22:20:14 +08001668
Stefan Agner5f22f5c2015-05-16 11:44:13 +02001669/**
1670 * irq_domain_set_info - Set the complete data for a @virq in @domain
1671 * @domain: Interrupt domain to match
1672 * @virq: IRQ number
1673 * @hwirq: The hardware interrupt number
1674 * @chip: The associated interrupt chip
1675 * @chip_data: The associated interrupt chip data
1676 * @handler: The interrupt flow handler
1677 * @handler_data: The interrupt flow handler data
1678 * @handler_name: The interrupt handler name
1679 */
1680void irq_domain_set_info(struct irq_domain *domain, unsigned int virq,
1681 irq_hw_number_t hwirq, struct irq_chip *chip,
1682 void *chip_data, irq_flow_handler_t handler,
1683 void *handler_data, const char *handler_name)
1684{
1685 irq_set_chip_and_handler_name(virq, chip, handler, handler_name);
1686 irq_set_chip_data(virq, chip_data);
1687 irq_set_handler_data(virq, handler_data);
1688}
1689
Jiang Liuf8264e32014-11-06 22:20:14 +08001690static void irq_domain_check_hierarchy(struct irq_domain *domain)
1691{
1692}
1693#endif /* CONFIG_IRQ_DOMAIN_HIERARCHY */
Thomas Gleixner087cdfb2017-06-20 01:37:17 +02001694
1695#ifdef CONFIG_GENERIC_IRQ_DEBUGFS
1696static struct dentry *domain_dir;
1697
1698static void
1699irq_domain_debug_show_one(struct seq_file *m, struct irq_domain *d, int ind)
1700{
1701 seq_printf(m, "%*sname: %s\n", ind, "", d->name);
1702 seq_printf(m, "%*ssize: %u\n", ind + 1, "",
1703 d->revmap_size + d->revmap_direct_max_irq);
1704 seq_printf(m, "%*smapped: %u\n", ind + 1, "", d->mapcount);
1705	seq_printf(m, "%*sflags: 0x%08x\n", ind + 1, "", d->flags);
Thomas Gleixnerc3e72392017-09-13 23:29:06 +02001706 if (d->ops && d->ops->debug_show)
1707 d->ops->debug_show(m, d, NULL, ind + 1);
Thomas Gleixner087cdfb2017-06-20 01:37:17 +02001708#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
1709 if (!d->parent)
1710 return;
1711 seq_printf(m, "%*sparent: %s\n", ind + 1, "", d->parent->name);
1712 irq_domain_debug_show_one(m, d->parent, ind + 4);
1713#endif
1714}
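
/*
 * Illustrative sketch (not part of this file): a domain can expose extra
 * state in this debugfs file through the optional .debug_show() callback,
 * which irq_domain_debug_show_one() above invokes with irqd == NULL.
 */
static void foo_irq_domain_debug_show(struct seq_file *m, struct irq_domain *d,
				      struct irq_data *irqd, int ind)
{
	if (!irqd)
		seq_printf(m, "%*sfoo state: idle\n", ind, "");
}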
1715
1716static int irq_domain_debug_show(struct seq_file *m, void *p)
1717{
1718 struct irq_domain *d = m->private;
1719
1720 /* Default domain? Might be NULL */
1721 if (!d) {
1722 if (!irq_default_domain)
1723 return 0;
1724 d = irq_default_domain;
1725 }
1726 irq_domain_debug_show_one(m, d, 0);
1727 return 0;
1728}
Andy Shevchenko0b24a0b2018-02-14 17:47:35 +02001729DEFINE_SHOW_ATTRIBUTE(irq_domain_debug);
Thomas Gleixner087cdfb2017-06-20 01:37:17 +02001730
1731static void debugfs_add_domain_dir(struct irq_domain *d)
1732{
1733 if (!d->name || !domain_dir || d->debugfs_file)
1734 return;
1735 d->debugfs_file = debugfs_create_file(d->name, 0444, domain_dir, d,
Andy Shevchenko0b24a0b2018-02-14 17:47:35 +02001736 &irq_domain_debug_fops);
Thomas Gleixner087cdfb2017-06-20 01:37:17 +02001737}
1738
1739static void debugfs_remove_domain_dir(struct irq_domain *d)
1740{
Thomas Gleixnerf610c9d2017-07-07 08:57:57 +02001741 debugfs_remove(d->debugfs_file);
Thomas Gleixner087cdfb2017-06-20 01:37:17 +02001742}
1743
1744void __init irq_domain_debugfs_init(struct dentry *root)
1745{
1746 struct irq_domain *d;
1747
1748 domain_dir = debugfs_create_dir("domains", root);
1749 if (!domain_dir)
1750 return;
1751
Andy Shevchenko0b24a0b2018-02-14 17:47:35 +02001752 debugfs_create_file("default", 0444, domain_dir, NULL,
1753 &irq_domain_debug_fops);
Thomas Gleixner087cdfb2017-06-20 01:37:17 +02001754 mutex_lock(&irq_domain_mutex);
1755 list_for_each_entry(d, &irq_domain_list, link)
1756 debugfs_add_domain_dir(d);
1757 mutex_unlock(&irq_domain_mutex);
1758}
1759#endif