// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2014 Intel Corp.
 * Author: Jiang Liu <jiang.liu@linux.intel.com>
 *
 * This file is licensed under GPLv2.
 *
 * This file contains common code to support Message Signaled Interrupts for
 * PCI-compatible and non-PCI-compatible devices.
 */
#include <linux/types.h>
#include <linux/device.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/msi.h>
#include <linux/slab.h>
#include <linux/sysfs.h>
#include <linux/pci.h>

#include "internals.h"

/**
 * alloc_msi_entry - Allocate an initialized msi_desc
 * @dev:	Pointer to the device for which this is allocated
 * @nvec:	The number of vectors used in this entry
 * @affinity:	Optional pointer to an affinity mask array of size @nvec
 *
 * If @affinity is not %NULL then an affinity array[@nvec] is allocated
 * and the affinity masks and flags from @affinity are copied.
 *
 * Return: pointer to allocated &msi_desc on success or %NULL on failure
 */
struct msi_desc *alloc_msi_entry(struct device *dev, int nvec,
				 const struct irq_affinity_desc *affinity)
{
	struct msi_desc *desc;

	desc = kzalloc(sizeof(*desc), GFP_KERNEL);
	if (!desc)
		return NULL;

	INIT_LIST_HEAD(&desc->list);
	desc->dev = dev;
	desc->nvec_used = nvec;
	if (affinity) {
		desc->affinity = kmemdup(affinity,
			nvec * sizeof(*desc->affinity), GFP_KERNEL);
		if (!desc->affinity) {
			kfree(desc);
			return NULL;
		}
	}

	return desc;
}

void free_msi_entry(struct msi_desc *entry)
{
	kfree(entry->affinity);
	kfree(entry);
}

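/*
 * Usage sketch (hypothetical helper; only the alloc_msi_entry() /
 * free_msi_entry() calling convention is taken from this file): bus
 * support code typically allocates one descriptor per vector block and
 * queues it on the device's MSI list:
 *
 *	static struct msi_desc *example_add_msi_desc(struct device *dev,
 *						     int nvec)
 *	{
 *		struct msi_desc *desc = alloc_msi_entry(dev, nvec, NULL);
 *
 *		if (desc)
 *			list_add_tail(&desc->list, dev_to_msi_list(dev));
 *		return desc;
 *	}
 */
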
void __get_cached_msi_msg(struct msi_desc *entry, struct msi_msg *msg)
{
	*msg = entry->msg;
}

void get_cached_msi_msg(unsigned int irq, struct msi_msg *msg)
{
	struct msi_desc *entry = irq_get_msi_desc(irq);

	__get_cached_msi_msg(entry, msg);
}
EXPORT_SYMBOL_GPL(get_cached_msi_msg);

static void msi_device_data_release(struct device *dev, void *res)
{
	WARN_ON_ONCE(!list_empty(&dev->msi_list));
	dev->msi.data = NULL;
}

/**
 * msi_setup_device_data - Set up MSI device data
 * @dev:	Device for which MSI device data should be set up
 *
 * Return: 0 on success, appropriate error code otherwise
 *
 * This can be called more than once for @dev. If the MSI device data is
 * already allocated the call succeeds. The allocated memory is
 * automatically released when the device is destroyed.
 */
int msi_setup_device_data(struct device *dev)
{
	struct msi_device_data *md;

	if (dev->msi.data)
		return 0;

	md = devres_alloc(msi_device_data_release, sizeof(*md), GFP_KERNEL);
	if (!md)
		return -ENOMEM;

	dev->msi.data = md;
	devres_add(dev, md);
	return 0;
}

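/*
 * Usage sketch (hypothetical bus enable path): msi_setup_device_data()
 * must have succeeded before descriptors are allocated or sysfs entries
 * are populated for the device:
 *
 *	static int example_bus_enable_msi(struct device *dev)
 *	{
 *		int ret = msi_setup_device_data(dev);
 *
 *		if (ret)
 *			return ret;
 *		// descriptor allocation and irq domain calls follow here
 *		return 0;
 *	}
 */
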
/**
 * msi_get_virq - Return Linux interrupt number of an MSI interrupt
 * @dev:	Device to operate on
 * @index:	MSI interrupt index to look for (0-based)
 *
 * Return: The Linux interrupt number on success (> 0), 0 if not found
 */
unsigned int msi_get_virq(struct device *dev, unsigned int index)
{
	struct msi_desc *desc;
	bool pcimsi;

	if (!dev->msi.data)
		return 0;

	pcimsi = dev_is_pci(dev) ? to_pci_dev(dev)->msi_enabled : false;

	for_each_msi_entry(desc, dev) {
		/* PCI-MSI has only one descriptor for multiple interrupts. */
		if (pcimsi) {
			if (desc->irq && index < desc->nvec_used)
				return desc->irq + index;
			break;
		}

		/*
		 * PCI-MSIX and platform MSI use a descriptor per
		 * interrupt.
		 */
		if (desc->msi_index == index)
			return desc->irq;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(msi_get_virq);

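/*
 * Usage sketch (driver side, names hypothetical): map an MSI index to the
 * Linux interrupt number and request it. This works for PCI and platform
 * MSI alike, as the lookup only depends on the device's MSI descriptors:
 *
 *	unsigned int virq = msi_get_virq(dev, 0);
 *
 *	if (!virq)
 *		return -ENOENT;
 *	ret = request_irq(virq, example_handler, 0, "example-evq0", data);
 */
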
#ifdef CONFIG_SYSFS
static ssize_t msi_mode_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	/* MSI vs. MSIX is per device, not per interrupt */
	bool is_msix = dev_is_pci(dev) ? to_pci_dev(dev)->msix_enabled : false;

	return sysfs_emit(buf, "%s\n", is_msix ? "msix" : "msi");
}

/**
 * msi_populate_sysfs - Populate msi_irqs sysfs entries for devices
 * @dev:	The device (PCI, platform, etc.) which will get sysfs entries
 *
 * Return: attribute group array on success, %NULL if the device has no MSI
 * entries, ERR_PTR() on failure
 */
static const struct attribute_group **msi_populate_sysfs(struct device *dev)
{
	const struct attribute_group **msi_irq_groups;
	struct attribute **msi_attrs, *msi_attr;
	struct device_attribute *msi_dev_attr;
	struct attribute_group *msi_irq_group;
	struct msi_desc *entry;
	int ret = -ENOMEM;
	int num_msi = 0;
	int count = 0;
	int i;

	/* Determine how many msi entries we have */
	for_each_msi_entry(entry, dev)
		num_msi += entry->nvec_used;
	if (!num_msi)
		return NULL;

	/* Dynamically create the MSI attributes for the device */
	msi_attrs = kcalloc(num_msi + 1, sizeof(void *), GFP_KERNEL);
	if (!msi_attrs)
		return ERR_PTR(-ENOMEM);

	for_each_msi_entry(entry, dev) {
		for (i = 0; i < entry->nvec_used; i++) {
			msi_dev_attr = kzalloc(sizeof(*msi_dev_attr), GFP_KERNEL);
			if (!msi_dev_attr)
				goto error_attrs;
			msi_attrs[count] = &msi_dev_attr->attr;

			sysfs_attr_init(&msi_dev_attr->attr);
			msi_dev_attr->attr.name = kasprintf(GFP_KERNEL, "%d",
							    entry->irq + i);
			if (!msi_dev_attr->attr.name)
				goto error_attrs;
			msi_dev_attr->attr.mode = 0444;
			msi_dev_attr->show = msi_mode_show;
			++count;
		}
	}

	msi_irq_group = kzalloc(sizeof(*msi_irq_group), GFP_KERNEL);
	if (!msi_irq_group)
		goto error_attrs;
	msi_irq_group->name = "msi_irqs";
	msi_irq_group->attrs = msi_attrs;

	msi_irq_groups = kcalloc(2, sizeof(void *), GFP_KERNEL);
	if (!msi_irq_groups)
		goto error_irq_group;
	msi_irq_groups[0] = msi_irq_group;

	ret = sysfs_create_groups(&dev->kobj, msi_irq_groups);
	if (ret)
		goto error_irq_groups;

	return msi_irq_groups;

error_irq_groups:
	kfree(msi_irq_groups);
error_irq_group:
	kfree(msi_irq_group);
error_attrs:
	count = 0;
	msi_attr = msi_attrs[count];
	while (msi_attr) {
		msi_dev_attr = container_of(msi_attr, struct device_attribute, attr);
		kfree(msi_attr->name);
		kfree(msi_dev_attr);
		++count;
		msi_attr = msi_attrs[count];
	}
	kfree(msi_attrs);
	return ERR_PTR(ret);
}

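/*
 * Resulting layout (illustrative; interrupt numbers hypothetical): one
 * read-only file per allocated Linux interrupt number, reporting the mode:
 *
 *	/sys/devices/.../msi_irqs/24	-> "msi" or "msix"
 *	/sys/devices/.../msi_irqs/25	-> "msi" or "msix"
 */
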
/**
 * msi_device_populate_sysfs - Populate msi_irqs sysfs entries for a device
 * @dev:	The device (PCI, platform, etc.) which will get sysfs entries
 *
 * Return: 0 on success, error code otherwise
 */
int msi_device_populate_sysfs(struct device *dev)
{
	const struct attribute_group **group = msi_populate_sysfs(dev);

	if (IS_ERR(group))
		return PTR_ERR(group);
	dev->msi.data->attrs = group;
	return 0;
}

/**
 * msi_device_destroy_sysfs - Destroy msi_irqs sysfs entries for a device
 * @dev:	The device (PCI, platform, etc.) for which to remove
 *		sysfs entries
 */
void msi_device_destroy_sysfs(struct device *dev)
{
	const struct attribute_group **msi_irq_groups = dev->msi.data->attrs;
	struct device_attribute *dev_attr;
	struct attribute **msi_attrs;
	int count = 0;

	dev->msi.data->attrs = NULL;
	if (!msi_irq_groups)
		return;

	sysfs_remove_groups(&dev->kobj, msi_irq_groups);
	msi_attrs = msi_irq_groups[0]->attrs;
	while (msi_attrs[count]) {
		dev_attr = container_of(msi_attrs[count], struct device_attribute, attr);
		kfree(dev_attr->attr.name);
		kfree(dev_attr);
		++count;
	}
	kfree(msi_attrs);
	kfree(msi_irq_groups[0]);
	kfree(msi_irq_groups);
}
#endif

#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
static inline void irq_chip_write_msi_msg(struct irq_data *data,
					  struct msi_msg *msg)
{
	data->chip->irq_write_msi_msg(data, msg);
}

static void msi_check_level(struct irq_domain *domain, struct msi_msg *msg)
{
	struct msi_domain_info *info = domain->host_data;

	/*
	 * If the MSI provider has messed with the second message and
	 * not advertised that it is level-capable, signal the breakage.
	 */
	WARN_ON(!((info->flags & MSI_FLAG_LEVEL_CAPABLE) &&
		  (info->chip->flags & IRQCHIP_SUPPORTS_LEVEL_MSI)) &&
		(msg[1].address_lo || msg[1].address_hi || msg[1].data));
}

/**
 * msi_domain_set_affinity - Generic affinity setter function for MSI domains
 * @irq_data:	The irq data associated with the interrupt
 * @mask:	The affinity mask to set
 * @force:	Flag to enforce setting (disable online checks)
 *
 * Intended to be used by MSI interrupt controllers which are
 * implemented with hierarchical domains.
 *
 * Return: IRQ_SET_MASK_* result code
 */
int msi_domain_set_affinity(struct irq_data *irq_data,
			    const struct cpumask *mask, bool force)
{
	struct irq_data *parent = irq_data->parent_data;
	struct msi_msg msg[2] = { [1] = { }, };
	int ret;

	ret = parent->chip->irq_set_affinity(parent, mask, force);
	if (ret >= 0 && ret != IRQ_SET_MASK_OK_DONE) {
		BUG_ON(irq_chip_compose_msi_msg(irq_data, msg));
		msi_check_level(irq_data->domain, msg);
		irq_chip_write_msi_msg(irq_data, msg);
	}

	return ret;
}

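/*
 * Usage sketch (hypothetical irq_chip): an MSI controller driver which
 * does not set MSI_FLAG_USE_DEF_CHIP_OPS can wire up the generic setter
 * explicitly:
 *
 *	static struct irq_chip example_msi_chip = {
 *		.name			= "example-msi",
 *		.irq_mask		= example_msi_mask,
 *		.irq_unmask		= example_msi_unmask,
 *		.irq_write_msi_msg	= example_write_msi_msg,
 *		.irq_set_affinity	= msi_domain_set_affinity,
 *	};
 */
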
static int msi_domain_activate(struct irq_domain *domain,
			       struct irq_data *irq_data, bool early)
{
	struct msi_msg msg[2] = { [1] = { }, };

	BUG_ON(irq_chip_compose_msi_msg(irq_data, msg));
	msi_check_level(irq_data->domain, msg);
	irq_chip_write_msi_msg(irq_data, msg);
	return 0;
}

static void msi_domain_deactivate(struct irq_domain *domain,
				  struct irq_data *irq_data)
{
	struct msi_msg msg[2];

	memset(msg, 0, sizeof(msg));
	irq_chip_write_msi_msg(irq_data, msg);
}

static int msi_domain_alloc(struct irq_domain *domain, unsigned int virq,
			    unsigned int nr_irqs, void *arg)
{
	struct msi_domain_info *info = domain->host_data;
	struct msi_domain_ops *ops = info->ops;
	irq_hw_number_t hwirq = ops->get_hwirq(info, arg);
	int i, ret;

	if (irq_find_mapping(domain, hwirq) > 0)
		return -EEXIST;

	if (domain->parent) {
		ret = irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, arg);
		if (ret < 0)
			return ret;
	}

	for (i = 0; i < nr_irqs; i++) {
		ret = ops->msi_init(domain, info, virq + i, hwirq + i, arg);
		if (ret < 0) {
			if (ops->msi_free) {
				/* Unwind all previously initialized entries */
				for (i--; i >= 0; i--)
					ops->msi_free(domain, info, virq + i);
			}
			irq_domain_free_irqs_top(domain, virq, nr_irqs);
			return ret;
		}
	}

	return 0;
}

static void msi_domain_free(struct irq_domain *domain, unsigned int virq,
			    unsigned int nr_irqs)
{
	struct msi_domain_info *info = domain->host_data;
	int i;

	if (info->ops->msi_free) {
		for (i = 0; i < nr_irqs; i++)
			info->ops->msi_free(domain, info, virq + i);
	}
	irq_domain_free_irqs_top(domain, virq, nr_irqs);
}

static const struct irq_domain_ops msi_domain_ops = {
	.alloc		= msi_domain_alloc,
	.free		= msi_domain_free,
	.activate	= msi_domain_activate,
	.deactivate	= msi_domain_deactivate,
};

static irq_hw_number_t msi_domain_ops_get_hwirq(struct msi_domain_info *info,
						msi_alloc_info_t *arg)
{
	return arg->hwirq;
}

static int msi_domain_ops_prepare(struct irq_domain *domain, struct device *dev,
				  int nvec, msi_alloc_info_t *arg)
{
	memset(arg, 0, sizeof(*arg));
	return 0;
}

static void msi_domain_ops_set_desc(msi_alloc_info_t *arg,
				    struct msi_desc *desc)
{
	arg->desc = desc;
}

static int msi_domain_ops_init(struct irq_domain *domain,
			       struct msi_domain_info *info,
			       unsigned int virq, irq_hw_number_t hwirq,
			       msi_alloc_info_t *arg)
{
	irq_domain_set_hwirq_and_chip(domain, virq, hwirq, info->chip,
				      info->chip_data);
	if (info->handler && info->handler_name) {
		__irq_set_handler(virq, info->handler, 0, info->handler_name);
		if (info->handler_data)
			irq_set_handler_data(virq, info->handler_data);
	}
	return 0;
}

static int msi_domain_ops_check(struct irq_domain *domain,
				struct msi_domain_info *info,
				struct device *dev)
{
	return 0;
}

static struct msi_domain_ops msi_domain_ops_default = {
	.get_hwirq		= msi_domain_ops_get_hwirq,
	.msi_init		= msi_domain_ops_init,
	.msi_check		= msi_domain_ops_check,
	.msi_prepare		= msi_domain_ops_prepare,
	.set_desc		= msi_domain_ops_set_desc,
	.domain_alloc_irqs	= __msi_domain_alloc_irqs,
	.domain_free_irqs	= __msi_domain_free_irqs,
};

static void msi_domain_update_dom_ops(struct msi_domain_info *info)
{
	struct msi_domain_ops *ops = info->ops;

	if (ops == NULL) {
		info->ops = &msi_domain_ops_default;
		return;
	}

	if (ops->domain_alloc_irqs == NULL)
		ops->domain_alloc_irqs = msi_domain_ops_default.domain_alloc_irqs;
	if (ops->domain_free_irqs == NULL)
		ops->domain_free_irqs = msi_domain_ops_default.domain_free_irqs;

	if (!(info->flags & MSI_FLAG_USE_DEF_DOM_OPS))
		return;

	if (ops->get_hwirq == NULL)
		ops->get_hwirq = msi_domain_ops_default.get_hwirq;
	if (ops->msi_init == NULL)
		ops->msi_init = msi_domain_ops_default.msi_init;
	if (ops->msi_check == NULL)
		ops->msi_check = msi_domain_ops_default.msi_check;
	if (ops->msi_prepare == NULL)
		ops->msi_prepare = msi_domain_ops_default.msi_prepare;
	if (ops->set_desc == NULL)
		ops->set_desc = msi_domain_ops_default.set_desc;
}

static void msi_domain_update_chip_ops(struct msi_domain_info *info)
{
	struct irq_chip *chip = info->chip;

	BUG_ON(!chip || !chip->irq_mask || !chip->irq_unmask);
	if (!chip->irq_set_affinity)
		chip->irq_set_affinity = msi_domain_set_affinity;
}

/**
 * msi_create_irq_domain - Create an MSI interrupt domain
 * @fwnode:	Optional fwnode of the interrupt controller
 * @info:	MSI domain info
 * @parent:	Parent irq domain
 *
 * Return: pointer to the created &struct irq_domain or %NULL on failure
 */
struct irq_domain *msi_create_irq_domain(struct fwnode_handle *fwnode,
					 struct msi_domain_info *info,
					 struct irq_domain *parent)
{
	struct irq_domain *domain;

	msi_domain_update_dom_ops(info);
	if (info->flags & MSI_FLAG_USE_DEF_CHIP_OPS)
		msi_domain_update_chip_ops(info);

	domain = irq_domain_create_hierarchy(parent, IRQ_DOMAIN_FLAG_MSI, 0,
					     fwnode, &msi_domain_ops, info);

	if (domain && !domain->name && info->chip)
		domain->name = info->chip->name;

	return domain;
}

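/*
 * Usage sketch (controller driver; example_* names hypothetical): fill in
 * a struct msi_domain_info and stack the MSI domain on the parent domain:
 *
 *	static struct msi_domain_info example_msi_domain_info = {
 *		.flags	= MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS,
 *		.chip	= &example_msi_chip,
 *	};
 *
 *	msi_domain = msi_create_irq_domain(fwnode, &example_msi_domain_info,
 *					   parent_domain);
 *	if (!msi_domain)
 *		return -ENOMEM;
 */
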
int msi_domain_prepare_irqs(struct irq_domain *domain, struct device *dev,
			    int nvec, msi_alloc_info_t *arg)
{
	struct msi_domain_info *info = domain->host_data;
	struct msi_domain_ops *ops = info->ops;
	int ret;

	ret = ops->msi_check(domain, info, dev);
	if (ret == 0)
		ret = ops->msi_prepare(domain, dev, nvec, arg);

	return ret;
}

int msi_domain_populate_irqs(struct irq_domain *domain, struct device *dev,
			     int virq, int nvec, msi_alloc_info_t *arg)
{
	struct msi_domain_info *info = domain->host_data;
	struct msi_domain_ops *ops = info->ops;
	struct msi_desc *desc;
	int ret = 0;

	for_each_msi_entry(desc, dev) {
		/* Don't even try the multi-MSI brain damage. */
		if (WARN_ON(!desc->irq || desc->nvec_used != 1)) {
			ret = -EINVAL;
			break;
		}

		if (!(desc->irq >= virq && desc->irq < (virq + nvec)))
			continue;

		ops->set_desc(arg, desc);
		/* Assumes the domain mutex is held! */
		ret = irq_domain_alloc_irqs_hierarchy(domain, desc->irq, 1,
						      arg);
		if (ret)
			break;

		irq_set_msi_desc_off(desc->irq, 0, desc);
	}

	if (ret) {
		/* Mop up the damage */
		for_each_msi_entry(desc, dev) {
			if (!(desc->irq >= virq && desc->irq < (virq + nvec)))
				continue;

			irq_domain_free_irqs_common(domain, desc->irq, 1);
		}
	}

	return ret;
}

/*
 * Carefully check whether the device can use reservation mode. If
 * reservation mode is enabled then the early activation will assign a
 * dummy vector to the device. If the PCI/MSI device does not support
 * masking of the entry then this can result in spurious interrupts when
 * the device driver is not absolutely careful. But even then a malfunction
 * of the hardware could result in a spurious interrupt on the dummy vector
 * and render the device unusable. If the entry can be masked then the core
 * logic will prevent the spurious interrupt and reservation mode can be
 * used. For now reservation mode is restricted to PCI/MSI.
 */
static bool msi_check_reservation_mode(struct irq_domain *domain,
				       struct msi_domain_info *info,
				       struct device *dev)
{
	struct msi_desc *desc;

	switch(domain->bus_token) {
	case DOMAIN_BUS_PCI_MSI:
	case DOMAIN_BUS_VMD_MSI:
		break;
	default:
		return false;
	}

	if (!(info->flags & MSI_FLAG_MUST_REACTIVATE))
		return false;

	if (IS_ENABLED(CONFIG_PCI_MSI) && pci_msi_ignore_mask)
		return false;

	/*
	 * Checking the first MSI descriptor is sufficient. MSI-X supports
	 * masking, and MSI does so when the can_mask attribute is set.
	 */
	desc = first_msi_entry(dev);
	return desc->pci.msi_attrib.is_msix || desc->pci.msi_attrib.can_mask;
}

static int msi_handle_pci_fail(struct irq_domain *domain, struct msi_desc *desc,
			       int allocated)
{
	switch(domain->bus_token) {
	case DOMAIN_BUS_PCI_MSI:
	case DOMAIN_BUS_VMD_MSI:
		if (IS_ENABLED(CONFIG_PCI_MSI))
			break;
		fallthrough;
	default:
		return -ENOSPC;
	}

	/* Let a failed PCI multi MSI allocation retry */
	if (desc->nvec_used > 1)
		return 1;

	/* If there was a successful allocation let the caller know */
	return allocated ? allocated : -ENOSPC;
}

int __msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev,
			    int nvec)
{
	struct msi_domain_info *info = domain->host_data;
	struct msi_domain_ops *ops = info->ops;
	struct irq_data *irq_data;
	struct msi_desc *desc;
	msi_alloc_info_t arg = { };
	int allocated = 0;
	int i, ret, virq;
	bool can_reserve;

	ret = msi_domain_prepare_irqs(domain, dev, nvec, &arg);
	if (ret)
		return ret;

	for_each_msi_entry(desc, dev) {
		ops->set_desc(&arg, desc);

		virq = __irq_domain_alloc_irqs(domain, -1, desc->nvec_used,
					       dev_to_node(dev), &arg, false,
					       desc->affinity);
		if (virq < 0) {
			ret = msi_handle_pci_fail(domain, desc, allocated);
			goto cleanup;
		}

		for (i = 0; i < desc->nvec_used; i++) {
			irq_set_msi_desc_off(virq, i, desc);
			irq_debugfs_copy_devname(virq + i, dev);
		}
		allocated++;
	}

	can_reserve = msi_check_reservation_mode(domain, info, dev);

	/*
	 * This flag is set by the PCI layer as we need to activate
	 * the MSI entries before the PCI layer enables MSI in the
	 * card. Otherwise the card latches a random msi message.
	 */
	if (!(info->flags & MSI_FLAG_ACTIVATE_EARLY))
		goto skip_activate;

	for_each_msi_vector(desc, i, dev) {
		if (desc->irq == i) {
			virq = desc->irq;
			dev_dbg(dev, "irq [%d-%d] for MSI\n",
				virq, virq + desc->nvec_used - 1);
		}

		irq_data = irq_domain_get_irq_data(domain, i);
		if (!can_reserve) {
			irqd_clr_can_reserve(irq_data);
			if (domain->flags & IRQ_DOMAIN_MSI_NOMASK_QUIRK)
				irqd_set_msi_nomask_quirk(irq_data);
		}
		ret = irq_domain_activate_irq(irq_data, can_reserve);
		if (ret)
			goto cleanup;
	}

skip_activate:
	/*
	 * If these interrupts use reservation mode, clear the activated bit
	 * so request_irq() will assign the final vector.
	 */
	if (can_reserve) {
		for_each_msi_vector(desc, i, dev) {
			irq_data = irq_domain_get_irq_data(domain, i);
			irqd_clr_activated(irq_data);
		}
	}
	return 0;

cleanup:
	msi_domain_free_irqs(domain, dev);
	return ret;
}

/**
 * msi_domain_alloc_irqs - Allocate interrupts from an MSI interrupt domain
 * @domain:	The domain to allocate from
 * @dev:	Pointer to device struct of the device for which the interrupts
 *		are allocated
 * @nvec:	The number of interrupts to allocate
 *
 * Return: %0 on success or an error code.
 */
int msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev,
			  int nvec)
{
	struct msi_domain_info *info = domain->host_data;
	struct msi_domain_ops *ops = info->ops;
	int ret;

	ret = ops->domain_alloc_irqs(domain, dev, nvec);
	if (ret)
		return ret;

	if (!(info->flags & MSI_FLAG_DEV_SYSFS))
		return 0;

	ret = msi_device_populate_sysfs(dev);
	if (ret)
		msi_domain_free_irqs(domain, dev);
	return ret;
}

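/*
 * Usage sketch (bus support code): allocate the Linux interrupts for all
 * descriptors of a device in one call and tear them down symmetrically
 * with msi_domain_free_irqs(). Error handling beyond the allocation is
 * elided:
 *
 *	ret = msi_domain_alloc_irqs(msi_domain, dev, nvec);
 *	if (ret)
 *		return ret;
 *
 *	msi_domain_free_irqs(msi_domain, dev);
 */
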
void __msi_domain_free_irqs(struct irq_domain *domain, struct device *dev)
{
	struct irq_data *irq_data;
	struct msi_desc *desc;
	int i;

	for_each_msi_vector(desc, i, dev) {
		irq_data = irq_domain_get_irq_data(domain, i);
		if (irqd_is_activated(irq_data))
			irq_domain_deactivate_irq(irq_data);
	}

	for_each_msi_entry(desc, dev) {
		/*
		 * We might have failed to allocate an MSI early
		 * enough that there is no IRQ associated with this
		 * entry. If that's the case, don't do anything.
		 */
		if (desc->irq) {
			irq_domain_free_irqs(desc->irq, desc->nvec_used);
			desc->irq = 0;
		}
	}
}

/**
 * msi_domain_free_irqs - Free interrupts from an MSI interrupt @domain associated with @dev
 * @domain:	The domain managing the interrupts
 * @dev:	Pointer to device struct of the device for which the interrupts
 *		are freed
 */
void msi_domain_free_irqs(struct irq_domain *domain, struct device *dev)
{
	struct msi_domain_info *info = domain->host_data;
	struct msi_domain_ops *ops = info->ops;

	if (info->flags & MSI_FLAG_DEV_SYSFS)
		msi_device_destroy_sysfs(dev);
	ops->domain_free_irqs(domain, dev);
}

/**
 * msi_get_domain_info - Get the MSI interrupt domain info for @domain
 * @domain:	The interrupt domain to retrieve data from
 *
 * Return: the pointer to the msi_domain_info stored in @domain->host_data.
 */
struct msi_domain_info *msi_get_domain_info(struct irq_domain *domain)
{
	return (struct msi_domain_info *)domain->host_data;
}

#endif /* CONFIG_GENERIC_MSI_IRQ_DOMAIN */