// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2014 Intel Corp.
 * Author: Jiang Liu <jiang.liu@linux.intel.com>
 *
 * This file is licensed under GPLv2.
 *
 * This file contains common code to support Message Signaled Interrupts for
 * PCI compatible and non PCI compatible devices.
 */
#include <linux/types.h>
#include <linux/device.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/msi.h>
#include <linux/slab.h>
#include <linux/sysfs.h>
#include <linux/pci.h>

#include "internals.h"

/**
 * alloc_msi_entry - Allocate an initialized msi_desc
 * @dev: Pointer to the device for which this is allocated
 * @nvec: The number of vectors used in this entry
 * @affinity: Optional pointer to an affinity mask array size of @nvec
 *
 * If @affinity is not %NULL then an affinity array[@nvec] is allocated
 * and the affinity masks and flags from @affinity are copied.
 *
 * Return: pointer to allocated &msi_desc on success or %NULL on failure
 */
struct msi_desc *alloc_msi_entry(struct device *dev, int nvec,
                                 const struct irq_affinity_desc *affinity)
{
        struct msi_desc *desc;

        desc = kzalloc(sizeof(*desc), GFP_KERNEL);
        if (!desc)
                return NULL;

        INIT_LIST_HEAD(&desc->list);
        desc->dev = dev;
        desc->nvec_used = nvec;
        if (affinity) {
                desc->affinity = kmemdup(affinity,
                                         nvec * sizeof(*desc->affinity), GFP_KERNEL);
                if (!desc->affinity) {
                        kfree(desc);
                        return NULL;
                }
        }

        return desc;
}

void free_msi_entry(struct msi_desc *entry)
{
        kfree(entry->affinity);
        kfree(entry);
}
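
/*
 * Editorial illustration, not part of the upstream file: a minimal sketch
 * of how a core-code caller might pair alloc_msi_entry() with
 * free_msi_entry() when it carries per-vector affinity hints. The
 * two-entry affinity array and its managed setting are assumptions made
 * for the example.
 *
 *	struct irq_affinity_desc affd[2] = {
 *		{ .is_managed = 1 },
 *		{ .is_managed = 1 },
 *	};
 *	struct msi_desc *desc;
 *
 *	desc = alloc_msi_entry(dev, 2, affd);	// copies affd[0..1]
 *	if (!desc)
 *		return -ENOMEM;
 *
 *	free_msi_entry(desc);			// also frees the affinity copy
 */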

/**
 * msi_add_msi_desc - Allocate and initialize an MSI descriptor
 * @dev: Pointer to the device for which the descriptor is allocated
 * @init_desc: Pointer to an MSI descriptor to initialize the new descriptor
 *
 * Return: 0 on success or an appropriate failure code.
 */
int msi_add_msi_desc(struct device *dev, struct msi_desc *init_desc)
{
        struct msi_desc *desc;

        lockdep_assert_held(&dev->msi.data->mutex);

        desc = alloc_msi_entry(dev, init_desc->nvec_used, init_desc->affinity);
        if (!desc)
                return -ENOMEM;

        /* Copy the MSI index and type specific data to the new descriptor. */
        desc->msi_index = init_desc->msi_index;
        desc->pci = init_desc->pci;

        list_add_tail(&desc->list, &dev->msi.data->list);
        return 0;
}

/**
 * msi_add_simple_msi_descs - Allocate and initialize MSI descriptors
 * @dev: Pointer to the device for which the descriptors are allocated
 * @index: Index for the first MSI descriptor
 * @ndesc: Number of descriptors to allocate
 *
 * Return: 0 on success or an appropriate failure code.
 */
static int msi_add_simple_msi_descs(struct device *dev, unsigned int index, unsigned int ndesc)
{
        struct msi_desc *desc, *tmp;
        LIST_HEAD(list);
        unsigned int i;

        lockdep_assert_held(&dev->msi.data->mutex);

        for (i = 0; i < ndesc; i++) {
                desc = alloc_msi_entry(dev, 1, NULL);
                if (!desc)
                        goto fail;
                desc->msi_index = index + i;
                list_add_tail(&desc->list, &list);
        }
        list_splice_tail(&list, &dev->msi.data->list);
        return 0;

fail:
        list_for_each_entry_safe(desc, tmp, &list, list) {
                list_del(&desc->list);
                free_msi_entry(desc);
        }
        return -ENOMEM;
}

/**
 * msi_free_msi_descs_range - Free MSI descriptors of a device
 * @dev: Device to free the descriptors
 * @filter: Descriptor state filter
 * @first_index: Index to start freeing from
 * @last_index: Last index to be freed
 */
void msi_free_msi_descs_range(struct device *dev, enum msi_desc_filter filter,
                              unsigned int first_index, unsigned int last_index)
{
        struct msi_desc *desc;

        lockdep_assert_held(&dev->msi.data->mutex);

        msi_for_each_desc(desc, dev, filter) {
                /*
                 * Stupid for now to handle MSI device domain until the
                 * storage is switched over to an xarray.
                 */
                if (desc->msi_index < first_index || desc->msi_index > last_index)
                        continue;
                list_del(&desc->list);
                free_msi_entry(desc);
        }
}

void __get_cached_msi_msg(struct msi_desc *entry, struct msi_msg *msg)
{
        *msg = entry->msg;
}

void get_cached_msi_msg(unsigned int irq, struct msi_msg *msg)
{
        struct msi_desc *entry = irq_get_msi_desc(irq);

        __get_cached_msi_msg(entry, msg);
}
EXPORT_SYMBOL_GPL(get_cached_msi_msg);

static void msi_device_data_release(struct device *dev, void *res)
{
        struct msi_device_data *md = res;

        WARN_ON_ONCE(!list_empty(&md->list));
        dev->msi.data = NULL;
}

/**
 * msi_setup_device_data - Setup MSI device data
 * @dev: Device for which MSI device data should be set up
 *
 * Return: 0 on success, appropriate error code otherwise
 *
 * This can be called more than once for @dev. If the MSI device data is
 * already allocated the call succeeds. The allocated memory is
 * automatically released when the device is destroyed.
 */
int msi_setup_device_data(struct device *dev)
{
        struct msi_device_data *md;

        if (dev->msi.data)
                return 0;

        md = devres_alloc(msi_device_data_release, sizeof(*md), GFP_KERNEL);
        if (!md)
                return -ENOMEM;

        INIT_LIST_HEAD(&md->list);
        mutex_init(&md->mutex);
        dev->msi.data = md;
        devres_add(dev, md);
        return 0;
}
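
/*
 * Editorial illustration, not part of the upstream file: bus code is
 * expected to set up the per-device MSI data before descriptors are
 * added. Repeated calls are harmless, which keeps a (hypothetical) probe
 * path simple:
 *
 *	static int my_bus_msi_init(struct device *dev)	// hypothetical helper
 *	{
 *		int ret = msi_setup_device_data(dev);
 *
 *		if (ret)
 *			return ret;
 *		// dev->msi.data now exists and is released automatically
 *		// via devres when the device goes away.
 *		return 0;
 *	}
 */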

/**
 * msi_lock_descs - Lock the MSI descriptor storage of a device
 * @dev: Device to operate on
 */
void msi_lock_descs(struct device *dev)
{
        mutex_lock(&dev->msi.data->mutex);
}
EXPORT_SYMBOL_GPL(msi_lock_descs);

/**
 * msi_unlock_descs - Unlock the MSI descriptor storage of a device
 * @dev: Device to operate on
 */
void msi_unlock_descs(struct device *dev)
{
        /* Clear the next pointer which was cached by the iterator */
        dev->msi.data->__next = NULL;
        mutex_unlock(&dev->msi.data->mutex);
}
EXPORT_SYMBOL_GPL(msi_unlock_descs);

static bool msi_desc_match(struct msi_desc *desc, enum msi_desc_filter filter)
{
        switch (filter) {
        case MSI_DESC_ALL:
                return true;
        case MSI_DESC_NOTASSOCIATED:
                return !desc->irq;
        case MSI_DESC_ASSOCIATED:
                return !!desc->irq;
        }
        WARN_ON_ONCE(1);
        return false;
}

static struct msi_desc *msi_find_first_desc(struct device *dev, enum msi_desc_filter filter)
{
        struct msi_desc *desc;

        list_for_each_entry(desc, dev_to_msi_list(dev), list) {
                if (msi_desc_match(desc, filter))
                        return desc;
        }
        return NULL;
}

/**
 * msi_first_desc - Get the first MSI descriptor of a device
 * @dev: Device to operate on
 * @filter: Descriptor state filter
 *
 * Must be called with the MSI descriptor mutex held, i.e. msi_lock_descs()
 * must be invoked before the call.
 *
 * Return: Pointer to the first MSI descriptor matching the search
 * criteria, NULL if none found.
 */
struct msi_desc *msi_first_desc(struct device *dev, enum msi_desc_filter filter)
{
        struct msi_desc *desc;

        if (WARN_ON_ONCE(!dev->msi.data))
                return NULL;

        lockdep_assert_held(&dev->msi.data->mutex);

        desc = msi_find_first_desc(dev, filter);
        dev->msi.data->__next = desc ? list_next_entry(desc, list) : NULL;
        return desc;
}
EXPORT_SYMBOL_GPL(msi_first_desc);

static struct msi_desc *__msi_next_desc(struct device *dev, enum msi_desc_filter filter,
                                        struct msi_desc *from)
{
        struct msi_desc *desc = from;

        list_for_each_entry_from(desc, dev_to_msi_list(dev), list) {
                if (msi_desc_match(desc, filter))
                        return desc;
        }
        return NULL;
}

/**
 * msi_next_desc - Get the next MSI descriptor of a device
 * @dev: Device to operate on
 * @filter: Descriptor state filter
 *
 * The first invocation of msi_next_desc() has to be preceded by a
 * successful invocation of msi_first_desc(). Consecutive invocations are
 * only valid if the previous one was successful. All these operations have
 * to be done within the same MSI mutex held region.
 *
 * Return: Pointer to the next MSI descriptor matching the search
 * criteria, NULL if none found.
 */
struct msi_desc *msi_next_desc(struct device *dev, enum msi_desc_filter filter)
{
        struct msi_device_data *data = dev->msi.data;
        struct msi_desc *desc;

        if (WARN_ON_ONCE(!data))
                return NULL;

        lockdep_assert_held(&data->mutex);

        if (!data->__next)
                return NULL;

        desc = __msi_next_desc(dev, filter, data->__next);
        dev->msi.data->__next = desc ? list_next_entry(desc, list) : NULL;
        return desc;
}
EXPORT_SYMBOL_GPL(msi_next_desc);
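
/*
 * Editorial illustration, not part of the upstream file: the usual way to
 * walk the descriptors is the msi_for_each_desc() wrapper around
 * msi_first_desc()/msi_next_desc(), inside a locked section:
 *
 *	struct msi_desc *desc;
 *
 *	msi_lock_descs(dev);
 *	msi_for_each_desc(desc, dev, MSI_DESC_ASSOCIATED)
 *		pr_info("index %u -> irq %u\n", desc->msi_index, desc->irq);
 *	msi_unlock_descs(dev);
 *
 * msi_unlock_descs() clears the cached __next pointer, so a later
 * iteration starts from a clean state.
 */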

/**
 * msi_get_virq - Return Linux interrupt number of an MSI interrupt
 * @dev: Device to operate on
 * @index: MSI interrupt index to look for (0-based)
 *
 * Return: The Linux interrupt number on success (> 0), 0 if not found
 */
unsigned int msi_get_virq(struct device *dev, unsigned int index)
{
        struct msi_desc *desc;
        unsigned int ret = 0;
        bool pcimsi;

        if (!dev->msi.data)
                return 0;

        pcimsi = dev_is_pci(dev) ? to_pci_dev(dev)->msi_enabled : false;

        msi_lock_descs(dev);
        msi_for_each_desc(desc, dev, MSI_DESC_ASSOCIATED) {
                /* PCI-MSI has only one descriptor for multiple interrupts. */
                if (pcimsi) {
                        if (index < desc->nvec_used)
                                ret = desc->irq + index;
                        break;
                }

                /*
                 * PCI-MSIX and platform MSI use a descriptor per
                 * interrupt.
                 */
                if (desc->msi_index == index) {
                        ret = desc->irq;
                        break;
                }
        }
        msi_unlock_descs(dev);
        return ret;
}
EXPORT_SYMBOL_GPL(msi_get_virq);
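
/*
 * Editorial illustration, not part of the upstream file: a driver which
 * allocated several MSI vectors can translate a vector index into the
 * Linux interrupt number and request it. my_handler and the device name
 * are assumptions for the example:
 *
 *	unsigned int virq = msi_get_virq(dev, 0);
 *
 *	if (!virq)
 *		return -ENOENT;
 *	return request_irq(virq, my_handler, 0, "my-device", dev);
 */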

#ifdef CONFIG_SYSFS
static ssize_t msi_mode_show(struct device *dev, struct device_attribute *attr,
                             char *buf)
{
        /* MSI vs. MSIX is per device not per interrupt */
        bool is_msix = dev_is_pci(dev) ? to_pci_dev(dev)->msix_enabled : false;

        return sysfs_emit(buf, "%s\n", is_msix ? "msix" : "msi");
}

/**
 * msi_populate_sysfs - Populate msi_irqs sysfs entries for devices
 * @dev: The device (PCI, platform etc.) which will get sysfs entries
 */
static const struct attribute_group **msi_populate_sysfs(struct device *dev)
{
        const struct attribute_group **msi_irq_groups;
        struct attribute **msi_attrs, *msi_attr;
        struct device_attribute *msi_dev_attr;
        struct attribute_group *msi_irq_group;
        struct msi_desc *entry;
        int ret = -ENOMEM;
        int num_msi = 0;
        int count = 0;
        int i;

        /* Determine how many msi entries we have */
        msi_for_each_desc(entry, dev, MSI_DESC_ALL)
                num_msi += entry->nvec_used;
        if (!num_msi)
                return NULL;

        /* Dynamically create the MSI attributes for the device */
        msi_attrs = kcalloc(num_msi + 1, sizeof(void *), GFP_KERNEL);
        if (!msi_attrs)
                return ERR_PTR(-ENOMEM);

        msi_for_each_desc(entry, dev, MSI_DESC_ALL) {
                for (i = 0; i < entry->nvec_used; i++) {
                        msi_dev_attr = kzalloc(sizeof(*msi_dev_attr), GFP_KERNEL);
                        if (!msi_dev_attr)
                                goto error_attrs;
                        msi_attrs[count] = &msi_dev_attr->attr;

                        sysfs_attr_init(&msi_dev_attr->attr);
                        msi_dev_attr->attr.name = kasprintf(GFP_KERNEL, "%d",
                                                            entry->irq + i);
                        if (!msi_dev_attr->attr.name)
                                goto error_attrs;
                        msi_dev_attr->attr.mode = 0444;
                        msi_dev_attr->show = msi_mode_show;
                        ++count;
                }
        }

        msi_irq_group = kzalloc(sizeof(*msi_irq_group), GFP_KERNEL);
        if (!msi_irq_group)
                goto error_attrs;
        msi_irq_group->name = "msi_irqs";
        msi_irq_group->attrs = msi_attrs;

        msi_irq_groups = kcalloc(2, sizeof(void *), GFP_KERNEL);
        if (!msi_irq_groups)
                goto error_irq_group;
        msi_irq_groups[0] = msi_irq_group;

        ret = sysfs_create_groups(&dev->kobj, msi_irq_groups);
        if (ret)
                goto error_irq_groups;

        return msi_irq_groups;

error_irq_groups:
        kfree(msi_irq_groups);
error_irq_group:
        kfree(msi_irq_group);
error_attrs:
        count = 0;
        msi_attr = msi_attrs[count];
        while (msi_attr) {
                msi_dev_attr = container_of(msi_attr, struct device_attribute, attr);
                kfree(msi_attr->name);
                kfree(msi_dev_attr);
                ++count;
                msi_attr = msi_attrs[count];
        }
        kfree(msi_attrs);
        return ERR_PTR(ret);
}

/**
 * msi_device_populate_sysfs - Populate msi_irqs sysfs entries for a device
 * @dev: The device (PCI, platform etc.) which will get sysfs entries
 */
int msi_device_populate_sysfs(struct device *dev)
{
        const struct attribute_group **group = msi_populate_sysfs(dev);

        if (IS_ERR(group))
                return PTR_ERR(group);
        dev->msi.data->attrs = group;
        return 0;
}

/**
 * msi_device_destroy_sysfs - Destroy msi_irqs sysfs entries for a device
 * @dev: The device (PCI, platform etc.) for which to remove
 *       sysfs entries
 */
void msi_device_destroy_sysfs(struct device *dev)
{
        const struct attribute_group **msi_irq_groups = dev->msi.data->attrs;
        struct device_attribute *dev_attr;
        struct attribute **msi_attrs;
        int count = 0;

        dev->msi.data->attrs = NULL;
        if (!msi_irq_groups)
                return;

        sysfs_remove_groups(&dev->kobj, msi_irq_groups);
        msi_attrs = msi_irq_groups[0]->attrs;
        while (msi_attrs[count]) {
                dev_attr = container_of(msi_attrs[count], struct device_attribute, attr);
                kfree(dev_attr->attr.name);
                kfree(dev_attr);
                ++count;
        }
        kfree(msi_attrs);
        kfree(msi_irq_groups[0]);
        kfree(msi_irq_groups);
}
#endif

#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
static inline void irq_chip_write_msi_msg(struct irq_data *data,
                                          struct msi_msg *msg)
{
        data->chip->irq_write_msi_msg(data, msg);
}

static void msi_check_level(struct irq_domain *domain, struct msi_msg *msg)
{
        struct msi_domain_info *info = domain->host_data;

        /*
         * If the MSI provider has messed with the second message and
         * not advertised that it is level-capable, signal the breakage.
         */
        WARN_ON(!((info->flags & MSI_FLAG_LEVEL_CAPABLE) &&
                  (info->chip->flags & IRQCHIP_SUPPORTS_LEVEL_MSI)) &&
                (msg[1].address_lo || msg[1].address_hi || msg[1].data));
}

/**
 * msi_domain_set_affinity - Generic affinity setter function for MSI domains
 * @irq_data: The irq data associated to the interrupt
 * @mask: The affinity mask to set
 * @force: Flag to enforce setting (disable online checks)
 *
 * Intended to be used by MSI interrupt controllers which are
 * implemented with hierarchical domains.
 *
 * Return: IRQ_SET_MASK_* result code
 */
int msi_domain_set_affinity(struct irq_data *irq_data,
                            const struct cpumask *mask, bool force)
{
        struct irq_data *parent = irq_data->parent_data;
        struct msi_msg msg[2] = { [1] = { }, };
        int ret;

        ret = parent->chip->irq_set_affinity(parent, mask, force);
        if (ret >= 0 && ret != IRQ_SET_MASK_OK_DONE) {
                BUG_ON(irq_chip_compose_msi_msg(irq_data, msg));
                msi_check_level(irq_data->domain, msg);
                irq_chip_write_msi_msg(irq_data, msg);
        }

        return ret;
}
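
/*
 * Editorial illustration, not part of the upstream file: an MSI irq_chip
 * in a hierarchical domain typically plugs this helper straight into its
 * .irq_set_affinity callback. The chip and its message writer are
 * hypothetical; mask/unmask are required by msi_domain_update_chip_ops():
 *
 *	static struct irq_chip my_msi_chip = {
 *		.name			= "my-msi",
 *		.irq_mask		= irq_chip_mask_parent,
 *		.irq_unmask		= irq_chip_unmask_parent,
 *		.irq_set_affinity	= msi_domain_set_affinity,
 *		.irq_write_msi_msg	= my_write_msi_msg,	// hypothetical
 *	};
 */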

static int msi_domain_activate(struct irq_domain *domain,
                               struct irq_data *irq_data, bool early)
{
        struct msi_msg msg[2] = { [1] = { }, };

        BUG_ON(irq_chip_compose_msi_msg(irq_data, msg));
        msi_check_level(irq_data->domain, msg);
        irq_chip_write_msi_msg(irq_data, msg);
        return 0;
}

static void msi_domain_deactivate(struct irq_domain *domain,
                                  struct irq_data *irq_data)
{
        struct msi_msg msg[2];

        memset(msg, 0, sizeof(msg));
        irq_chip_write_msi_msg(irq_data, msg);
}

static int msi_domain_alloc(struct irq_domain *domain, unsigned int virq,
                            unsigned int nr_irqs, void *arg)
{
        struct msi_domain_info *info = domain->host_data;
        struct msi_domain_ops *ops = info->ops;
        irq_hw_number_t hwirq = ops->get_hwirq(info, arg);
        int i, ret;

        if (irq_find_mapping(domain, hwirq) > 0)
                return -EEXIST;

        if (domain->parent) {
                ret = irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, arg);
                if (ret < 0)
                        return ret;
        }

        for (i = 0; i < nr_irqs; i++) {
                ret = ops->msi_init(domain, info, virq + i, hwirq + i, arg);
                if (ret < 0) {
                        if (ops->msi_free) {
                                /* Unwind every entry initialized so far, index 0 included */
                                for (i--; i >= 0; i--)
                                        ops->msi_free(domain, info, virq + i);
                        }
                        irq_domain_free_irqs_top(domain, virq, nr_irqs);
                        return ret;
                }
        }

        return 0;
}

static void msi_domain_free(struct irq_domain *domain, unsigned int virq,
                            unsigned int nr_irqs)
{
        struct msi_domain_info *info = domain->host_data;
        int i;

        if (info->ops->msi_free) {
                for (i = 0; i < nr_irqs; i++)
                        info->ops->msi_free(domain, info, virq + i);
        }
        irq_domain_free_irqs_top(domain, virq, nr_irqs);
}

static const struct irq_domain_ops msi_domain_ops = {
        .alloc          = msi_domain_alloc,
        .free           = msi_domain_free,
        .activate       = msi_domain_activate,
        .deactivate     = msi_domain_deactivate,
};

static irq_hw_number_t msi_domain_ops_get_hwirq(struct msi_domain_info *info,
                                                msi_alloc_info_t *arg)
{
        return arg->hwirq;
}

static int msi_domain_ops_prepare(struct irq_domain *domain, struct device *dev,
                                  int nvec, msi_alloc_info_t *arg)
{
        memset(arg, 0, sizeof(*arg));
        return 0;
}

static void msi_domain_ops_set_desc(msi_alloc_info_t *arg,
                                    struct msi_desc *desc)
{
        arg->desc = desc;
}

static int msi_domain_ops_init(struct irq_domain *domain,
                               struct msi_domain_info *info,
                               unsigned int virq, irq_hw_number_t hwirq,
                               msi_alloc_info_t *arg)
{
        irq_domain_set_hwirq_and_chip(domain, virq, hwirq, info->chip,
                                      info->chip_data);
        if (info->handler && info->handler_name) {
                __irq_set_handler(virq, info->handler, 0, info->handler_name);
                if (info->handler_data)
                        irq_set_handler_data(virq, info->handler_data);
        }
        return 0;
}

static int msi_domain_ops_check(struct irq_domain *domain,
                                struct msi_domain_info *info,
                                struct device *dev)
{
        return 0;
}

static struct msi_domain_ops msi_domain_ops_default = {
        .get_hwirq              = msi_domain_ops_get_hwirq,
        .msi_init               = msi_domain_ops_init,
        .msi_check              = msi_domain_ops_check,
        .msi_prepare            = msi_domain_ops_prepare,
        .set_desc               = msi_domain_ops_set_desc,
        .domain_alloc_irqs      = __msi_domain_alloc_irqs,
        .domain_free_irqs       = __msi_domain_free_irqs,
};

static void msi_domain_update_dom_ops(struct msi_domain_info *info)
{
        struct msi_domain_ops *ops = info->ops;

        if (ops == NULL) {
                info->ops = &msi_domain_ops_default;
                return;
        }

        if (ops->domain_alloc_irqs == NULL)
                ops->domain_alloc_irqs = msi_domain_ops_default.domain_alloc_irqs;
        if (ops->domain_free_irqs == NULL)
                ops->domain_free_irqs = msi_domain_ops_default.domain_free_irqs;

        if (!(info->flags & MSI_FLAG_USE_DEF_DOM_OPS))
                return;

        if (ops->get_hwirq == NULL)
                ops->get_hwirq = msi_domain_ops_default.get_hwirq;
        if (ops->msi_init == NULL)
                ops->msi_init = msi_domain_ops_default.msi_init;
        if (ops->msi_check == NULL)
                ops->msi_check = msi_domain_ops_default.msi_check;
        if (ops->msi_prepare == NULL)
                ops->msi_prepare = msi_domain_ops_default.msi_prepare;
        if (ops->set_desc == NULL)
                ops->set_desc = msi_domain_ops_default.set_desc;
}

static void msi_domain_update_chip_ops(struct msi_domain_info *info)
{
        struct irq_chip *chip = info->chip;

        BUG_ON(!chip || !chip->irq_mask || !chip->irq_unmask);
        if (!chip->irq_set_affinity)
                chip->irq_set_affinity = msi_domain_set_affinity;
}

/**
 * msi_create_irq_domain - Create an MSI interrupt domain
 * @fwnode: Optional fwnode of the interrupt controller
 * @info: MSI domain info
 * @parent: Parent irq domain
 *
 * Return: pointer to the created &struct irq_domain or %NULL on failure
 */
struct irq_domain *msi_create_irq_domain(struct fwnode_handle *fwnode,
                                         struct msi_domain_info *info,
                                         struct irq_domain *parent)
{
        struct irq_domain *domain;

        msi_domain_update_dom_ops(info);
        if (info->flags & MSI_FLAG_USE_DEF_CHIP_OPS)
                msi_domain_update_chip_ops(info);

        domain = irq_domain_create_hierarchy(parent, IRQ_DOMAIN_FLAG_MSI, 0,
                                             fwnode, &msi_domain_ops, info);

        if (domain && !domain->name && info->chip)
                domain->name = info->chip->name;

        return domain;
}
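
/*
 * Editorial illustration, not part of the upstream file: a platform MSI
 * controller driver would fill in a struct msi_domain_info and create the
 * domain on top of its parent (wired) irq domain. All names here are
 * hypothetical:
 *
 *	static struct msi_domain_info my_msi_domain_info = {
 *		.flags	= MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS,
 *		.chip	= &my_msi_chip,
 *	};
 *
 *	msi_domain = msi_create_irq_domain(of_node_to_fwnode(np),
 *					   &my_msi_domain_info, parent_domain);
 *	if (!msi_domain)
 *		return -ENOMEM;
 */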

int msi_domain_prepare_irqs(struct irq_domain *domain, struct device *dev,
                            int nvec, msi_alloc_info_t *arg)
{
        struct msi_domain_info *info = domain->host_data;
        struct msi_domain_ops *ops = info->ops;
        int ret;

        ret = ops->msi_check(domain, info, dev);
        if (ret == 0)
                ret = ops->msi_prepare(domain, dev, nvec, arg);

        return ret;
}

int msi_domain_populate_irqs(struct irq_domain *domain, struct device *dev,
                             int virq_base, int nvec, msi_alloc_info_t *arg)
{
        struct msi_domain_info *info = domain->host_data;
        struct msi_domain_ops *ops = info->ops;
        struct msi_desc *desc;
        int ret, virq;

        msi_lock_descs(dev);
        for (virq = virq_base; virq < virq_base + nvec; virq++) {
                desc = alloc_msi_entry(dev, 1, NULL);
                if (!desc) {
                        ret = -ENOMEM;
                        goto fail;
                }

                desc->msi_index = virq;
                desc->irq = virq;
                list_add_tail(&desc->list, &dev->msi.data->list);

                ops->set_desc(arg, desc);
                ret = irq_domain_alloc_irqs_hierarchy(domain, virq, 1, arg);
                if (ret)
                        goto fail;

                irq_set_msi_desc(virq, desc);
        }
        msi_unlock_descs(dev);
        return 0;

fail:
        for (--virq; virq >= virq_base; virq--)
                irq_domain_free_irqs_common(domain, virq, 1);
        msi_free_msi_descs_range(dev, MSI_DESC_ALL, virq_base, virq_base + nvec - 1);
        msi_unlock_descs(dev);
        return ret;
}

/*
 * Carefully check whether the device can use reservation mode. If
 * reservation mode is enabled then the early activation will assign a
 * dummy vector to the device. If the PCI/MSI device does not support
 * masking of the entry then this can result in spurious interrupts when
 * the device driver is not absolutely careful. But even then a malfunction
 * of the hardware could result in a spurious interrupt on the dummy vector
 * and render the device unusable. If the entry can be masked then the core
 * logic will prevent the spurious interrupt and reservation mode can be
 * used. For now reservation mode is restricted to PCI/MSI.
 */
static bool msi_check_reservation_mode(struct irq_domain *domain,
                                       struct msi_domain_info *info,
                                       struct device *dev)
{
        struct msi_desc *desc;

        switch(domain->bus_token) {
        case DOMAIN_BUS_PCI_MSI:
        case DOMAIN_BUS_VMD_MSI:
                break;
        default:
                return false;
        }

        if (!(info->flags & MSI_FLAG_MUST_REACTIVATE))
                return false;

        if (IS_ENABLED(CONFIG_PCI_MSI) && pci_msi_ignore_mask)
                return false;

        /*
         * Checking the first MSI descriptor is sufficient. MSIX supports
         * masking and MSI does so when the can_mask attribute is set.
         */
        desc = msi_first_desc(dev, MSI_DESC_ALL);
        return desc->pci.msi_attrib.is_msix || desc->pci.msi_attrib.can_mask;
}

static int msi_handle_pci_fail(struct irq_domain *domain, struct msi_desc *desc,
                               int allocated)
{
        switch(domain->bus_token) {
        case DOMAIN_BUS_PCI_MSI:
        case DOMAIN_BUS_VMD_MSI:
                if (IS_ENABLED(CONFIG_PCI_MSI))
                        break;
                fallthrough;
        default:
                return -ENOSPC;
        }

        /* Let a failed PCI multi MSI allocation retry */
        if (desc->nvec_used > 1)
                return 1;

        /* If there was a successful allocation let the caller know */
        return allocated ? allocated : -ENOSPC;
}

#define VIRQ_CAN_RESERVE        0x01
#define VIRQ_ACTIVATE           0x02
#define VIRQ_NOMASK_QUIRK       0x04

static int msi_init_virq(struct irq_domain *domain, int virq, unsigned int vflags)
{
        struct irq_data *irqd = irq_domain_get_irq_data(domain, virq);
        int ret;

        if (!(vflags & VIRQ_CAN_RESERVE)) {
                irqd_clr_can_reserve(irqd);
                if (vflags & VIRQ_NOMASK_QUIRK)
                        irqd_set_msi_nomask_quirk(irqd);
        }

        if (!(vflags & VIRQ_ACTIVATE))
                return 0;

        ret = irq_domain_activate_irq(irqd, vflags & VIRQ_CAN_RESERVE);
        if (ret)
                return ret;
        /*
         * If the interrupt uses reservation mode, clear the activated bit
         * so request_irq() will assign the final vector.
         */
        if (vflags & VIRQ_CAN_RESERVE)
                irqd_clr_activated(irqd);
        return 0;
}

int __msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev,
                            int nvec)
{
        struct msi_domain_info *info = domain->host_data;
        struct msi_domain_ops *ops = info->ops;
        msi_alloc_info_t arg = { };
        unsigned int vflags = 0;
        struct msi_desc *desc;
        int allocated = 0;
        int i, ret, virq;

        ret = msi_domain_prepare_irqs(domain, dev, nvec, &arg);
        if (ret)
                return ret;

        /*
         * This flag is set by the PCI layer as we need to activate
         * the MSI entries before the PCI layer enables MSI in the
         * card. Otherwise the card latches a random msi message.
         */
        if (info->flags & MSI_FLAG_ACTIVATE_EARLY)
                vflags |= VIRQ_ACTIVATE;

        /*
         * Interrupt can use a reserved vector and will not occupy
         * a real device vector until the interrupt is requested.
         */
        if (msi_check_reservation_mode(domain, info, dev)) {
                vflags |= VIRQ_CAN_RESERVE;
                /*
                 * MSI affinity setting requires a special quirk (X86) when
                 * reservation mode is active.
                 */
                if (domain->flags & IRQ_DOMAIN_MSI_NOMASK_QUIRK)
                        vflags |= VIRQ_NOMASK_QUIRK;
        }

        msi_for_each_desc(desc, dev, MSI_DESC_NOTASSOCIATED) {
                ops->set_desc(&arg, desc);

                virq = __irq_domain_alloc_irqs(domain, -1, desc->nvec_used,
                                               dev_to_node(dev), &arg, false,
                                               desc->affinity);
                if (virq < 0)
                        return msi_handle_pci_fail(domain, desc, allocated);

                for (i = 0; i < desc->nvec_used; i++) {
                        irq_set_msi_desc_off(virq, i, desc);
                        irq_debugfs_copy_devname(virq + i, dev);
                        ret = msi_init_virq(domain, virq + i, vflags);
                        if (ret)
                                return ret;
                }
                allocated++;
        }
        return 0;
}

static int msi_domain_add_simple_msi_descs(struct msi_domain_info *info,
                                           struct device *dev,
                                           unsigned int num_descs)
{
        if (!(info->flags & MSI_FLAG_ALLOC_SIMPLE_MSI_DESCS))
                return 0;

        return msi_add_simple_msi_descs(dev, 0, num_descs);
}

/**
 * msi_domain_alloc_irqs_descs_locked - Allocate interrupts from an MSI interrupt domain
 * @domain: The domain to allocate from
 * @dev: Pointer to device struct of the device for which the interrupts
 *	 are allocated
 * @nvec: The number of interrupts to allocate
 *
 * Must be invoked from within a msi_lock_descs() / msi_unlock_descs()
 * pair. Use this for MSI irqdomains which implement their own vector
 * allocation/free.
 *
 * Return: %0 on success or an error code.
 */
int msi_domain_alloc_irqs_descs_locked(struct irq_domain *domain, struct device *dev,
                                       int nvec)
{
        struct msi_domain_info *info = domain->host_data;
        struct msi_domain_ops *ops = info->ops;
        int ret;

        lockdep_assert_held(&dev->msi.data->mutex);

        ret = msi_domain_add_simple_msi_descs(info, dev, nvec);
        if (ret)
                return ret;

        ret = ops->domain_alloc_irqs(domain, dev, nvec);
        if (ret)
                goto cleanup;

        if (!(info->flags & MSI_FLAG_DEV_SYSFS))
                return 0;

        ret = msi_device_populate_sysfs(dev);
        if (ret)
                goto cleanup;
        return 0;

cleanup:
        msi_domain_free_irqs_descs_locked(domain, dev);
        return ret;
}

/**
 * msi_domain_alloc_irqs - Allocate interrupts from an MSI interrupt domain
 * @domain: The domain to allocate from
 * @dev: Pointer to device struct of the device for which the interrupts
 *	 are allocated
 * @nvec: The number of interrupts to allocate
 *
 * Return: %0 on success or an error code.
 */
int msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev, int nvec)
{
        int ret;

        msi_lock_descs(dev);
        ret = msi_domain_alloc_irqs_descs_locked(domain, dev, nvec);
        msi_unlock_descs(dev);
        return ret;
}
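
/*
 * Editorial illustration, not part of the upstream file: a hypothetical
 * platform driver allocating and later releasing four vectors through its
 * MSI domain:
 *
 *	ret = msi_domain_alloc_irqs(msi_domain, dev, 4);
 *	if (ret)
 *		return ret;
 *
 *	msi_domain_free_irqs(msi_domain, dev);
 *
 * Both calls take the descriptor mutex internally; irqdomains which do
 * their own vector bookkeeping use the *_descs_locked() variants inside
 * an explicit msi_lock_descs()/msi_unlock_descs() section instead.
 */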

void __msi_domain_free_irqs(struct irq_domain *domain, struct device *dev)
{
        struct irq_data *irqd;
        struct msi_desc *desc;
        int i;

        /* Only handle MSI entries which have an interrupt associated */
        msi_for_each_desc(desc, dev, MSI_DESC_ASSOCIATED) {
                /* Make sure all interrupts are deactivated */
                for (i = 0; i < desc->nvec_used; i++) {
                        irqd = irq_domain_get_irq_data(domain, desc->irq + i);
                        if (irqd && irqd_is_activated(irqd))
                                irq_domain_deactivate_irq(irqd);
                }

                irq_domain_free_irqs(desc->irq, desc->nvec_used);
                desc->irq = 0;
        }
}

static void msi_domain_free_msi_descs(struct msi_domain_info *info,
                                      struct device *dev)
{
        if (info->flags & MSI_FLAG_FREE_MSI_DESCS)
                msi_free_msi_descs(dev);
}

/**
 * msi_domain_free_irqs_descs_locked - Free interrupts from an MSI interrupt @domain associated with @dev
 * @domain: The domain managing the interrupts
 * @dev: Pointer to device struct of the device for which the interrupts
 *	 are freed
 *
 * Must be invoked from within a msi_lock_descs() / msi_unlock_descs()
 * pair. Use this for MSI irqdomains which implement their own vector
 * allocation.
 */
void msi_domain_free_irqs_descs_locked(struct irq_domain *domain, struct device *dev)
{
        struct msi_domain_info *info = domain->host_data;
        struct msi_domain_ops *ops = info->ops;

        lockdep_assert_held(&dev->msi.data->mutex);

        if (info->flags & MSI_FLAG_DEV_SYSFS)
                msi_device_destroy_sysfs(dev);
        ops->domain_free_irqs(domain, dev);
        msi_domain_free_msi_descs(info, dev);
}

/**
 * msi_domain_free_irqs - Free interrupts from an MSI interrupt @domain associated with @dev
 * @domain: The domain managing the interrupts
 * @dev: Pointer to device struct of the device for which the interrupts
 *	 are freed
 */
void msi_domain_free_irqs(struct irq_domain *domain, struct device *dev)
{
        msi_lock_descs(dev);
        msi_domain_free_irqs_descs_locked(domain, dev);
        msi_unlock_descs(dev);
}

/**
 * msi_get_domain_info - Get the MSI interrupt domain info for @domain
 * @domain: The interrupt domain to retrieve data from
 *
 * Return: the pointer to the msi_domain_info stored in @domain->host_data.
 */
struct msi_domain_info *msi_get_domain_info(struct irq_domain *domain)
{
        return (struct msi_domain_info *)domain->host_data;
}

#endif /* CONFIG_GENERIC_MSI_IRQ_DOMAIN */