Bjorn Helgaas | 7328c8f | 2018-01-26 11:45:16 -0600 | [diff] [blame] | 1 | // SPDX-License-Identifier: GPL-2.0 |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2 | /* |
Bjorn Helgaas | df62ab5 | 2018-03-09 16:36:33 -0600 | [diff] [blame] | 3 | * PCI Message Signaled Interrupt (MSI) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4 | * |
| 5 | * Copyright (C) 2003-2004 Intel |
| 6 | * Copyright (C) Tom Long Nguyen (tom.l.nguyen@intel.com) |
Christoph Hellwig | aff1716 | 2016-07-12 18:20:17 +0900 | [diff] [blame] | 7 | * Copyright (C) 2016 Christoph Hellwig. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 8 | */ |
Thomas Gleixner | 29a03ad | 2021-12-06 23:27:44 +0100 | [diff] [blame] | 9 | #include <linux/err.h> |
| 10 | #include <linux/export.h> |
| 11 | #include <linux/irq.h> |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 12 | |
Thomas Gleixner | 288c81c | 2021-12-06 23:27:47 +0100 | [diff] [blame] | 13 | #include "../pci.h" |
Thomas Gleixner | aa423ac | 2021-12-06 23:27:52 +0100 | [diff] [blame] | 14 | #include "msi.h" |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 15 | |
/*
 * Global MSI enable switch; presumably cleared by "pci=nomsi" handling
 * elsewhere in the file — TODO confirm, not visible in this chunk.
 */
static int pci_msi_enable = 1;
/*
 * When set (e.g. under Xen, where the hypervisor owns the mask bits),
 * per-vector MSI masking is disabled; see msi_setup_msi_desc().
 */
int pci_msi_ignore_mask;
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 18 | |
/*
 * Read-modify-write the cached MSI mask bits (@clear bits cleared first,
 * then @set bits set) and write the result to the device's MSI mask
 * register at desc->pci.mask_pos.
 *
 * The per-device msi_lock serializes updates: all vectors of a multi-MSI
 * function share one mask register. No-op when the entry cannot be masked
 * (msi_attrib.can_mask clear).
 */
static noinline void pci_msi_update_mask(struct msi_desc *desc, u32 clear, u32 set)
{
	raw_spinlock_t *lock = &to_pci_dev(desc->dev)->msi_lock;
	unsigned long flags;

	if (!desc->pci.msi_attrib.can_mask)
		return;

	raw_spin_lock_irqsave(lock, flags);
	desc->pci.msi_mask &= ~clear;
	desc->pci.msi_mask |= set;
	pci_write_config_dword(msi_desc_to_pci_dev(desc), desc->pci.mask_pos,
			       desc->pci.msi_mask);
	raw_spin_unlock_irqrestore(lock, flags);
}
| 34 | |
/* Set the given mask bits for an MSI entry */
static inline void pci_msi_mask(struct msi_desc *desc, u32 mask)
{
	pci_msi_update_mask(desc, 0, mask);
}
| 39 | |
/* Clear the given mask bits for an MSI entry */
static inline void pci_msi_unmask(struct msi_desc *desc, u32 mask)
{
	pci_msi_update_mask(desc, mask, 0);
}
Logan Gunthorpe | d7cc609 | 2019-05-23 16:30:51 -0600 | [diff] [blame] | 44 | |
/*
 * Return the MMIO address of the MSI-X table entry belonging to @desc:
 * table base plus the entry's index scaled by the fixed entry size.
 */
static inline void __iomem *pci_msix_desc_addr(struct msi_desc *desc)
{
	return desc->pci.mask_base + desc->msi_index * PCI_MSIX_ENTRY_SIZE;
}
| 49 | |
/*
 * This internal function does not flush PCI writes to the device. All
 * users must ensure that they read from the device before either assuming
 * that the device state is up to date, or returning out of this file.
 * It does not affect the msi_desc::msix_ctrl cache either. Use with care!
 */
static void pci_msix_write_vector_ctrl(struct msi_desc *desc, u32 ctrl)
{
	void __iomem *desc_addr = pci_msix_desc_addr(desc);

	/* Skip the MMIO write when the entry cannot be masked at all */
	if (desc->pci.msi_attrib.can_mask)
		writel(ctrl, desc_addr + PCI_MSIX_ENTRY_VECTOR_CTRL);
}
| 63 | |
/* Set the MSI-X vector control mask bit and flush it to the device */
static inline void pci_msix_mask(struct msi_desc *desc)
{
	desc->pci.msix_ctrl |= PCI_MSIX_ENTRY_CTRL_MASKBIT;
	pci_msix_write_vector_ctrl(desc, desc->pci.msix_ctrl);
	/* Flush write to device */
	readl(desc->pci.mask_base);
}
| 71 | |
/* Clear the MSI-X vector control mask bit (no flush; see write helper) */
static inline void pci_msix_unmask(struct msi_desc *desc)
{
	desc->pci.msix_ctrl &= ~PCI_MSIX_ENTRY_CTRL_MASKBIT;
	pci_msix_write_vector_ctrl(desc, desc->pci.msix_ctrl);
}
Matthew Wilcox | f2440d9 | 2009-03-17 08:54:09 -0400 | [diff] [blame] | 77 | |
/* Dispatch masking to the MSI-X or MSI variant; @mask is MSI-only */
static void __pci_msi_mask_desc(struct msi_desc *desc, u32 mask)
{
	if (desc->pci.msi_attrib.is_msix)
		pci_msix_mask(desc);
	else
		pci_msi_mask(desc, mask);
}
| 85 | |
/* Dispatch unmasking to the MSI-X or MSI variant; @mask is MSI-only */
static void __pci_msi_unmask_desc(struct msi_desc *desc, u32 mask)
{
	if (desc->pci.msi_attrib.is_msix)
		pci_msix_unmask(desc);
	else
		pci_msi_unmask(desc, mask);
}
| 93 | |
/**
 * pci_msi_mask_irq - Generic IRQ chip callback to mask PCI/MSI interrupts
 * @data: pointer to irqdata associated to that interrupt
 */
void pci_msi_mask_irq(struct irq_data *data)
{
	struct msi_desc *desc = irq_data_get_msi_desc(data);

	/* For multi-MSI the mask bit index is the offset from the base IRQ */
	__pci_msi_mask_desc(desc, BIT(data->irq - desc->irq));
}
EXPORT_SYMBOL_GPL(pci_msi_mask_irq);
Matthew Wilcox | f2440d9 | 2009-03-17 08:54:09 -0400 | [diff] [blame] | 105 | |
/**
 * pci_msi_unmask_irq - Generic IRQ chip callback to unmask PCI/MSI interrupts
 * @data: pointer to irqdata associated to that interrupt
 */
void pci_msi_unmask_irq(struct irq_data *data)
{
	struct msi_desc *desc = irq_data_get_msi_desc(data);

	/* For multi-MSI the mask bit index is the offset from the base IRQ */
	__pci_msi_unmask_desc(desc, BIT(data->irq - desc->irq));
}
EXPORT_SYMBOL_GPL(pci_msi_unmask_irq);
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 117 | |
Jiang Liu | 891d4a4 | 2014-11-09 23:10:33 +0800 | [diff] [blame] | 118 | void __pci_read_msi_msg(struct msi_desc *entry, struct msi_msg *msg) |
Eric W. Biederman | 0366f8f | 2006-10-04 02:16:33 -0700 | [diff] [blame] | 119 | { |
Jiang Liu | e39758e | 2015-07-09 16:00:43 +0800 | [diff] [blame] | 120 | struct pci_dev *dev = msi_desc_to_pci_dev(entry); |
| 121 | |
| 122 | BUG_ON(dev->current_state != PCI_D0); |
Eric W. Biederman | 0366f8f | 2006-10-04 02:16:33 -0700 | [diff] [blame] | 123 | |
Thomas Gleixner | e58f225 | 2021-12-06 23:27:39 +0100 | [diff] [blame] | 124 | if (entry->pci.msi_attrib.is_msix) { |
Christoph Hellwig | 5eb6d66 | 2016-07-12 18:20:14 +0900 | [diff] [blame] | 125 | void __iomem *base = pci_msix_desc_addr(entry); |
Ben Hutchings | 30da552 | 2010-07-23 14:56:28 +0100 | [diff] [blame] | 126 | |
Thomas Gleixner | e58f225 | 2021-12-06 23:27:39 +0100 | [diff] [blame] | 127 | if (WARN_ON_ONCE(entry->pci.msi_attrib.is_virtual)) |
Logan Gunthorpe | d7cc609 | 2019-05-23 16:30:51 -0600 | [diff] [blame] | 128 | return; |
Logan Gunthorpe | d7cc609 | 2019-05-23 16:30:51 -0600 | [diff] [blame] | 129 | |
Ben Hutchings | 30da552 | 2010-07-23 14:56:28 +0100 | [diff] [blame] | 130 | msg->address_lo = readl(base + PCI_MSIX_ENTRY_LOWER_ADDR); |
| 131 | msg->address_hi = readl(base + PCI_MSIX_ENTRY_UPPER_ADDR); |
| 132 | msg->data = readl(base + PCI_MSIX_ENTRY_DATA); |
| 133 | } else { |
Bjorn Helgaas | f532216 | 2013-04-17 17:34:36 -0600 | [diff] [blame] | 134 | int pos = dev->msi_cap; |
Ben Hutchings | 30da552 | 2010-07-23 14:56:28 +0100 | [diff] [blame] | 135 | u16 data; |
| 136 | |
Bjorn Helgaas | 9925ad0 | 2013-04-17 17:39:57 -0600 | [diff] [blame] | 137 | pci_read_config_dword(dev, pos + PCI_MSI_ADDRESS_LO, |
| 138 | &msg->address_lo); |
Thomas Gleixner | e58f225 | 2021-12-06 23:27:39 +0100 | [diff] [blame] | 139 | if (entry->pci.msi_attrib.is_64) { |
Bjorn Helgaas | 9925ad0 | 2013-04-17 17:39:57 -0600 | [diff] [blame] | 140 | pci_read_config_dword(dev, pos + PCI_MSI_ADDRESS_HI, |
| 141 | &msg->address_hi); |
Bjorn Helgaas | 2f22134 | 2013-04-17 17:41:13 -0600 | [diff] [blame] | 142 | pci_read_config_word(dev, pos + PCI_MSI_DATA_64, &data); |
Ben Hutchings | 30da552 | 2010-07-23 14:56:28 +0100 | [diff] [blame] | 143 | } else { |
| 144 | msg->address_hi = 0; |
Bjorn Helgaas | 2f22134 | 2013-04-17 17:41:13 -0600 | [diff] [blame] | 145 | pci_read_config_word(dev, pos + PCI_MSI_DATA_32, &data); |
Ben Hutchings | 30da552 | 2010-07-23 14:56:28 +0100 | [diff] [blame] | 146 | } |
| 147 | msg->data = data; |
| 148 | } |
Eric W. Biederman | 0366f8f | 2006-10-04 02:16:33 -0700 | [diff] [blame] | 149 | } |
| 150 | |
/*
 * Program @msg into the device for @entry and cache it in entry->msg.
 *
 * Hardware is left untouched when the device is not in D0 or has been
 * surprise-removed; the cached message is still updated so it can be
 * restored later.
 */
void __pci_write_msi_msg(struct msi_desc *entry, struct msi_msg *msg)
{
	struct pci_dev *dev = msi_desc_to_pci_dev(entry);

	if (dev->current_state != PCI_D0 || pci_dev_is_disconnected(dev)) {
		/* Don't touch the hardware now */
	} else if (entry->pci.msi_attrib.is_msix) {
		void __iomem *base = pci_msix_desc_addr(entry);
		u32 ctrl = entry->pci.msix_ctrl;
		bool unmasked = !(ctrl & PCI_MSIX_ENTRY_CTRL_MASKBIT);

		/* Virtual entries have no backing storage; only cache the message */
		if (entry->pci.msi_attrib.is_virtual)
			goto skip;

		/*
		 * The specification mandates that the entry is masked
		 * when the message is modified:
		 *
		 * "If software changes the Address or Data value of an
		 * entry while the entry is unmasked, the result is
		 * undefined."
		 */
		if (unmasked)
			pci_msix_write_vector_ctrl(entry, ctrl | PCI_MSIX_ENTRY_CTRL_MASKBIT);

		writel(msg->address_lo, base + PCI_MSIX_ENTRY_LOWER_ADDR);
		writel(msg->address_hi, base + PCI_MSIX_ENTRY_UPPER_ADDR);
		writel(msg->data, base + PCI_MSIX_ENTRY_DATA);

		/* Restore the original (unmasked) vector control value */
		if (unmasked)
			pci_msix_write_vector_ctrl(entry, ctrl);

		/* Ensure that the writes are visible in the device */
		readl(base + PCI_MSIX_ENTRY_DATA);
	} else {
		int pos = dev->msi_cap;
		u16 msgctl;

		/* Update the multiple-message-enable field alongside the message */
		pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &msgctl);
		msgctl &= ~PCI_MSI_FLAGS_QSIZE;
		msgctl |= entry->pci.msi_attrib.multiple << 4;
		pci_write_config_word(dev, pos + PCI_MSI_FLAGS, msgctl);

		pci_write_config_dword(dev, pos + PCI_MSI_ADDRESS_LO,
				       msg->address_lo);
		if (entry->pci.msi_attrib.is_64) {
			pci_write_config_dword(dev, pos + PCI_MSI_ADDRESS_HI,
					       msg->address_hi);
			pci_write_config_word(dev, pos + PCI_MSI_DATA_64,
					      msg->data);
		} else {
			pci_write_config_word(dev, pos + PCI_MSI_DATA_32,
					      msg->data);
		}
		/* Ensure that the writes are visible in the device */
		pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &msgctl);
	}

skip:
	entry->msg = *msg;

	/* Optional per-entry hook, e.g. for platform quirks — set by callers */
	if (entry->write_msi_msg)
		entry->write_msi_msg(entry, entry->write_msi_msg_data);

}
| 216 | |
/**
 * pci_write_msi_msg - Write an MSI message for a Linux interrupt number
 * @irq:	The Linux interrupt number
 * @msg:	Message to program into the device
 */
void pci_write_msi_msg(unsigned int irq, struct msi_msg *msg)
{
	struct msi_desc *entry = irq_get_msi_desc(irq);

	__pci_write_msi_msg(entry, msg);
}
EXPORT_SYMBOL_GPL(pci_write_msi_msg);
Yinghai Lu | 3145e94 | 2008-12-05 18:58:34 -0800 | [diff] [blame] | 224 | |
/* Tear down the device's MSI IRQs and drop the MSI-X table mapping */
static void free_msi_irqs(struct pci_dev *dev)
{
	pci_msi_teardown_msi_irqs(dev);

	if (dev->msix_base) {
		iounmap(dev->msix_base);
		dev->msix_base = NULL;
	}
}
Satoru Takeuchi | c54c187 | 2007-01-18 13:50:05 +0900 | [diff] [blame] | 234 | |
David Miller | ba698ad | 2007-10-25 01:16:30 -0700 | [diff] [blame] | 235 | static void pci_intx_for_msi(struct pci_dev *dev, int enable) |
| 236 | { |
| 237 | if (!(dev->dev_flags & PCI_DEV_FLAGS_MSI_INTX_DISABLE_BUG)) |
| 238 | pci_intx(dev, enable); |
| 239 | } |
| 240 | |
Bjorn Helgaas | 830dfe8 | 2020-12-03 12:51:09 -0600 | [diff] [blame] | 241 | static void pci_msi_set_enable(struct pci_dev *dev, int enable) |
| 242 | { |
| 243 | u16 control; |
| 244 | |
| 245 | pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &control); |
| 246 | control &= ~PCI_MSI_FLAGS_ENABLE; |
| 247 | if (enable) |
| 248 | control |= PCI_MSI_FLAGS_ENABLE; |
| 249 | pci_write_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, control); |
| 250 | } |
| 251 | |
/*
 * Architecture override returns true when the PCI MSI message should be
 * written by the generic restore function.
 */
bool __weak arch_restore_msi_irqs(struct pci_dev *dev)
{
	/* Default: the generic code writes the message itself */
	return true;
}
| 260 | |
/* Restore MSI configuration (message, mask, flags) after resume/reset */
static void __pci_restore_msi_state(struct pci_dev *dev)
{
	struct msi_desc *entry;
	u16 control;

	if (!dev->msi_enabled)
		return;

	entry = irq_get_msi_desc(dev->irq);

	/* Disable INTx and MSI while reprogramming the message */
	pci_intx_for_msi(dev, 0);
	pci_msi_set_enable(dev, 0);
	/* The arch hook decides whether the core writes the message back */
	if (arch_restore_msi_irqs(dev))
		__pci_write_msi_msg(entry, &entry->msg);

	pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &control);
	/* Re-apply the cached per-vector mask bits (clear/set both zero) */
	pci_msi_update_mask(entry, 0, 0);
	/* Restore the multi-message count and re-enable MSI */
	control &= ~PCI_MSI_FLAGS_QSIZE;
	control |= (entry->pci.msi_attrib.multiple << 4) | PCI_MSI_FLAGS_ENABLE;
	pci_write_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, control);
}
| 282 | |
Bjorn Helgaas | 830dfe8 | 2020-12-03 12:51:09 -0600 | [diff] [blame] | 283 | static void pci_msix_clear_and_set_ctrl(struct pci_dev *dev, u16 clear, u16 set) |
| 284 | { |
| 285 | u16 ctrl; |
| 286 | |
| 287 | pci_read_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, &ctrl); |
| 288 | ctrl &= ~clear; |
| 289 | ctrl |= set; |
| 290 | pci_write_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, ctrl); |
| 291 | } |
| 292 | |
/* Restore MSI-X configuration (messages and vector masks) after resume/reset */
static void __pci_restore_msix_state(struct pci_dev *dev)
{
	struct msi_desc *entry;
	bool write_msg;

	if (!dev->msix_enabled)
		return;

	/* route the table */
	pci_intx_for_msi(dev, 0);
	/* Enable MSI-X with all vectors masked while restoring */
	pci_msix_clear_and_set_ctrl(dev, 0,
				PCI_MSIX_FLAGS_ENABLE | PCI_MSIX_FLAGS_MASKALL);

	write_msg = arch_restore_msi_irqs(dev);

	/* Walk all descriptors under the MSI descriptor lock */
	msi_lock_descs(&dev->dev);
	msi_for_each_desc(entry, &dev->dev, MSI_DESC_ALL) {
		if (write_msg)
			__pci_write_msi_msg(entry, &entry->msg);
		/* Restore the cached per-vector mask state */
		pci_msix_write_vector_ctrl(entry, entry->pci.msix_ctrl);
	}
	msi_unlock_descs(&dev->dev);

	/* Drop the function-wide mask; per-vector masks are now in place */
	pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_MASKALL, 0);
}
Michael Ellerman | 8fed4b6 | 2007-01-25 19:34:08 +1100 | [diff] [blame] | 318 | |
/* Restore whichever of MSI / MSI-X is enabled on the device */
void pci_restore_msi_state(struct pci_dev *dev)
{
	__pci_restore_msi_state(dev);
	__pci_restore_msix_state(dev);
}
EXPORT_SYMBOL_GPL(pci_restore_msi_state);
Shaohua Li | 41017f0 | 2006-02-08 17:11:38 +0800 | [diff] [blame] | 325 | |
/* devres action: release MSI vectors of a managed PCI device */
static void pcim_msi_release(void *pcidev)
{
	struct pci_dev *dev = pcidev;

	dev->is_msi_managed = false;
	pci_free_irq_vectors(dev);
}
| 333 | |
/*
 * Needs to be separate from pcim_release to prevent an ordering problem
 * vs. msi_device_data_release() in the MSI core code.
 */
static int pcim_setup_msi_release(struct pci_dev *dev)
{
	int ret;

	/* Only managed devices need the action; install it at most once */
	if (!pci_is_managed(dev) || dev->is_msi_managed)
		return 0;

	ret = devm_add_action(&dev->dev, pcim_msi_release, dev);
	if (!ret)
		dev->is_msi_managed = true;
	return ret;
}
| 350 | |
/*
 * Ordering vs. devres: msi device data has to be installed first so that
 * pcim_msi_release() is invoked before it on device release.
 */
static int pci_setup_msi_context(struct pci_dev *dev)
{
	int ret = msi_setup_device_data(&dev->dev);

	if (!ret)
		ret = pcim_setup_msi_release(dev);
	return ret;
}
| 363 | |
/*
 * Build an MSI descriptor for @nvec vectors from the device's MSI
 * capability and register it with the MSI core. @masks carries the
 * optional per-vector affinity descriptors. Returns 0 or a negative
 * error code from msi_add_msi_desc().
 */
static int msi_setup_msi_desc(struct pci_dev *dev, int nvec,
			      struct irq_affinity_desc *masks)
{
	struct msi_desc desc;
	u16 control;

	/* MSI Entry Initialization */
	memset(&desc, 0, sizeof(desc));

	pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &control);
	/* Lies, damned lies, and MSIs */
	if (dev->dev_flags & PCI_DEV_FLAGS_HAS_MSI_MASKING)
		control |= PCI_MSI_FLAGS_MASKBIT;
	/* Respect XEN's mask disabling */
	if (pci_msi_ignore_mask)
		control &= ~PCI_MSI_FLAGS_MASKBIT;

	desc.nvec_used = nvec;
	desc.pci.msi_attrib.is_64 = !!(control & PCI_MSI_FLAGS_64BIT);
	desc.pci.msi_attrib.can_mask = !!(control & PCI_MSI_FLAGS_MASKBIT);
	desc.pci.msi_attrib.default_irq = dev->irq;
	desc.pci.msi_attrib.multi_cap = (control & PCI_MSI_FLAGS_QMASK) >> 1;
	/* log2 of the granted vector count, rounded up to a power of two */
	desc.pci.msi_attrib.multiple = ilog2(__roundup_pow_of_two(nvec));
	desc.affinity = masks;

	/* Mask register position depends on 64-bit address capability */
	if (control & PCI_MSI_FLAGS_64BIT)
		desc.pci.mask_pos = dev->msi_cap + PCI_MSI_MASK_64;
	else
		desc.pci.mask_pos = dev->msi_cap + PCI_MSI_MASK_32;

	/* Save the initial mask status */
	if (desc.pci.msi_attrib.can_mask)
		pci_read_config_dword(dev, desc.pci.mask_pos, &desc.pci.msi_mask);

	return msi_add_msi_desc(&dev->dev, &desc);
}
| 400 | |
/*
 * Verify that no 64-bit MSI address was assigned to a device which
 * declared (via dev->no_64bit_msi) that it only supports 32-bit MSI
 * addresses. Returns 0 on success, -EIO if an offending entry exists.
 */
static int msi_verify_entries(struct pci_dev *dev)
{
	struct msi_desc *entry;

	if (!dev->no_64bit_msi)
		return 0;

	msi_for_each_desc(entry, &dev->dev, MSI_DESC_ALL) {
		if (entry->msg.address_hi) {
			pci_err(dev, "arch assigned 64-bit MSI address %#x%08x but device only supports 32 bits\n",
				entry->msg.address_hi, entry->msg.address_lo);
			break;
		}
	}
	/* entry is NULL when the loop finished without finding an offender */
	return !entry ? 0 : -EIO;
}
| 417 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 418 | /** |
| 419 | * msi_capability_init - configure device's MSI capability structure |
| 420 | * @dev: pointer to the pci_dev data structure of MSI device function |
Matthew Wilcox | 1c8d7b0 | 2009-03-17 08:54:10 -0400 | [diff] [blame] | 421 | * @nvec: number of interrupts to allocate |
Bjorn Helgaas | f6b6aef | 2019-05-30 08:05:58 -0500 | [diff] [blame] | 422 | * @affd: description of automatic IRQ affinity assignments (may be %NULL) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 423 | * |
Matthew Wilcox | 1c8d7b0 | 2009-03-17 08:54:10 -0400 | [diff] [blame] | 424 | * Setup the MSI capability structure of the device with the requested |
| 425 | * number of interrupts. A return value of zero indicates the successful |
Bjorn Helgaas | f6b6aef | 2019-05-30 08:05:58 -0500 | [diff] [blame] | 426 | * setup of an entry with the new MSI IRQ. A negative return value indicates |
Matthew Wilcox | 1c8d7b0 | 2009-03-17 08:54:10 -0400 | [diff] [blame] | 427 | * an error, and a positive return value indicates the number of interrupts |
| 428 | * which could have been allocated. |
| 429 | */ |
static int msi_capability_init(struct pci_dev *dev, int nvec,
			       struct irq_affinity *affd)
{
	struct irq_affinity_desc *masks = NULL;
	struct msi_desc *entry;
	int ret;

	/*
	 * Disable MSI during setup in the hardware, but mark it enabled
	 * so that setup code can evaluate it.
	 */
	pci_msi_set_enable(dev, 0);
	dev->msi_enabled = 1;

	/* Optional spreading of the interrupts over the given CPU sets */
	if (affd)
		masks = irq_create_affinity_masks(nvec, affd);

	msi_lock_descs(&dev->dev);
	ret = msi_setup_msi_desc(dev, nvec, masks);
	if (ret)
		goto fail;

	/* All MSIs are unmasked by default; mask them all */
	entry = msi_first_desc(&dev->dev, MSI_DESC_ALL);
	pci_msi_mask(entry, msi_multi_mask(entry));

	/* Configure MSI capability structure */
	ret = pci_msi_setup_msi_irqs(dev, nvec, PCI_CAP_ID_MSI);
	if (ret)
		goto err;

	/* Verify that the arch honored a possible 32-bit address limitation */
	ret = msi_verify_entries(dev);
	if (ret)
		goto err;

	/* Set MSI enabled bits */
	pci_intx_for_msi(dev, 0);
	pci_msi_set_enable(dev, 1);

	/* The device now signals via MSI; dev->irq carries the new Linux IRQ */
	pcibios_free_irq(dev);
	dev->irq = entry->irq;
	goto unlock;

err:
	/* Undo the masking applied above before the descriptors are torn down */
	pci_msi_unmask(entry, msi_multi_mask(entry));
	free_msi_irqs(dev);
fail:
	dev->msi_enabled = 0;
unlock:
	msi_unlock_descs(&dev->dev);
	kfree(masks);
	return ret;
}
| 483 | |
Krzysztof Wilczyński | fd1ae23 | 2021-10-13 01:41:36 +0000 | [diff] [blame] | 484 | static void __iomem *msix_map_region(struct pci_dev *dev, |
| 485 | unsigned int nr_entries) |
Hidetoshi Seto | 5a05a9d | 2009-08-06 11:34:34 +0900 | [diff] [blame] | 486 | { |
Kenji Kaneshige | 4302e0f | 2010-06-17 10:42:44 +0900 | [diff] [blame] | 487 | resource_size_t phys_addr; |
Hidetoshi Seto | 5a05a9d | 2009-08-06 11:34:34 +0900 | [diff] [blame] | 488 | u32 table_offset; |
Yijing Wang | 6a878e5 | 2015-01-28 09:52:17 +0800 | [diff] [blame] | 489 | unsigned long flags; |
Hidetoshi Seto | 5a05a9d | 2009-08-06 11:34:34 +0900 | [diff] [blame] | 490 | u8 bir; |
| 491 | |
Bjorn Helgaas | 909094c | 2013-04-17 17:43:40 -0600 | [diff] [blame] | 492 | pci_read_config_dword(dev, dev->msix_cap + PCI_MSIX_TABLE, |
| 493 | &table_offset); |
Bjorn Helgaas | 4d18760 | 2013-04-17 18:10:07 -0600 | [diff] [blame] | 494 | bir = (u8)(table_offset & PCI_MSIX_TABLE_BIR); |
Yijing Wang | 6a878e5 | 2015-01-28 09:52:17 +0800 | [diff] [blame] | 495 | flags = pci_resource_flags(dev, bir); |
| 496 | if (!flags || (flags & IORESOURCE_UNSET)) |
| 497 | return NULL; |
| 498 | |
Bjorn Helgaas | 4d18760 | 2013-04-17 18:10:07 -0600 | [diff] [blame] | 499 | table_offset &= PCI_MSIX_TABLE_OFFSET; |
Hidetoshi Seto | 5a05a9d | 2009-08-06 11:34:34 +0900 | [diff] [blame] | 500 | phys_addr = pci_resource_start(dev, bir) + table_offset; |
| 501 | |
Christoph Hellwig | 4bdc0d6 | 2020-01-06 09:43:50 +0100 | [diff] [blame] | 502 | return ioremap(phys_addr, nr_entries * PCI_MSIX_ENTRY_SIZE); |
Hidetoshi Seto | 5a05a9d | 2009-08-06 11:34:34 +0900 | [diff] [blame] | 503 | } |
| 504 | |
Thomas Gleixner | 71020a3 | 2021-12-06 23:51:15 +0100 | [diff] [blame] | 505 | static int msix_setup_msi_descs(struct pci_dev *dev, void __iomem *base, |
| 506 | struct msix_entry *entries, int nvec, |
| 507 | struct irq_affinity_desc *masks) |
Hidetoshi Seto | d9d7070 | 2009-08-06 11:35:48 +0900 | [diff] [blame] | 508 | { |
Thomas Gleixner | 71020a3 | 2021-12-06 23:51:15 +0100 | [diff] [blame] | 509 | int ret = 0, i, vec_count = pci_msix_vec_count(dev); |
Thomas Gleixner | 5512c5e | 2021-12-06 23:51:13 +0100 | [diff] [blame] | 510 | struct irq_affinity_desc *curmsk; |
Thomas Gleixner | 71020a3 | 2021-12-06 23:51:15 +0100 | [diff] [blame] | 511 | struct msi_desc desc; |
Thomas Gleixner | 7d5ec3d | 2021-07-29 23:51:41 +0200 | [diff] [blame] | 512 | void __iomem *addr; |
Christoph Hellwig | 4ef3368 | 2016-07-12 18:20:18 +0900 | [diff] [blame] | 513 | |
Thomas Gleixner | 71020a3 | 2021-12-06 23:51:15 +0100 | [diff] [blame] | 514 | memset(&desc, 0, sizeof(desc)); |
| 515 | |
| 516 | desc.nvec_used = 1; |
| 517 | desc.pci.msi_attrib.is_msix = 1; |
| 518 | desc.pci.msi_attrib.is_64 = 1; |
| 519 | desc.pci.msi_attrib.default_irq = dev->irq; |
| 520 | desc.pci.mask_base = base; |
| 521 | |
| 522 | for (i = 0, curmsk = masks; i < nvec; i++, curmsk++) { |
| 523 | desc.msi_index = entries ? entries[i].entry : i; |
| 524 | desc.affinity = masks ? curmsk : NULL; |
| 525 | desc.pci.msi_attrib.is_virtual = desc.msi_index >= vec_count; |
| 526 | desc.pci.msi_attrib.can_mask = !pci_msi_ignore_mask && |
| 527 | !desc.pci.msi_attrib.is_virtual; |
| 528 | |
| 529 | if (!desc.pci.msi_attrib.can_mask) { |
| 530 | addr = pci_msix_desc_addr(&desc); |
| 531 | desc.pci.msix_ctrl = readl(addr + PCI_MSIX_ENTRY_VECTOR_CTRL); |
Hidetoshi Seto | d9d7070 | 2009-08-06 11:35:48 +0900 | [diff] [blame] | 532 | } |
| 533 | |
Thomas Gleixner | 71020a3 | 2021-12-06 23:51:15 +0100 | [diff] [blame] | 534 | ret = msi_add_msi_desc(&dev->dev, &desc); |
| 535 | if (ret) |
| 536 | break; |
Hidetoshi Seto | d9d7070 | 2009-08-06 11:35:48 +0900 | [diff] [blame] | 537 | } |
Thomas Gleixner | 71020a3 | 2021-12-06 23:51:15 +0100 | [diff] [blame] | 538 | return ret; |
Hidetoshi Seto | d9d7070 | 2009-08-06 11:35:48 +0900 | [diff] [blame] | 539 | } |
| 540 | |
Thomas Gleixner | 7d5ec3d | 2021-07-29 23:51:41 +0200 | [diff] [blame] | 541 | static void msix_update_entries(struct pci_dev *dev, struct msix_entry *entries) |
Hidetoshi Seto | 75cb342 | 2009-08-06 11:35:10 +0900 | [diff] [blame] | 542 | { |
Thomas Gleixner | ae24e28 | 2021-12-06 23:51:18 +0100 | [diff] [blame] | 543 | struct msi_desc *desc; |
Hidetoshi Seto | 75cb342 | 2009-08-06 11:35:10 +0900 | [diff] [blame] | 544 | |
Thomas Gleixner | 7112158 | 2021-12-06 23:27:46 +0100 | [diff] [blame] | 545 | if (entries) { |
Thomas Gleixner | ae24e28 | 2021-12-06 23:51:18 +0100 | [diff] [blame] | 546 | msi_for_each_desc(desc, &dev->dev, MSI_DESC_ALL) { |
| 547 | entries->vector = desc->irq; |
Thomas Gleixner | 7d5ec3d | 2021-07-29 23:51:41 +0200 | [diff] [blame] | 548 | entries++; |
| 549 | } |
Hidetoshi Seto | 75cb342 | 2009-08-06 11:35:10 +0900 | [diff] [blame] | 550 | } |
| 551 | } |
| 552 | |
Thomas Gleixner | 7d5ec3d | 2021-07-29 23:51:41 +0200 | [diff] [blame] | 553 | static void msix_mask_all(void __iomem *base, int tsize) |
| 554 | { |
| 555 | u32 ctrl = PCI_MSIX_ENTRY_CTRL_MASKBIT; |
| 556 | int i; |
| 557 | |
Marek Marczykowski-Górecki | 1a519dc | 2021-08-26 19:03:42 +0200 | [diff] [blame] | 558 | if (pci_msi_ignore_mask) |
| 559 | return; |
| 560 | |
Thomas Gleixner | 7d5ec3d | 2021-07-29 23:51:41 +0200 | [diff] [blame] | 561 | for (i = 0; i < tsize; i++, base += PCI_MSIX_ENTRY_SIZE) |
| 562 | writel(ctrl, base + PCI_MSIX_ENTRY_VECTOR_CTRL); |
| 563 | } |
| 564 | |
/*
 * Allocate descriptors and interrupts for all requested MSI-X vectors
 * and hand the resulting Linux IRQ numbers back to the caller.
 * Returns 0 on success or a negative error code; on failure all
 * partially allocated resources are freed again.
 */
static int msix_setup_interrupts(struct pci_dev *dev, void __iomem *base,
				 struct msix_entry *entries, int nvec,
				 struct irq_affinity *affd)
{
	struct irq_affinity_desc *masks = NULL;
	int ret;

	/* Optional spreading of the interrupts over the given CPU sets */
	if (affd)
		masks = irq_create_affinity_masks(nvec, affd);

	msi_lock_descs(&dev->dev);
	/* One MSI descriptor per requested vector */
	ret = msix_setup_msi_descs(dev, base, entries, nvec, masks);
	if (ret)
		goto out_free;

	ret = pci_msi_setup_msi_irqs(dev, nvec, PCI_CAP_ID_MSIX);
	if (ret)
		goto out_free;

	/* Check if all MSI entries honor device restrictions */
	ret = msi_verify_entries(dev);
	if (ret)
		goto out_free;

	/* Report the allocated Linux IRQ numbers back via entries[] */
	msix_update_entries(dev, entries);
	goto out_unlock;

out_free:
	free_msi_irqs(dev);
out_unlock:
	msi_unlock_descs(&dev->dev);
	kfree(masks);
	return ret;
}
| 599 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 600 | /** |
| 601 | * msix_capability_init - configure device's MSI-X capability |
| 602 | * @dev: pointer to the pci_dev data structure of MSI-X device function |
Randy Dunlap | 8f7020d | 2005-10-23 11:57:38 -0700 | [diff] [blame] | 603 | * @entries: pointer to an array of struct msix_entry entries |
| 604 | * @nvec: number of @entries |
Bjorn Helgaas | f6b6aef | 2019-05-30 08:05:58 -0500 | [diff] [blame] | 605 | * @affd: Optional pointer to enable automatic affinity assignment |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 606 | * |
 * Setup the MSI-X capability structure of the device function with the
 * requested number of MSI-X IRQs. A return of zero indicates successful
 * setup of the requested MSI-X entries with allocated IRQs; a non-zero
 * return value indicates an error.
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 610 | **/ |
static int msix_capability_init(struct pci_dev *dev, struct msix_entry *entries,
				int nvec, struct irq_affinity *affd)
{
	void __iomem *base;
	int ret, tsize;
	u16 control;

	/*
	 * Some devices require MSI-X to be enabled before the MSI-X
	 * registers can be accessed. Mask all the vectors to prevent
	 * interrupts coming in before they're fully set up.
	 */
	pci_msix_clear_and_set_ctrl(dev, 0, PCI_MSIX_FLAGS_MASKALL |
				    PCI_MSIX_FLAGS_ENABLE);

	/* Mark it enabled so setup functions can query it */
	dev->msix_enabled = 1;

	pci_read_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, &control);
	/* Request & Map MSI-X table region */
	tsize = msix_table_size(control);
	base = msix_map_region(dev, tsize);
	if (!base) {
		ret = -ENOMEM;
		goto out_disable;
	}

	dev->msix_base = base;

	ret = msix_setup_interrupts(dev, base, entries, nvec, affd);
	if (ret)
		goto out_disable;

	/* Disable INTX */
	pci_intx_for_msi(dev, 0);

	/*
	 * Ensure that all table entries are masked to prevent
	 * stale entries from firing in a crash kernel.
	 *
	 * Done late to deal with a broken Marvell NVME device
	 * which takes the MSI-X mask bits into account even
	 * when MSI-X is disabled, which prevents MSI delivery.
	 */
	msix_mask_all(base, tsize);
	/* Drop the global MASKALL bit; the per-vector mask bits remain set */
	pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_MASKALL, 0);

	pcibios_free_irq(dev);
	return 0;

out_disable:
	dev->msix_enabled = 0;
	/* Clear both the MASKALL and ENABLE bits set at entry */
	pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_MASKALL | PCI_MSIX_FLAGS_ENABLE, 0);

	return ret;
}
| 667 | |
| 668 | /** |
Alexander Gordeev | a06cd74 | 2014-09-23 12:45:58 -0600 | [diff] [blame] | 669 | * pci_msi_supported - check whether MSI may be enabled on a device |
Brice Goglin | 24334a1 | 2006-08-31 01:55:07 -0400 | [diff] [blame] | 670 | * @dev: pointer to the pci_dev data structure of MSI device function |
Bjorn Helgaas | f6b6aef | 2019-05-30 08:05:58 -0500 | [diff] [blame] | 671 | * @nvec: how many MSIs have been requested? |
Brice Goglin | 24334a1 | 2006-08-31 01:55:07 -0400 | [diff] [blame] | 672 | * |
Bjorn Helgaas | f762598 | 2013-11-14 11:28:18 -0700 | [diff] [blame] | 673 | * Look at global flags, the device itself, and its parent buses |
Michael Ellerman | 17bbc12 | 2007-04-05 17:19:07 +1000 | [diff] [blame] | 674 | * to determine if MSI/-X are supported for the device. If MSI/-X is |
Alexander Gordeev | a06cd74 | 2014-09-23 12:45:58 -0600 | [diff] [blame] | 675 | * supported return 1, else return 0. |
Brice Goglin | 24334a1 | 2006-08-31 01:55:07 -0400 | [diff] [blame] | 676 | **/ |
Alexander Gordeev | a06cd74 | 2014-09-23 12:45:58 -0600 | [diff] [blame] | 677 | static int pci_msi_supported(struct pci_dev *dev, int nvec) |
Brice Goglin | 24334a1 | 2006-08-31 01:55:07 -0400 | [diff] [blame] | 678 | { |
| 679 | struct pci_bus *bus; |
| 680 | |
Brice Goglin | 0306ebf | 2006-10-05 10:24:31 +0200 | [diff] [blame] | 681 | /* MSI must be globally enabled and supported by the device */ |
Alexander Gordeev | 27e2060 | 2014-09-23 14:25:11 -0600 | [diff] [blame] | 682 | if (!pci_msi_enable) |
Alexander Gordeev | a06cd74 | 2014-09-23 12:45:58 -0600 | [diff] [blame] | 683 | return 0; |
Alexander Gordeev | 27e2060 | 2014-09-23 14:25:11 -0600 | [diff] [blame] | 684 | |
Bjorn Helgaas | 901c4dd | 2019-10-14 16:17:05 -0500 | [diff] [blame] | 685 | if (!dev || dev->no_msi) |
Alexander Gordeev | a06cd74 | 2014-09-23 12:45:58 -0600 | [diff] [blame] | 686 | return 0; |
Brice Goglin | 24334a1 | 2006-08-31 01:55:07 -0400 | [diff] [blame] | 687 | |
Michael Ellerman | 314e77b | 2007-04-05 17:19:12 +1000 | [diff] [blame] | 688 | /* |
| 689 | * You can't ask to have 0 or less MSIs configured. |
| 690 | * a) it's stupid .. |
| 691 | * b) the list manipulation code assumes nvec >= 1. |
| 692 | */ |
| 693 | if (nvec < 1) |
Alexander Gordeev | a06cd74 | 2014-09-23 12:45:58 -0600 | [diff] [blame] | 694 | return 0; |
Michael Ellerman | 314e77b | 2007-04-05 17:19:12 +1000 | [diff] [blame] | 695 | |
Hidetoshi Seto | 500559a | 2009-08-10 10:14:15 +0900 | [diff] [blame] | 696 | /* |
| 697 | * Any bridge which does NOT route MSI transactions from its |
| 698 | * secondary bus to its primary bus must set NO_MSI flag on |
Brice Goglin | 0306ebf | 2006-10-05 10:24:31 +0200 | [diff] [blame] | 699 | * the secondary pci_bus. |
Marc Zyngier | 61af692 | 2021-03-30 16:11:44 +0100 | [diff] [blame] | 700 | * |
| 701 | * The NO_MSI flag can either be set directly by: |
| 702 | * - arch-specific PCI host bus controller drivers (deprecated) |
| 703 | * - quirks for specific PCI bridges |
| 704 | * |
| 705 | * or indirectly by platform-specific PCI host bridge drivers by |
| 706 | * advertising the 'msi_domain' property, which results in |
| 707 | * the NO_MSI flag when no MSI domain is found for this bridge |
| 708 | * at probe time. |
Brice Goglin | 0306ebf | 2006-10-05 10:24:31 +0200 | [diff] [blame] | 709 | */ |
Brice Goglin | 24334a1 | 2006-08-31 01:55:07 -0400 | [diff] [blame] | 710 | for (bus = dev->bus; bus; bus = bus->parent) |
| 711 | if (bus->bus_flags & PCI_BUS_FLAGS_NO_MSI) |
Alexander Gordeev | a06cd74 | 2014-09-23 12:45:58 -0600 | [diff] [blame] | 712 | return 0; |
Brice Goglin | 24334a1 | 2006-08-31 01:55:07 -0400 | [diff] [blame] | 713 | |
Alexander Gordeev | a06cd74 | 2014-09-23 12:45:58 -0600 | [diff] [blame] | 714 | return 1; |
Brice Goglin | 24334a1 | 2006-08-31 01:55:07 -0400 | [diff] [blame] | 715 | } |
| 716 | |
| 717 | /** |
Alexander Gordeev | d1ac1d2 | 2013-12-30 08:28:13 +0100 | [diff] [blame] | 718 | * pci_msi_vec_count - Return the number of MSI vectors a device can send |
| 719 | * @dev: device to report about |
| 720 | * |
| 721 | * This function returns the number of MSI vectors a device requested via |
| 722 | * Multiple Message Capable register. It returns a negative errno if the |
| 723 | * device is not capable sending MSI interrupts. Otherwise, the call succeeds |
| 724 | * and returns a power of two, up to a maximum of 2^5 (32), according to the |
| 725 | * MSI specification. |
| 726 | **/ |
| 727 | int pci_msi_vec_count(struct pci_dev *dev) |
| 728 | { |
| 729 | int ret; |
| 730 | u16 msgctl; |
| 731 | |
| 732 | if (!dev->msi_cap) |
| 733 | return -EINVAL; |
| 734 | |
| 735 | pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &msgctl); |
| 736 | ret = 1 << ((msgctl & PCI_MSI_FLAGS_QMASK) >> 1); |
| 737 | |
| 738 | return ret; |
| 739 | } |
| 740 | EXPORT_SYMBOL(pci_msi_vec_count); |
| 741 | |
Bjorn Helgaas | 688769f | 2017-03-09 15:45:14 -0600 | [diff] [blame] | 742 | static void pci_msi_shutdown(struct pci_dev *dev) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 743 | { |
Matthew Wilcox | f2440d9 | 2009-03-17 08:54:09 -0400 | [diff] [blame] | 744 | struct msi_desc *desc; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 745 | |
Michael Ellerman | 128bc5f | 2007-03-22 21:51:39 +1100 | [diff] [blame] | 746 | if (!pci_msi_enable || !dev || !dev->msi_enabled) |
Eric W. Biederman | ded86d8 | 2007-01-28 12:42:52 -0700 | [diff] [blame] | 747 | return; |
| 748 | |
Michael S. Tsirkin | 61b64ab | 2015-05-07 09:52:21 -0500 | [diff] [blame] | 749 | pci_msi_set_enable(dev, 0); |
David Miller | ba698ad | 2007-10-25 01:16:30 -0700 | [diff] [blame] | 750 | pci_intx_for_msi(dev, 1); |
Eric W. Biederman | b1cbf4e | 2007-03-05 00:30:10 -0800 | [diff] [blame] | 751 | dev->msi_enabled = 0; |
Eric W. Biederman | 7bd007e | 2006-10-04 02:16:31 -0700 | [diff] [blame] | 752 | |
Hidetoshi Seto | 12abb8b | 2009-06-24 12:08:09 +0900 | [diff] [blame] | 753 | /* Return the device with MSI unmasked as initial states */ |
Thomas Gleixner | ae24e28 | 2021-12-06 23:51:18 +0100 | [diff] [blame] | 754 | desc = msi_first_desc(&dev->dev, MSI_DESC_ALL); |
| 755 | if (!WARN_ON_ONCE(!desc)) |
| 756 | pci_msi_unmask(desc, msi_multi_mask(desc)); |
Michael Ellerman | e387b9e | 2007-03-22 21:51:27 +1100 | [diff] [blame] | 757 | |
Bjorn Helgaas | f6b6aef | 2019-05-30 08:05:58 -0500 | [diff] [blame] | 758 | /* Restore dev->irq to its default pin-assertion IRQ */ |
Thomas Gleixner | e58f225 | 2021-12-06 23:27:39 +0100 | [diff] [blame] | 759 | dev->irq = desc->pci.msi_attrib.default_irq; |
Jiang Liu | 5f22699 | 2015-07-30 14:00:08 -0500 | [diff] [blame] | 760 | pcibios_alloc_irq(dev); |
Yinghai Lu | d52877c | 2008-04-23 14:58:09 -0700 | [diff] [blame] | 761 | } |
Matthew Wilcox | 24d2755 | 2009-03-17 08:54:06 -0400 | [diff] [blame] | 762 | |
/**
 * pci_disable_msi - shut down a device's MSI and release its resources
 * @dev: the PCI device whose MSI is being disabled
 *
 * Disables MSI in the device and the hardware, restores INTx
 * (via pci_msi_shutdown()), and frees the MSI descriptors and IRQs.
 */
void pci_disable_msi(struct pci_dev *dev)
{
	/* Nothing to do if MSI is globally off or was never enabled here */
	if (!pci_msi_enable || !dev || !dev->msi_enabled)
		return;

	/* Hold the descriptor lock across shutdown and free */
	msi_lock_descs(&dev->dev);
	pci_msi_shutdown(dev);
	free_msi_irqs(dev);
	msi_unlock_descs(&dev->dev);
}
EXPORT_SYMBOL(pci_disable_msi);
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 774 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 775 | /** |
Alexander Gordeev | ff1aa43 | 2013-12-30 08:28:15 +0100 | [diff] [blame] | 776 | * pci_msix_vec_count - return the number of device's MSI-X table entries |
Rafael J. Wysocki | a52e2e3 | 2009-01-24 00:21:14 +0100 | [diff] [blame] | 777 | * @dev: pointer to the pci_dev data structure of MSI-X device function |
Alexander Gordeev | ff1aa43 | 2013-12-30 08:28:15 +0100 | [diff] [blame] | 778 | * This function returns the number of device's MSI-X table entries and |
| 779 | * therefore the number of MSI-X vectors device is capable of sending. |
| 780 | * It returns a negative errno if the device is not capable of sending MSI-X |
| 781 | * interrupts. |
| 782 | **/ |
| 783 | int pci_msix_vec_count(struct pci_dev *dev) |
Rafael J. Wysocki | a52e2e3 | 2009-01-24 00:21:14 +0100 | [diff] [blame] | 784 | { |
Rafael J. Wysocki | a52e2e3 | 2009-01-24 00:21:14 +0100 | [diff] [blame] | 785 | u16 control; |
| 786 | |
Gavin Shan | 520fe9d | 2013-04-04 16:54:33 +0000 | [diff] [blame] | 787 | if (!dev->msix_cap) |
Alexander Gordeev | ff1aa43 | 2013-12-30 08:28:15 +0100 | [diff] [blame] | 788 | return -EINVAL; |
Rafael J. Wysocki | a52e2e3 | 2009-01-24 00:21:14 +0100 | [diff] [blame] | 789 | |
Bjorn Helgaas | f84ecd28 | 2013-04-17 17:38:32 -0600 | [diff] [blame] | 790 | pci_read_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, &control); |
Bjorn Helgaas | 527eee2 | 2013-04-17 17:44:48 -0600 | [diff] [blame] | 791 | return msix_table_size(control); |
Rafael J. Wysocki | a52e2e3 | 2009-01-24 00:21:14 +0100 | [diff] [blame] | 792 | } |
Alexander Gordeev | ff1aa43 | 2013-12-30 08:28:15 +0100 | [diff] [blame] | 793 | EXPORT_SYMBOL(pci_msix_vec_count); |
Rafael J. Wysocki | a52e2e3 | 2009-01-24 00:21:14 +0100 | [diff] [blame] | 794 | |
Thomas Gleixner | e75eafb | 2016-09-14 16:18:49 +0200 | [diff] [blame] | 795 | static int __pci_enable_msix(struct pci_dev *dev, struct msix_entry *entries, |
Logan Gunthorpe | d7cc609 | 2019-05-23 16:30:51 -0600 | [diff] [blame] | 796 | int nvec, struct irq_affinity *affd, int flags) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 797 | { |
Bjorn Helgaas | 5ec0940 | 2014-09-23 14:38:28 -0600 | [diff] [blame] | 798 | int nr_entries; |
Eric W. Biederman | ded86d8 | 2007-01-28 12:42:52 -0700 | [diff] [blame] | 799 | int i, j; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 800 | |
Bjorn Helgaas | 901c4dd | 2019-10-14 16:17:05 -0500 | [diff] [blame] | 801 | if (!pci_msi_supported(dev, nvec) || dev->current_state != PCI_D0) |
Alexander Gordeev | a06cd74 | 2014-09-23 12:45:58 -0600 | [diff] [blame] | 802 | return -EINVAL; |
Michael Ellerman | c9953a7 | 2007-04-05 17:19:08 +1000 | [diff] [blame] | 803 | |
Alexander Gordeev | ff1aa43 | 2013-12-30 08:28:15 +0100 | [diff] [blame] | 804 | nr_entries = pci_msix_vec_count(dev); |
| 805 | if (nr_entries < 0) |
| 806 | return nr_entries; |
Logan Gunthorpe | d7cc609 | 2019-05-23 16:30:51 -0600 | [diff] [blame] | 807 | if (nvec > nr_entries && !(flags & PCI_IRQ_VIRTUAL)) |
Michael S. Tsirkin | 57fbf52 | 2009-05-07 11:28:41 +0300 | [diff] [blame] | 808 | return nr_entries; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 809 | |
Christoph Hellwig | 3ac020e | 2016-07-12 18:20:16 +0900 | [diff] [blame] | 810 | if (entries) { |
| 811 | /* Check for any invalid entries */ |
| 812 | for (i = 0; i < nvec; i++) { |
| 813 | if (entries[i].entry >= nr_entries) |
| 814 | return -EINVAL; /* invalid entry */ |
| 815 | for (j = i + 1; j < nvec; j++) { |
| 816 | if (entries[i].entry == entries[j].entry) |
| 817 | return -EINVAL; /* duplicate entry */ |
| 818 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 819 | } |
| 820 | } |
Eric W. Biederman | 7bd007e | 2006-10-04 02:16:31 -0700 | [diff] [blame] | 821 | |
Bjorn Helgaas | f6b6aef | 2019-05-30 08:05:58 -0500 | [diff] [blame] | 822 | /* Check whether driver already requested for MSI IRQ */ |
Hidetoshi Seto | 500559a | 2009-08-10 10:14:15 +0900 | [diff] [blame] | 823 | if (dev->msi_enabled) { |
Frederick Lawler | 7506dc7 | 2018-01-18 12:55:24 -0600 | [diff] [blame] | 824 | pci_info(dev, "can't enable MSI-X (MSI IRQ already assigned)\n"); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 825 | return -EINVAL; |
| 826 | } |
Christoph Hellwig | 61e1c59 | 2016-11-08 17:15:04 -0800 | [diff] [blame] | 827 | return msix_capability_init(dev, entries, nvec, affd); |
Thomas Gleixner | e75eafb | 2016-09-14 16:18:49 +0200 | [diff] [blame] | 828 | } |
| 829 | |
/*
 * Disable MSI-X in the device and the hardware and restore legacy INTx.
 * Leaves the descriptors in place; freeing them is the caller's job
 * (see pci_disable_msix()).
 */
static void pci_msix_shutdown(struct pci_dev *dev)
{
	struct msi_desc *desc;

	if (!pci_msi_enable || !dev || !dev->msix_enabled)
		return;

	/* Device is gone: skip all config space accesses, just update state */
	if (pci_dev_is_disconnected(dev)) {
		dev->msix_enabled = 0;
		return;
	}

	/* Return the device with MSI-X masked as initial states */
	msi_for_each_desc(desc, &dev->dev, MSI_DESC_ALL)
		pci_msix_mask(desc);

	pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_ENABLE, 0);
	pci_intx_for_msi(dev, 1);
	dev->msix_enabled = 0;
	pcibios_alloc_irq(dev);
}
Hidetoshi Seto | c901851 | 2009-08-06 11:31:27 +0900 | [diff] [blame] | 851 | |
/**
 * pci_disable_msix - shut down a device's MSI-X and release its resources
 * @dev: the PCI device whose MSI-X is being disabled
 *
 * Disables MSI-X in the device and the hardware, restores INTx
 * (via pci_msix_shutdown()), and frees the MSI-X descriptors and IRQs.
 */
void pci_disable_msix(struct pci_dev *dev)
{
	/* Nothing to do if MSI is globally off or MSI-X was never enabled */
	if (!pci_msi_enable || !dev || !dev->msix_enabled)
		return;

	/* Hold the descriptor lock across shutdown and free */
	msi_lock_descs(&dev->dev);
	pci_msix_shutdown(dev);
	free_msi_irqs(dev);
	msi_unlock_descs(&dev->dev);
}
EXPORT_SYMBOL(pci_disable_msix);
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 863 | |
Christoph Hellwig | 4ef3368 | 2016-07-12 18:20:18 +0900 | [diff] [blame] | 864 | static int __pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec, |
Ming Lei | c66d4bd | 2019-02-16 18:13:09 +0100 | [diff] [blame] | 865 | struct irq_affinity *affd) |
Alexander Gordeev | 302a252 | 2013-12-30 08:28:16 +0100 | [diff] [blame] | 866 | { |
Alexander Gordeev | 034cd97 | 2014-04-14 15:28:35 +0200 | [diff] [blame] | 867 | int nvec; |
Alexander Gordeev | 302a252 | 2013-12-30 08:28:16 +0100 | [diff] [blame] | 868 | int rc; |
| 869 | |
Bjorn Helgaas | 901c4dd | 2019-10-14 16:17:05 -0500 | [diff] [blame] | 870 | if (!pci_msi_supported(dev, minvec) || dev->current_state != PCI_D0) |
Alexander Gordeev | a06cd74 | 2014-09-23 12:45:58 -0600 | [diff] [blame] | 871 | return -EINVAL; |
Alexander Gordeev | 034cd97 | 2014-04-14 15:28:35 +0200 | [diff] [blame] | 872 | |
Bjorn Helgaas | f6b6aef | 2019-05-30 08:05:58 -0500 | [diff] [blame] | 873 | /* Check whether driver already requested MSI-X IRQs */ |
Alexander Gordeev | 034cd97 | 2014-04-14 15:28:35 +0200 | [diff] [blame] | 874 | if (dev->msix_enabled) { |
Frederick Lawler | 7506dc7 | 2018-01-18 12:55:24 -0600 | [diff] [blame] | 875 | pci_info(dev, "can't enable MSI (MSI-X already enabled)\n"); |
Alexander Gordeev | 034cd97 | 2014-04-14 15:28:35 +0200 | [diff] [blame] | 876 | return -EINVAL; |
| 877 | } |
| 878 | |
Alexander Gordeev | 302a252 | 2013-12-30 08:28:16 +0100 | [diff] [blame] | 879 | if (maxvec < minvec) |
| 880 | return -ERANGE; |
| 881 | |
Tonghao Zhang | 4c1ef72 | 2018-09-24 07:00:41 -0700 | [diff] [blame] | 882 | if (WARN_ON_ONCE(dev->msi_enabled)) |
| 883 | return -EINVAL; |
| 884 | |
Alexander Gordeev | 034cd97 | 2014-04-14 15:28:35 +0200 | [diff] [blame] | 885 | nvec = pci_msi_vec_count(dev); |
| 886 | if (nvec < 0) |
| 887 | return nvec; |
Christoph Hellwig | 4ef3368 | 2016-07-12 18:20:18 +0900 | [diff] [blame] | 888 | if (nvec < minvec) |
Dennis Chen | 948b762 | 2016-12-01 10:15:04 +0800 | [diff] [blame] | 889 | return -ENOSPC; |
Christoph Hellwig | 4ef3368 | 2016-07-12 18:20:18 +0900 | [diff] [blame] | 890 | |
| 891 | if (nvec > maxvec) |
Alexander Gordeev | 034cd97 | 2014-04-14 15:28:35 +0200 | [diff] [blame] | 892 | nvec = maxvec; |
| 893 | |
Thomas Gleixner | 93296cd | 2021-12-15 18:19:49 +0100 | [diff] [blame] | 894 | rc = pci_setup_msi_context(dev); |
Thomas Gleixner | 3f35d2c | 2021-12-15 18:16:44 +0100 | [diff] [blame] | 895 | if (rc) |
| 896 | return rc; |
| 897 | |
Christoph Hellwig | 4ef3368 | 2016-07-12 18:20:18 +0900 | [diff] [blame] | 898 | for (;;) { |
Christoph Hellwig | 61e1c59 | 2016-11-08 17:15:04 -0800 | [diff] [blame] | 899 | if (affd) { |
Michael Hernandez | 6f9a22b | 2017-05-18 10:47:47 -0700 | [diff] [blame] | 900 | nvec = irq_calc_affinity_vectors(minvec, nvec, affd); |
Christoph Hellwig | 4ef3368 | 2016-07-12 18:20:18 +0900 | [diff] [blame] | 901 | if (nvec < minvec) |
Alexander Gordeev | 302a252 | 2013-12-30 08:28:16 +0100 | [diff] [blame] | 902 | return -ENOSPC; |
Alexander Gordeev | 302a252 | 2013-12-30 08:28:16 +0100 | [diff] [blame] | 903 | } |
Alexander Gordeev | 302a252 | 2013-12-30 08:28:16 +0100 | [diff] [blame] | 904 | |
Christoph Hellwig | 61e1c59 | 2016-11-08 17:15:04 -0800 | [diff] [blame] | 905 | rc = msi_capability_init(dev, nvec, affd); |
Christoph Hellwig | 4ef3368 | 2016-07-12 18:20:18 +0900 | [diff] [blame] | 906 | if (rc == 0) |
| 907 | return nvec; |
| 908 | |
Christoph Hellwig | 4ef3368 | 2016-07-12 18:20:18 +0900 | [diff] [blame] | 909 | if (rc < 0) |
| 910 | return rc; |
| 911 | if (rc < minvec) |
| 912 | return -ENOSPC; |
| 913 | |
| 914 | nvec = rc; |
| 915 | } |
| 916 | } |
| 917 | |
Christoph Hellwig | 4fe0395 | 2017-01-09 21:37:40 +0100 | [diff] [blame] | 918 | /* deprecated, don't use */ |
| 919 | int pci_enable_msi(struct pci_dev *dev) |
Christoph Hellwig | 4ef3368 | 2016-07-12 18:20:18 +0900 | [diff] [blame] | 920 | { |
Christoph Hellwig | 4fe0395 | 2017-01-09 21:37:40 +0100 | [diff] [blame] | 921 | int rc = __pci_enable_msi_range(dev, 1, 1, NULL); |
| 922 | if (rc < 0) |
| 923 | return rc; |
| 924 | return 0; |
Alexander Gordeev | 302a252 | 2013-12-30 08:28:16 +0100 | [diff] [blame] | 925 | } |
Christoph Hellwig | 4fe0395 | 2017-01-09 21:37:40 +0100 | [diff] [blame] | 926 | EXPORT_SYMBOL(pci_enable_msi); |
Alexander Gordeev | 302a252 | 2013-12-30 08:28:16 +0100 | [diff] [blame] | 927 | |
Christoph Hellwig | 4ef3368 | 2016-07-12 18:20:18 +0900 | [diff] [blame] | 928 | static int __pci_enable_msix_range(struct pci_dev *dev, |
Christoph Hellwig | 61e1c59 | 2016-11-08 17:15:04 -0800 | [diff] [blame] | 929 | struct msix_entry *entries, int minvec, |
Logan Gunthorpe | d7cc609 | 2019-05-23 16:30:51 -0600 | [diff] [blame] | 930 | int maxvec, struct irq_affinity *affd, |
| 931 | int flags) |
Christoph Hellwig | 4ef3368 | 2016-07-12 18:20:18 +0900 | [diff] [blame] | 932 | { |
Thomas Gleixner | e75eafb | 2016-09-14 16:18:49 +0200 | [diff] [blame] | 933 | int rc, nvec = maxvec; |
Christoph Hellwig | 4ef3368 | 2016-07-12 18:20:18 +0900 | [diff] [blame] | 934 | |
| 935 | if (maxvec < minvec) |
| 936 | return -ERANGE; |
| 937 | |
Tonghao Zhang | 4c1ef72 | 2018-09-24 07:00:41 -0700 | [diff] [blame] | 938 | if (WARN_ON_ONCE(dev->msix_enabled)) |
| 939 | return -EINVAL; |
| 940 | |
Thomas Gleixner | 93296cd | 2021-12-15 18:19:49 +0100 | [diff] [blame] | 941 | rc = pci_setup_msi_context(dev); |
Thomas Gleixner | 3f35d2c | 2021-12-15 18:16:44 +0100 | [diff] [blame] | 942 | if (rc) |
| 943 | return rc; |
| 944 | |
Christoph Hellwig | 4ef3368 | 2016-07-12 18:20:18 +0900 | [diff] [blame] | 945 | for (;;) { |
Christoph Hellwig | 61e1c59 | 2016-11-08 17:15:04 -0800 | [diff] [blame] | 946 | if (affd) { |
Michael Hernandez | 6f9a22b | 2017-05-18 10:47:47 -0700 | [diff] [blame] | 947 | nvec = irq_calc_affinity_vectors(minvec, nvec, affd); |
Christoph Hellwig | 4ef3368 | 2016-07-12 18:20:18 +0900 | [diff] [blame] | 948 | if (nvec < minvec) |
| 949 | return -ENOSPC; |
| 950 | } |
| 951 | |
Logan Gunthorpe | d7cc609 | 2019-05-23 16:30:51 -0600 | [diff] [blame] | 952 | rc = __pci_enable_msix(dev, entries, nvec, affd, flags); |
Christoph Hellwig | 4ef3368 | 2016-07-12 18:20:18 +0900 | [diff] [blame] | 953 | if (rc == 0) |
| 954 | return nvec; |
| 955 | |
Christoph Hellwig | 4ef3368 | 2016-07-12 18:20:18 +0900 | [diff] [blame] | 956 | if (rc < 0) |
| 957 | return rc; |
| 958 | if (rc < minvec) |
| 959 | return -ENOSPC; |
| 960 | |
| 961 | nvec = rc; |
| 962 | } |
| 963 | } |
| 964 | |
Alexander Gordeev | 302a252 | 2013-12-30 08:28:16 +0100 | [diff] [blame] | 965 | /** |
| 966 | * pci_enable_msix_range - configure device's MSI-X capability structure |
| 967 | * @dev: pointer to the pci_dev data structure of MSI-X device function |
| 968 | * @entries: pointer to an array of MSI-X entries |
Bjorn Helgaas | f6b6aef | 2019-05-30 08:05:58 -0500 | [diff] [blame] | 969 | * @minvec: minimum number of MSI-X IRQs requested |
| 970 | * @maxvec: maximum number of MSI-X IRQs requested |
Alexander Gordeev | 302a252 | 2013-12-30 08:28:16 +0100 | [diff] [blame] | 971 | * |
| 972 | * Setup the MSI-X capability structure of device function with a maximum |
| 973 | * possible number of interrupts in the range between @minvec and @maxvec |
| 974 | * upon its software driver call to request for MSI-X mode enabled on its |
| 975 | * hardware device function. It returns a negative errno if an error occurs. |
| 976 | * If it succeeds, it returns the actual number of interrupts allocated and |
| 977 | * indicates the successful configuration of MSI-X capability structure |
| 978 | * with new allocated MSI-X interrupts. |
| 979 | **/ |
| 980 | int pci_enable_msix_range(struct pci_dev *dev, struct msix_entry *entries, |
Christoph Hellwig | 4ef3368 | 2016-07-12 18:20:18 +0900 | [diff] [blame] | 981 | int minvec, int maxvec) |
Alexander Gordeev | 302a252 | 2013-12-30 08:28:16 +0100 | [diff] [blame] | 982 | { |
Logan Gunthorpe | d7cc609 | 2019-05-23 16:30:51 -0600 | [diff] [blame] | 983 | return __pci_enable_msix_range(dev, entries, minvec, maxvec, NULL, 0); |
Alexander Gordeev | 302a252 | 2013-12-30 08:28:16 +0100 | [diff] [blame] | 984 | } |
| 985 | EXPORT_SYMBOL(pci_enable_msix_range); |
Jiang Liu | 3878eae | 2014-11-11 21:02:18 +0800 | [diff] [blame] | 986 | |
Christoph Hellwig | aff1716 | 2016-07-12 18:20:17 +0900 | [diff] [blame] | 987 | /** |
Christoph Hellwig | 402723a | 2016-11-08 17:15:05 -0800 | [diff] [blame] | 988 | * pci_alloc_irq_vectors_affinity - allocate multiple IRQs for a device |
Christoph Hellwig | aff1716 | 2016-07-12 18:20:17 +0900 | [diff] [blame] | 989 | * @dev: PCI device to operate on |
| 990 | * @min_vecs: minimum number of vectors required (must be >= 1) |
| 991 | * @max_vecs: maximum (desired) number of vectors |
| 992 | * @flags: flags or quirks for the allocation |
Christoph Hellwig | 402723a | 2016-11-08 17:15:05 -0800 | [diff] [blame] | 993 | * @affd: optional description of the affinity requirements |
Christoph Hellwig | aff1716 | 2016-07-12 18:20:17 +0900 | [diff] [blame] | 994 | * |
| 995 | * Allocate up to @max_vecs interrupt vectors for @dev, using MSI-X or MSI |
| 996 | * vectors if available, and fall back to a single legacy vector |
| 997 | * if neither is available. Return the number of vectors allocated, |
| 998 | * (which might be smaller than @max_vecs) if successful, or a negative |
| 999 | * error code on error. If less than @min_vecs interrupt vectors are |
| 1000 | * available for @dev the function will fail with -ENOSPC. |
| 1001 | * |
| 1002 | * To get the Linux IRQ number used for a vector that can be passed to |
| 1003 | * request_irq() use the pci_irq_vector() helper. |
| 1004 | */ |
Christoph Hellwig | 402723a | 2016-11-08 17:15:05 -0800 | [diff] [blame] | 1005 | int pci_alloc_irq_vectors_affinity(struct pci_dev *dev, unsigned int min_vecs, |
| 1006 | unsigned int max_vecs, unsigned int flags, |
Ming Lei | c66d4bd | 2019-02-16 18:13:09 +0100 | [diff] [blame] | 1007 | struct irq_affinity *affd) |
Christoph Hellwig | aff1716 | 2016-07-12 18:20:17 +0900 | [diff] [blame] | 1008 | { |
Ming Lei | c66d4bd | 2019-02-16 18:13:09 +0100 | [diff] [blame] | 1009 | struct irq_affinity msi_default_affd = {0}; |
Piotr Stankiewicz | 30ff3e8 | 2020-06-16 09:33:16 +0200 | [diff] [blame] | 1010 | int nvecs = -ENOSPC; |
Christoph Hellwig | aff1716 | 2016-07-12 18:20:17 +0900 | [diff] [blame] | 1011 | |
Christoph Hellwig | 402723a | 2016-11-08 17:15:05 -0800 | [diff] [blame] | 1012 | if (flags & PCI_IRQ_AFFINITY) { |
| 1013 | if (!affd) |
| 1014 | affd = &msi_default_affd; |
| 1015 | } else { |
| 1016 | if (WARN_ON(affd)) |
| 1017 | affd = NULL; |
| 1018 | } |
Christoph Hellwig | 61e1c59 | 2016-11-08 17:15:04 -0800 | [diff] [blame] | 1019 | |
Christoph Hellwig | 4fe0d15 | 2016-08-11 07:11:04 -0700 | [diff] [blame] | 1020 | if (flags & PCI_IRQ_MSIX) { |
Piotr Stankiewicz | 30ff3e8 | 2020-06-16 09:33:16 +0200 | [diff] [blame] | 1021 | nvecs = __pci_enable_msix_range(dev, NULL, min_vecs, max_vecs, |
| 1022 | affd, flags); |
| 1023 | if (nvecs > 0) |
| 1024 | return nvecs; |
Christoph Hellwig | aff1716 | 2016-07-12 18:20:17 +0900 | [diff] [blame] | 1025 | } |
| 1026 | |
Christoph Hellwig | 4fe0d15 | 2016-08-11 07:11:04 -0700 | [diff] [blame] | 1027 | if (flags & PCI_IRQ_MSI) { |
Piotr Stankiewicz | 30ff3e8 | 2020-06-16 09:33:16 +0200 | [diff] [blame] | 1028 | nvecs = __pci_enable_msi_range(dev, min_vecs, max_vecs, affd); |
| 1029 | if (nvecs > 0) |
| 1030 | return nvecs; |
Christoph Hellwig | aff1716 | 2016-07-12 18:20:17 +0900 | [diff] [blame] | 1031 | } |
| 1032 | |
Bjorn Helgaas | f6b6aef | 2019-05-30 08:05:58 -0500 | [diff] [blame] | 1033 | /* use legacy IRQ if allowed */ |
Christoph Hellwig | 862290f | 2017-02-01 14:41:42 +0100 | [diff] [blame] | 1034 | if (flags & PCI_IRQ_LEGACY) { |
| 1035 | if (min_vecs == 1 && dev->irq) { |
Ming Lei | c66d4bd | 2019-02-16 18:13:09 +0100 | [diff] [blame] | 1036 | /* |
| 1037 | * Invoke the affinity spreading logic to ensure that |
| 1038 | * the device driver can adjust queue configuration |
| 1039 | * for the single interrupt case. |
| 1040 | */ |
| 1041 | if (affd) |
| 1042 | irq_create_affinity_masks(1, affd); |
Christoph Hellwig | 862290f | 2017-02-01 14:41:42 +0100 | [diff] [blame] | 1043 | pci_intx(dev, 1); |
| 1044 | return 1; |
| 1045 | } |
Christoph Hellwig | 5d0bdf2 | 2016-08-11 07:11:05 -0700 | [diff] [blame] | 1046 | } |
| 1047 | |
Piotr Stankiewicz | 30ff3e8 | 2020-06-16 09:33:16 +0200 | [diff] [blame] | 1048 | return nvecs; |
Christoph Hellwig | aff1716 | 2016-07-12 18:20:17 +0900 | [diff] [blame] | 1049 | } |
Christoph Hellwig | 402723a | 2016-11-08 17:15:05 -0800 | [diff] [blame] | 1050 | EXPORT_SYMBOL(pci_alloc_irq_vectors_affinity); |
Christoph Hellwig | aff1716 | 2016-07-12 18:20:17 +0900 | [diff] [blame] | 1051 | |
| 1052 | /** |
| 1053 | * pci_free_irq_vectors - free previously allocated IRQs for a device |
| 1054 | * @dev: PCI device to operate on |
| 1055 | * |
| 1056 | * Undoes the allocations and enabling in pci_alloc_irq_vectors(). |
| 1057 | */ |
| 1058 | void pci_free_irq_vectors(struct pci_dev *dev) |
| 1059 | { |
| 1060 | pci_disable_msix(dev); |
| 1061 | pci_disable_msi(dev); |
| 1062 | } |
| 1063 | EXPORT_SYMBOL(pci_free_irq_vectors); |
| 1064 | |
| 1065 | /** |
| 1066 | * pci_irq_vector - return Linux IRQ number of a device vector |
Thomas Gleixner | 29bbc35 | 2021-12-06 23:27:26 +0100 | [diff] [blame] | 1067 | * @dev: PCI device to operate on |
| 1068 | * @nr: Interrupt vector index (0-based) |
| 1069 | * |
| 1070 | * @nr has the following meanings depending on the interrupt mode: |
| 1071 | * MSI-X: The index in the MSI-X vector table |
| 1072 | * MSI: The index of the enabled MSI vectors |
| 1073 | * INTx: Must be 0 |
| 1074 | * |
| 1075 | * Return: The Linux interrupt number or -EINVAl if @nr is out of range. |
Christoph Hellwig | aff1716 | 2016-07-12 18:20:17 +0900 | [diff] [blame] | 1076 | */ |
| 1077 | int pci_irq_vector(struct pci_dev *dev, unsigned int nr) |
| 1078 | { |
Thomas Gleixner | 82ff8e6 | 2021-12-10 23:19:25 +0100 | [diff] [blame] | 1079 | unsigned int irq; |
Christoph Hellwig | aff1716 | 2016-07-12 18:20:17 +0900 | [diff] [blame] | 1080 | |
Thomas Gleixner | 82ff8e6 | 2021-12-10 23:19:25 +0100 | [diff] [blame] | 1081 | if (!dev->msi_enabled && !dev->msix_enabled) |
| 1082 | return !nr ? dev->irq : -EINVAL; |
Christoph Hellwig | aff1716 | 2016-07-12 18:20:17 +0900 | [diff] [blame] | 1083 | |
Thomas Gleixner | 82ff8e6 | 2021-12-10 23:19:25 +0100 | [diff] [blame] | 1084 | irq = msi_get_virq(&dev->dev, nr); |
| 1085 | return irq ? irq : -EINVAL; |
Christoph Hellwig | aff1716 | 2016-07-12 18:20:17 +0900 | [diff] [blame] | 1086 | } |
| 1087 | EXPORT_SYMBOL(pci_irq_vector); |
| 1088 | |
Thomas Gleixner | ee8d41e | 2016-09-14 16:18:51 +0200 | [diff] [blame] | 1089 | /** |
Bjorn Helgaas | f6b6aef | 2019-05-30 08:05:58 -0500 | [diff] [blame] | 1090 | * pci_irq_get_affinity - return the affinity of a particular MSI vector |
Thomas Gleixner | ee8d41e | 2016-09-14 16:18:51 +0200 | [diff] [blame] | 1091 | * @dev: PCI device to operate on |
| 1092 | * @nr: device-relative interrupt vector index (0-based). |
Thomas Gleixner | 29bbc35 | 2021-12-06 23:27:26 +0100 | [diff] [blame] | 1093 | * |
| 1094 | * @nr has the following meanings depending on the interrupt mode: |
| 1095 | * MSI-X: The index in the MSI-X vector table |
| 1096 | * MSI: The index of the enabled MSI vectors |
| 1097 | * INTx: Must be 0 |
| 1098 | * |
| 1099 | * Return: A cpumask pointer or NULL if @nr is out of range |
Thomas Gleixner | ee8d41e | 2016-09-14 16:18:51 +0200 | [diff] [blame] | 1100 | */ |
| 1101 | const struct cpumask *pci_irq_get_affinity(struct pci_dev *dev, int nr) |
| 1102 | { |
Thomas Gleixner | d558285 | 2021-12-18 11:25:14 +0100 | [diff] [blame] | 1103 | int idx, irq = pci_irq_vector(dev, nr); |
Thomas Gleixner | f482359 | 2021-12-10 23:19:26 +0100 | [diff] [blame] | 1104 | struct msi_desc *desc; |
Thomas Gleixner | ee8d41e | 2016-09-14 16:18:51 +0200 | [diff] [blame] | 1105 | |
Thomas Gleixner | f482359 | 2021-12-10 23:19:26 +0100 | [diff] [blame] | 1106 | if (WARN_ON_ONCE(irq <= 0)) |
Thomas Gleixner | ee8d41e | 2016-09-14 16:18:51 +0200 | [diff] [blame] | 1107 | return NULL; |
Thomas Gleixner | ee8d41e | 2016-09-14 16:18:51 +0200 | [diff] [blame] | 1108 | |
Thomas Gleixner | f482359 | 2021-12-10 23:19:26 +0100 | [diff] [blame] | 1109 | desc = irq_get_msi_desc(irq); |
| 1110 | /* Non-MSI does not have the information handy */ |
| 1111 | if (!desc) |
Thomas Gleixner | ee8d41e | 2016-09-14 16:18:51 +0200 | [diff] [blame] | 1112 | return cpu_possible_mask; |
Thomas Gleixner | f482359 | 2021-12-10 23:19:26 +0100 | [diff] [blame] | 1113 | |
| 1114 | if (WARN_ON_ONCE(!desc->affinity)) |
| 1115 | return NULL; |
Thomas Gleixner | d558285 | 2021-12-18 11:25:14 +0100 | [diff] [blame] | 1116 | |
| 1117 | /* |
| 1118 | * MSI has a mask array in the descriptor. |
| 1119 | * MSI-X has a single mask. |
| 1120 | */ |
| 1121 | idx = dev->msi_enabled ? nr : 0; |
| 1122 | return &desc->affinity[idx].mask; |
Thomas Gleixner | ee8d41e | 2016-09-14 16:18:51 +0200 | [diff] [blame] | 1123 | } |
| 1124 | EXPORT_SYMBOL(pci_irq_get_affinity); |
| 1125 | |
Jiang Liu | 25a98bd | 2015-07-09 16:00:45 +0800 | [diff] [blame] | 1126 | struct pci_dev *msi_desc_to_pci_dev(struct msi_desc *desc) |
| 1127 | { |
| 1128 | return to_pci_dev(desc->dev); |
| 1129 | } |
Jake Oshins | a4289dc | 2015-12-10 17:52:59 +0000 | [diff] [blame] | 1130 | EXPORT_SYMBOL(msi_desc_to_pci_dev); |
Jiang Liu | 25a98bd | 2015-07-09 16:00:45 +0800 | [diff] [blame] | 1131 | |
Thomas Gleixner | aa423ac | 2021-12-06 23:27:52 +0100 | [diff] [blame] | 1132 | void pci_no_msi(void) |
Jiang Liu | 3878eae | 2014-11-11 21:02:18 +0800 | [diff] [blame] | 1133 | { |
Thomas Gleixner | aa423ac | 2021-12-06 23:27:52 +0100 | [diff] [blame] | 1134 | pci_msi_enable = 0; |
Jiang Liu | 3878eae | 2014-11-11 21:02:18 +0800 | [diff] [blame] | 1135 | } |
| 1136 | |
| 1137 | /** |
Thomas Gleixner | aa423ac | 2021-12-06 23:27:52 +0100 | [diff] [blame] | 1138 | * pci_msi_enabled - is MSI enabled? |
Jiang Liu | 3878eae | 2014-11-11 21:02:18 +0800 | [diff] [blame] | 1139 | * |
Thomas Gleixner | aa423ac | 2021-12-06 23:27:52 +0100 | [diff] [blame] | 1140 | * Returns true if MSI has not been disabled by the command-line option |
| 1141 | * pci=nomsi. |
| 1142 | **/ |
| 1143 | int pci_msi_enabled(void) |
Jiang Liu | 3878eae | 2014-11-11 21:02:18 +0800 | [diff] [blame] | 1144 | { |
Thomas Gleixner | aa423ac | 2021-12-06 23:27:52 +0100 | [diff] [blame] | 1145 | return pci_msi_enable; |
Jiang Liu | 3878eae | 2014-11-11 21:02:18 +0800 | [diff] [blame] | 1146 | } |
Thomas Gleixner | aa423ac | 2021-12-06 23:27:52 +0100 | [diff] [blame] | 1147 | EXPORT_SYMBOL(pci_msi_enabled); |