// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)

#include <linux/irq.h>
#include <linux/module.h>
#include <linux/ntb.h>
#include <linux/msi.h>
#include <linux/pci.h>

struct ntb_msi {
	/* Local MSI address range covered by the inbound memory windows */
	u64 base_addr;
	u64 end_addr;

	/* Client callback, invoked whenever an MSI descriptor changes */
	void (*desc_changed)(void *ctx);

	/* Mapping of the outbound memory window used to reach each peer */
	u32 __iomem *peer_mws[];
};

/**
 * ntb_msi_init() - Initialize the MSI context
 * @ntb: NTB device context
 * @desc_changed: Callback invoked with the client context whenever an MSI
 *	descriptor changes, so the client can resend the descriptors to
 *	the peer; may be NULL
 *
 * This function must be called before any other ntb_msi function.
 * It initializes the context for MSI operations and maps
 * the peer memory windows.
 *
 * This function reserves the last N outbound memory windows (where N
 * is the number of peers).
 *
 * Return: Zero on success, otherwise a negative error number.
 */
int ntb_msi_init(struct ntb_dev *ntb,
		 void (*desc_changed)(void *ctx))
{
	phys_addr_t mw_phys_addr;
	resource_size_t mw_size;
	size_t struct_size;
	int peer_widx;
	int peers;
	int ret;
	int i;

	peers = ntb_peer_port_count(ntb);
	if (peers <= 0)
		return -EINVAL;

	struct_size = sizeof(*ntb->msi) + sizeof(*ntb->msi->peer_mws) * peers;

	ntb->msi = devm_kzalloc(&ntb->dev, struct_size, GFP_KERNEL);
	if (!ntb->msi)
		return -ENOMEM;

	ntb->msi->desc_changed = desc_changed;

	for (i = 0; i < peers; i++) {
		peer_widx = ntb_peer_mw_count(ntb) - 1 - i;

		ret = ntb_peer_mw_get_addr(ntb, peer_widx, &mw_phys_addr,
					   &mw_size);
		if (ret)
			goto unroll;

		ntb->msi->peer_mws[i] = devm_ioremap(&ntb->dev, mw_phys_addr,
						     mw_size);
		if (!ntb->msi->peer_mws[i]) {
			ret = -EFAULT;
			goto unroll;
		}
	}

	return 0;

unroll:
	for (i = 0; i < peers; i++)
		if (ntb->msi->peer_mws[i])
			devm_iounmap(&ntb->dev, ntb->msi->peer_mws[i]);

	devm_kfree(&ntb->dev, ntb->msi);
	ntb->msi = NULL;
	return ret;
}
EXPORT_SYMBOL(ntb_msi_init);
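
/*
 * Example (illustrative sketch, not part of the library): a client would
 * typically call ntb_msi_init() early in its probe routine, before any
 * other ntb_msi function.  The my_* names below are hypothetical.
 *
 *	static void my_desc_changed(void *ctx)
 *	{
 *		struct my_client *nt = ctx;
 *
 *		// Re-publish the local MSI descriptors to the peer,
 *		// e.g. through scratchpad or message registers.
 *	}
 *
 *	static int my_probe(struct ntb_client *client, struct ntb_dev *ntb)
 *	{
 *		int ret = ntb_msi_init(ntb, my_desc_changed);
 *
 *		if (ret)
 *			return ret;
 *		// ... continue with client setup ...
 *	}
 */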

/**
 * ntb_msi_setup_mws() - Initialize the MSI inbound memory windows
 * @ntb: NTB device context
 *
 * This function sets up the required inbound memory windows. It should be
 * called from a work function after a link up event.
 *
 * Over the entire network, this function reserves the last N
 * inbound memory windows for each peer (where N is the number of peers).
 *
 * ntb_msi_init() must be called before this function.
 *
 * Return: Zero on success, otherwise a negative error number.
 */
int ntb_msi_setup_mws(struct ntb_dev *ntb)
{
	struct msi_desc *desc;
	u64 addr;
	int peer, peer_widx;
	resource_size_t addr_align, size_align, size_max;
	resource_size_t mw_size = SZ_32K;
	resource_size_t mw_min_size = mw_size;
	int i;
	int ret;

	if (!ntb->msi)
		return -EINVAL;

	msi_lock_descs(&ntb->pdev->dev);
	desc = msi_first_desc(&ntb->pdev->dev, MSI_DESC_ASSOCIATED);
	addr = desc->msg.address_lo + ((uint64_t)desc->msg.address_hi << 32);
	msi_unlock_descs(&ntb->pdev->dev);

	for (peer = 0; peer < ntb_peer_port_count(ntb); peer++) {
		peer_widx = ntb_peer_highest_mw_idx(ntb, peer);
		if (peer_widx < 0)
			return peer_widx;

		ret = ntb_mw_get_align(ntb, peer, peer_widx, &addr_align,
				       NULL, NULL);
		if (ret)
			return ret;

		addr &= ~(addr_align - 1);
	}

	for (peer = 0; peer < ntb_peer_port_count(ntb); peer++) {
		peer_widx = ntb_peer_highest_mw_idx(ntb, peer);
		if (peer_widx < 0) {
			ret = peer_widx;
			goto error_out;
		}

		ret = ntb_mw_get_align(ntb, peer, peer_widx, NULL,
				       &size_align, &size_max);
		if (ret)
			goto error_out;

		mw_size = round_up(mw_size, size_align);
		mw_size = max(mw_size, size_max);
		if (mw_size < mw_min_size)
			mw_min_size = mw_size;

		ret = ntb_mw_set_trans(ntb, peer, peer_widx,
				       addr, mw_size);
		if (ret)
			goto error_out;
	}

	ntb->msi->base_addr = addr;
	ntb->msi->end_addr = addr + mw_min_size;

	return 0;

error_out:
	for (i = 0; i < peer; i++) {
		peer_widx = ntb_peer_highest_mw_idx(ntb, i);
		if (peer_widx < 0)
			continue;

		ntb_mw_clear_trans(ntb, i, peer_widx);
	}

	return ret;
}
EXPORT_SYMBOL(ntb_msi_setup_mws);
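
/*
 * Example (illustrative sketch, not part of the library): the setup is
 * typically deferred to a work item scheduled from the client's link
 * event callback.  The my_client names are hypothetical.
 *
 *	static void my_link_work(struct work_struct *work)
 *	{
 *		struct my_client *nt = container_of(work, struct my_client,
 *						    link_work);
 *
 *		if (!ntb_link_is_up(nt->ntb, NULL, NULL))
 *			return;
 *
 *		if (ntb_msi_setup_mws(nt->ntb))
 *			dev_warn(&nt->ntb->dev, "failed to set up MSI MWs\n");
 *		// ... exchange MSI descriptors with the peer ...
 *	}
 */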

/**
 * ntb_msi_clear_mws() - Clear all inbound memory windows
 * @ntb: NTB device context
 *
 * This function tears down the resources used by ntb_msi_setup_mws().
 */
void ntb_msi_clear_mws(struct ntb_dev *ntb)
{
	int peer;
	int peer_widx;

	for (peer = 0; peer < ntb_peer_port_count(ntb); peer++) {
		peer_widx = ntb_peer_highest_mw_idx(ntb, peer);
		if (peer_widx < 0)
			continue;

		ntb_mw_clear_trans(ntb, peer, peer_widx);
	}
}
EXPORT_SYMBOL(ntb_msi_clear_mws);
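
/*
 * Example (illustrative sketch): the counterpart to the link-up work
 * above; a client would normally call this from its link-down handling
 * and from its remove path.
 *
 *	static void my_link_cleanup(struct my_client *nt)
 *	{
 *		ntb_msi_clear_mws(nt->ntb);
 *		// ... invalidate any descriptors shared with the peer ...
 *	}
 */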

struct ntb_msi_devres {
	struct ntb_dev *ntb;
	struct msi_desc *entry;
	struct ntb_msi_desc *msi_desc;
};

static int ntb_msi_set_desc(struct ntb_dev *ntb, struct msi_desc *entry,
			    struct ntb_msi_desc *msi_desc)
{
	u64 addr;

	addr = entry->msg.address_lo +
		((uint64_t)entry->msg.address_hi << 32);

	if (addr < ntb->msi->base_addr || addr >= ntb->msi->end_addr) {
		dev_warn_once(&ntb->dev,
			      "IRQ %d: MSI Address not within the memory window (%llx, [%llx %llx])\n",
			      entry->irq, addr, ntb->msi->base_addr,
			      ntb->msi->end_addr);
		return -EFAULT;
	}

	msi_desc->addr_offset = addr - ntb->msi->base_addr;
	msi_desc->data = entry->msg.data;

	return 0;
}

static void ntb_msi_write_msg(struct msi_desc *entry, void *data)
{
	struct ntb_msi_devres *dr = data;

	WARN_ON(ntb_msi_set_desc(dr->ntb, entry, dr->msi_desc));

	if (dr->ntb->msi->desc_changed)
		dr->ntb->msi->desc_changed(dr->ntb->ctx);
}

static void ntbm_msi_callback_release(struct device *dev, void *res)
{
	struct ntb_msi_devres *dr = res;

	dr->entry->write_msi_msg = NULL;
	dr->entry->write_msi_msg_data = NULL;
}

static int ntbm_msi_setup_callback(struct ntb_dev *ntb, struct msi_desc *entry,
				   struct ntb_msi_desc *msi_desc)
{
	struct ntb_msi_devres *dr;

	dr = devres_alloc(ntbm_msi_callback_release,
			  sizeof(struct ntb_msi_devres), GFP_KERNEL);
	if (!dr)
		return -ENOMEM;

	dr->ntb = ntb;
	dr->entry = entry;
	dr->msi_desc = msi_desc;

	devres_add(&ntb->dev, dr);

	dr->entry->write_msi_msg = ntb_msi_write_msg;
	dr->entry->write_msi_msg_data = dr;

	return 0;
}

/**
 * ntbm_msi_request_threaded_irq() - allocate an MSI interrupt
 * @ntb: NTB device context
 * @handler: Function to be called when the IRQ occurs
 * @thread_fn: Function to be called in a threaded interrupt context. NULL
 *	for clients which handle everything in @handler
 * @name: An ascii name for the claiming device, dev_name(dev) if NULL
 * @dev_id: A cookie passed back to the handler function
 * @msi_desc: Descriptor for the allocated interrupt; filled in on success
 *	and may be sent to a peer to trigger the interrupt
 *
 * This function assigns an interrupt handler to an unused
 * MSI interrupt and returns the descriptor used to trigger
 * it. The descriptor can then be sent to a peer to trigger
 * the interrupt.
 *
 * The interrupt resource is managed with devres so it will
 * be automatically freed when the NTB device is torn down.
 *
 * If an IRQ allocated with this function needs to be freed
 * separately, ntbm_msi_free_irq() must be used.
 *
 * Return: IRQ number assigned on success, otherwise a negative error number.
 */
int ntbm_msi_request_threaded_irq(struct ntb_dev *ntb, irq_handler_t handler,
				  irq_handler_t thread_fn,
				  const char *name, void *dev_id,
				  struct ntb_msi_desc *msi_desc)
{
	struct device *dev = &ntb->pdev->dev;
	struct msi_desc *entry;
	int ret;

	if (!ntb->msi)
		return -EINVAL;

	msi_lock_descs(dev);
	msi_for_each_desc(entry, dev, MSI_DESC_ASSOCIATED) {
		if (irq_has_action(entry->irq))
			continue;

		ret = devm_request_threaded_irq(&ntb->dev, entry->irq, handler,
						thread_fn, 0, name, dev_id);
		if (ret)
			continue;

		if (ntb_msi_set_desc(ntb, entry, msi_desc)) {
			devm_free_irq(&ntb->dev, entry->irq, dev_id);
			continue;
		}

		ret = ntbm_msi_setup_callback(ntb, entry, msi_desc);
		if (ret) {
			devm_free_irq(&ntb->dev, entry->irq, dev_id);
			goto unlock;
		}

		ret = entry->irq;
		goto unlock;
	}
	ret = -ENODEV;

unlock:
	msi_unlock_descs(dev);
	return ret;
}
EXPORT_SYMBOL(ntbm_msi_request_threaded_irq);
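
/*
 * Example (illustrative sketch): allocate an interrupt and publish its
 * descriptor so the peer can trigger it.  The my_isr handler name and
 * the scratchpad indices are hypothetical.
 *
 *	struct ntb_msi_desc desc;
 *	int irq;
 *
 *	irq = ntbm_msi_request_threaded_irq(ntb, my_isr, NULL,
 *					    "my_client", my_ctx, &desc);
 *	if (irq < 0)
 *		return irq;
 *
 *	// Hand desc.addr_offset and desc.data to the peer, e.g. via
 *	// peer scratchpads, so it can call ntb_msi_peer_trigger().
 *	ntb_peer_spad_write(ntb, 0, 0, desc.addr_offset);
 *	ntb_peer_spad_write(ntb, 0, 1, desc.data);
 */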

static int ntbm_msi_callback_match(struct device *dev, void *res, void *data)
{
	struct ntb_dev *ntb = dev_ntb(dev);
	struct ntb_msi_devres *dr = res;

	return dr->ntb == ntb && dr->entry == data;
}

/**
 * ntbm_msi_free_irq() - free an interrupt
 * @ntb: NTB device context
 * @irq: Interrupt line to free
 * @dev_id: Device identity to free
 *
 * This function should be used to manually free IRQs allocated with
 * ntbm_msi_request_threaded_irq().
 */
void ntbm_msi_free_irq(struct ntb_dev *ntb, unsigned int irq, void *dev_id)
{
	struct msi_desc *entry = irq_get_msi_desc(irq);

	entry->write_msi_msg = NULL;
	entry->write_msi_msg_data = NULL;

	WARN_ON(devres_destroy(&ntb->dev, ntbm_msi_callback_release,
			       ntbm_msi_callback_match, entry));

	devm_free_irq(&ntb->dev, irq, dev_id);
}
EXPORT_SYMBOL(ntbm_msi_free_irq);

/**
 * ntb_msi_peer_trigger() - Trigger an interrupt handler on a peer
 * @ntb: NTB device context
 * @peer: Peer index
 * @desc: MSI descriptor data which triggers the interrupt
 *
 * This function triggers an interrupt on a peer. It requires
 * the descriptor structure to have been passed from that peer
 * by some other means.
 *
 * Return: Zero on success, otherwise a negative error number.
 */
int ntb_msi_peer_trigger(struct ntb_dev *ntb, int peer,
			 struct ntb_msi_desc *desc)
{
	int idx;

	if (!ntb->msi)
		return -EINVAL;

	idx = desc->addr_offset / sizeof(*ntb->msi->peer_mws[peer]);

	iowrite32(desc->data, &ntb->msi->peer_mws[peer][idx]);

	return 0;
}
EXPORT_SYMBOL(ntb_msi_peer_trigger);
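
/*
 * Example (illustrative sketch): once the peer's descriptor has been
 * received (here read back from the local scratchpads the peer wrote,
 * indices hypothetical), fire the peer's handler:
 *
 *	struct ntb_msi_desc peer_desc = {
 *		.addr_offset = ntb_spad_read(ntb, 0),
 *		.data = ntb_spad_read(ntb, 1),
 *	};
 *
 *	ret = ntb_msi_peer_trigger(ntb, 0, &peer_desc);
 */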

/**
 * ntb_msi_peer_addr() - Get the DMA address to trigger a peer's MSI interrupt
 * @ntb: NTB device context
 * @peer: Peer index
 * @desc: MSI descriptor data which triggers the interrupt
 * @msi_addr: Output physical address to write to in order to trigger
 *	the interrupt
 *
 * This function allows using DMA engines to trigger an interrupt
 * (for example, trigger an interrupt to process the data after
 * sending it). To trigger the interrupt, write @desc.data to the address
 * returned in @msi_addr.
 *
 * Return: Zero on success, otherwise a negative error number.
 */
int ntb_msi_peer_addr(struct ntb_dev *ntb, int peer,
		      struct ntb_msi_desc *desc,
		      phys_addr_t *msi_addr)
{
	int peer_widx = ntb_peer_mw_count(ntb) - 1 - peer;
	phys_addr_t mw_phys_addr;
	int ret;

	ret = ntb_peer_mw_get_addr(ntb, peer_widx, &mw_phys_addr, NULL);
	if (ret)
		return ret;

	if (msi_addr)
		*msi_addr = mw_phys_addr + desc->addr_offset;

	return 0;
}
EXPORT_SYMBOL(ntb_msi_peer_addr);
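
/*
 * Example (illustrative sketch, assuming the client already owns a DMA
 * channel "chan" on device "dma_dev" and a DMA-mapped 4-byte source
 * buffer "src" holding desc.data; all names hypothetical):
 *
 *	phys_addr_t msi_addr;
 *	dma_addr_t dst;
 *	struct dma_async_tx_descriptor *tx;
 *	int ret;
 *
 *	ret = ntb_msi_peer_addr(ntb, 0, &desc, &msi_addr);
 *	if (ret)
 *		return ret;
 *
 *	// msi_addr is a physical address in the peer MW BAR; map it for
 *	// the DMA device before use.
 *	dst = dma_map_resource(dma_dev, msi_addr, sizeof(u32),
 *			       DMA_TO_DEVICE, 0);
 *
 *	// Queue a 4-byte write of desc.data behind the payload transfer
 *	// so the peer's handler fires only after the data has landed.
 *	tx = dmaengine_prep_dma_memcpy(chan, dst, src, sizeof(u32), 0);
 *	dmaengine_submit(tx);
 *	dma_async_issue_pending(chan);
 */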