/*
 * Thunderbolt Cactus Ridge driver - NHI driver
 *
 * The NHI (native host interface) is the PCI device that allows us to send and
 * receive frames from the Thunderbolt bus.
 *
 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
 */

#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/delay.h>

#include "nhi.h"
#include "nhi_regs.h"
#include "tb.h"

#define RING_TYPE(ring)	((ring)->is_tx ? "TX ring" : "RX ring")

/*
 * Used to enable end-to-end workaround for missing RX packets. Do not
 * use this ring for anything else.
 */
#define RING_E2E_UNUSED_HOPID	2
/* HopIDs 0-7 are reserved by the Thunderbolt protocol */
#define RING_FIRST_USABLE_HOPID	8

/*
 * Minimum number of vectors when we use MSI-X. Two are for the control
 * channel Rx/Tx and the remaining four are for cross-domain DMA paths.
 */
#define MSIX_MIN_VECS		6
#define MSIX_MAX_VECS		16

#define NHI_MAILBOX_TIMEOUT	500 /* ms */

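/*
 * ring_interrupt_index() packs TX and RX rings into a single interrupt
 * bit space: TX ring N uses bit N and RX ring N uses bit N + hop_count.
 * For example, with a hop count of 32, RX ring 3 maps to bit 35, i.e.
 * bit 3 of the second interrupt register (REG_RING_INTERRUPT_BASE + 4).
 */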
static int ring_interrupt_index(struct tb_ring *ring)
{
	int bit = ring->hop;
	if (!ring->is_tx)
		bit += ring->nhi->hop_count;
	return bit;
}

/**
 * ring_interrupt_active() - activate/deactivate interrupts for a single ring
 *
 * ring->nhi->lock must be held.
 */
static void ring_interrupt_active(struct tb_ring *ring, bool active)
{
	int reg = REG_RING_INTERRUPT_BASE +
		  ring_interrupt_index(ring) / 32 * 4;
	int bit = ring_interrupt_index(ring) & 31;
	int mask = 1 << bit;
	u32 old, new;

	if (ring->irq > 0) {
		u32 step, shift, ivr, misc;
		void __iomem *ivr_base;
		int index;

		if (ring->is_tx)
			index = ring->hop;
		else
			index = ring->hop + ring->nhi->hop_count;

		/*
		 * Ask the hardware to clear interrupt status bits automatically
		 * since we already know which interrupt was triggered.
		 */
		misc = ioread32(ring->nhi->iobase + REG_DMA_MISC);
		if (!(misc & REG_DMA_MISC_INT_AUTO_CLEAR)) {
			misc |= REG_DMA_MISC_INT_AUTO_CLEAR;
			iowrite32(misc, ring->nhi->iobase + REG_DMA_MISC);
		}

		ivr_base = ring->nhi->iobase + REG_INT_VEC_ALLOC_BASE;
		step = index / REG_INT_VEC_ALLOC_REGS * REG_INT_VEC_ALLOC_BITS;
		shift = index % REG_INT_VEC_ALLOC_REGS * REG_INT_VEC_ALLOC_BITS;
		ivr = ioread32(ivr_base + step);
		ivr &= ~(REG_INT_VEC_ALLOC_MASK << shift);
		if (active)
			ivr |= ring->vector << shift;
		iowrite32(ivr, ivr_base + step);
	}

	old = ioread32(ring->nhi->iobase + reg);
	if (active)
		new = old | mask;
	else
		new = old & ~mask;

	dev_info(&ring->nhi->pdev->dev,
		 "%s interrupt at register %#x bit %d (%#x -> %#x)\n",
		 active ? "enabling" : "disabling", reg, bit, old, new);

	if (new == old)
		dev_WARN(&ring->nhi->pdev->dev,
			 "interrupt for %s %d is already %s\n",
			 RING_TYPE(ring), ring->hop,
			 active ? "enabled" : "disabled");
	iowrite32(new, ring->nhi->iobase + reg);
}

/**
 * nhi_disable_interrupts() - disable interrupts for all rings
 *
 * Use only during init and shutdown.
 */
static void nhi_disable_interrupts(struct tb_nhi *nhi)
{
	int i = 0;
	/* disable interrupts */
	for (i = 0; i < RING_INTERRUPT_REG_COUNT(nhi); i++)
		iowrite32(0, nhi->iobase + REG_RING_INTERRUPT_BASE + 4 * i);

	/* clear interrupt status bits */
	for (i = 0; i < RING_NOTIFY_REG_COUNT(nhi); i++)
		ioread32(nhi->iobase + REG_RING_NOTIFY_BASE + 4 * i);
}

/* ring helper methods */

static void __iomem *ring_desc_base(struct tb_ring *ring)
{
	void __iomem *io = ring->nhi->iobase;
	io += ring->is_tx ? REG_TX_RING_BASE : REG_RX_RING_BASE;
	io += ring->hop * 16;
	return io;
}

static void __iomem *ring_options_base(struct tb_ring *ring)
{
	void __iomem *io = ring->nhi->iobase;
	io += ring->is_tx ? REG_TX_OPTIONS_BASE : REG_RX_OPTIONS_BASE;
	io += ring->hop * 32;
	return io;
}

static void ring_iowrite16desc(struct tb_ring *ring, u32 value, u32 offset)
{
	iowrite16(value, ring_desc_base(ring) + offset);
}

static void ring_iowrite32desc(struct tb_ring *ring, u32 value, u32 offset)
{
	iowrite32(value, ring_desc_base(ring) + offset);
}

static void ring_iowrite64desc(struct tb_ring *ring, u64 value, u32 offset)
{
	iowrite32(value, ring_desc_base(ring) + offset);
	iowrite32(value >> 32, ring_desc_base(ring) + offset + 4);
}

static void ring_iowrite32options(struct tb_ring *ring, u32 value, u32 offset)
{
	iowrite32(value, ring_options_base(ring) + offset);
}

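/*
 * The descriptor ring is a classic head/tail circular buffer: the
 * producer index (head) is only ever advanced to one slot behind the
 * consumer index (tail), so one descriptor is intentionally left unused
 * to distinguish the "full" state from the "empty" one below.
 */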
static bool ring_full(struct tb_ring *ring)
{
	return ((ring->head + 1) % ring->size) == ring->tail;
}

static bool ring_empty(struct tb_ring *ring)
{
	return ring->head == ring->tail;
}

/**
 * ring_write_descriptors() - post frames from ring->queue to the controller
 *
 * ring->lock is held.
 */
static void ring_write_descriptors(struct tb_ring *ring)
{
	struct ring_frame *frame, *n;
	struct ring_desc *descriptor;
	list_for_each_entry_safe(frame, n, &ring->queue, list) {
		if (ring_full(ring))
			break;
		list_move_tail(&frame->list, &ring->in_flight);
		descriptor = &ring->descriptors[ring->head];
		descriptor->phys = frame->buffer_phy;
		descriptor->time = 0;
		descriptor->flags = RING_DESC_POSTED | RING_DESC_INTERRUPT;
		if (ring->is_tx) {
			descriptor->length = frame->size;
			descriptor->eof = frame->eof;
			descriptor->sof = frame->sof;
		}
		ring->head = (ring->head + 1) % ring->size;
		ring_iowrite16desc(ring, ring->head, ring->is_tx ? 10 : 8);
	}
}

/**
 * ring_work() - progress completed frames
 *
 * If the ring is shutting down then all frames are marked as canceled and
 * their callbacks are invoked.
 *
 * Otherwise we collect all completed frames from the ring buffer, write new
 * frames to the ring buffer and invoke the callbacks for the completed frames.
 */
static void ring_work(struct work_struct *work)
{
	struct tb_ring *ring = container_of(work, typeof(*ring), work);
	struct ring_frame *frame;
	bool canceled = false;
	unsigned long flags;
	LIST_HEAD(done);

	spin_lock_irqsave(&ring->lock, flags);

	if (!ring->running) {
		/* Move all frames to done and mark them as canceled. */
		list_splice_tail_init(&ring->in_flight, &done);
		list_splice_tail_init(&ring->queue, &done);
		canceled = true;
		goto invoke_callback;
	}

	while (!ring_empty(ring)) {
		if (!(ring->descriptors[ring->tail].flags
				& RING_DESC_COMPLETED))
			break;
		frame = list_first_entry(&ring->in_flight, typeof(*frame),
					 list);
		list_move_tail(&frame->list, &done);
		if (!ring->is_tx) {
			frame->size = ring->descriptors[ring->tail].length;
			frame->eof = ring->descriptors[ring->tail].eof;
			frame->sof = ring->descriptors[ring->tail].sof;
			frame->flags = ring->descriptors[ring->tail].flags;
		}
		ring->tail = (ring->tail + 1) % ring->size;
	}
	ring_write_descriptors(ring);

invoke_callback:
	/* allow callbacks to schedule new work */
	spin_unlock_irqrestore(&ring->lock, flags);
	while (!list_empty(&done)) {
		frame = list_first_entry(&done, typeof(*frame), list);
		/*
		 * The callback may reenqueue or delete frame.
		 * Do not hold on to it.
		 */
		list_del_init(&frame->list);
		if (frame->callback)
			frame->callback(ring, frame, canceled);
	}
}

int __tb_ring_enqueue(struct tb_ring *ring, struct ring_frame *frame)
{
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&ring->lock, flags);
	if (ring->running) {
		list_add_tail(&frame->list, &ring->queue);
		ring_write_descriptors(ring);
	} else {
		ret = -ESHUTDOWN;
	}
	spin_unlock_irqrestore(&ring->lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(__tb_ring_enqueue);
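
/*
 * A minimal sketch (illustrative only) of how a client driver typically
 * hands a frame to a ring; the tb_ring_tx()/tb_ring_rx() wrappers in the
 * public Thunderbolt header funnel into __tb_ring_enqueue() above.  The
 * buffer must already be DMA mapped, and names such as my_tx_callback,
 * dma_addr, sof_pdf, eof_pdf and len are made up for the example:
 *
 *	static void my_tx_callback(struct tb_ring *ring,
 *				   struct ring_frame *frame, bool canceled)
 *	{
 *		// unmap and free the buffer, resubmit, ...
 *	}
 *
 *	frame->buffer_phy = dma_addr;
 *	frame->callback = my_tx_callback;
 *	frame->sof = sof_pdf;
 *	frame->eof = eof_pdf;
 *	frame->size = len;
 *	ret = tb_ring_tx(tx_ring, frame); // -ESHUTDOWN if the ring is stopped
 */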

/**
 * tb_ring_poll() - Poll one completed frame from the ring
 * @ring: Ring to poll
 *
 * This function can be called when the @start_poll callback of the @ring
 * has been invoked. It will read one completed frame from the ring and
 * return it to the caller. Returns %NULL if there are no more completed
 * frames.
 */
struct ring_frame *tb_ring_poll(struct tb_ring *ring)
{
	struct ring_frame *frame = NULL;
	unsigned long flags;

	spin_lock_irqsave(&ring->lock, flags);
	if (!ring->running)
		goto unlock;
	if (ring_empty(ring))
		goto unlock;

	if (ring->descriptors[ring->tail].flags & RING_DESC_COMPLETED) {
		frame = list_first_entry(&ring->in_flight, typeof(*frame),
					 list);
		list_del_init(&frame->list);

		if (!ring->is_tx) {
			frame->size = ring->descriptors[ring->tail].length;
			frame->eof = ring->descriptors[ring->tail].eof;
			frame->sof = ring->descriptors[ring->tail].sof;
			frame->flags = ring->descriptors[ring->tail].flags;
		}

		ring->tail = (ring->tail + 1) % ring->size;
	}

unlock:
	spin_unlock_irqrestore(&ring->lock, flags);
	return frame;
}
EXPORT_SYMBOL_GPL(tb_ring_poll);

static void __ring_interrupt_mask(struct tb_ring *ring, bool mask)
{
	int idx = ring_interrupt_index(ring);
	int reg = REG_RING_INTERRUPT_BASE + idx / 32 * 4;
	int bit = idx % 32;
	u32 val;

	val = ioread32(ring->nhi->iobase + reg);
	if (mask)
		val &= ~BIT(bit);
	else
		val |= BIT(bit);
	iowrite32(val, ring->nhi->iobase + reg);
}

/* Both @nhi->lock and @ring->lock should be held */
static void __ring_interrupt(struct tb_ring *ring)
{
	if (!ring->running)
		return;

	if (ring->start_poll) {
		__ring_interrupt_mask(ring, true);
		ring->start_poll(ring->poll_data);
	} else {
		schedule_work(&ring->work);
	}
}

/**
 * tb_ring_poll_complete() - Re-start interrupt for the ring
 * @ring: Ring to re-start the interrupt
 *
 * This will re-start (unmask) the ring interrupt once the user is done
 * with polling.
 */
void tb_ring_poll_complete(struct tb_ring *ring)
{
	unsigned long flags;

	spin_lock_irqsave(&ring->nhi->lock, flags);
	spin_lock(&ring->lock);
	if (ring->start_poll)
		__ring_interrupt_mask(ring, false);
	spin_unlock(&ring->lock);
	spin_unlock_irqrestore(&ring->nhi->lock, flags);
}
EXPORT_SYMBOL_GPL(tb_ring_poll_complete);
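
/*
 * The intended shape of the polling interface, roughly: the client
 * registers @start_poll when allocating the ring (see tb_ring_alloc_rx()
 * below), is called back with the ring interrupt already masked, drains
 * completed frames from its own context with tb_ring_poll() and finally
 * unmasks the interrupt again with tb_ring_poll_complete().  A sketch
 * with made-up names (my_start_poll, my_priv, poll_work, consume):
 *
 *	static void my_start_poll(void *data)
 *	{
 *		struct my_priv *priv = data;
 *
 *		// interrupt is masked; defer to NAPI, a workqueue, etc.
 *		schedule_work(&priv->poll_work);
 *	}
 *
 *	static void my_poll(struct work_struct *work)
 *	{
 *		struct my_priv *priv = container_of(work, typeof(*priv),
 *						    poll_work);
 *		struct ring_frame *frame;
 *
 *		while ((frame = tb_ring_poll(priv->rx_ring)))
 *			consume(frame);		// process and resubmit
 *
 *		tb_ring_poll_complete(priv->rx_ring);
 *	}
 */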

static irqreturn_t ring_msix(int irq, void *data)
{
	struct tb_ring *ring = data;

	spin_lock(&ring->nhi->lock);
	spin_lock(&ring->lock);
	__ring_interrupt(ring);
	spin_unlock(&ring->lock);
	spin_unlock(&ring->nhi->lock);

	return IRQ_HANDLED;
}

static int ring_request_msix(struct tb_ring *ring, bool no_suspend)
{
	struct tb_nhi *nhi = ring->nhi;
	unsigned long irqflags;
	int ret;

	if (!nhi->pdev->msix_enabled)
		return 0;

	ret = ida_simple_get(&nhi->msix_ida, 0, MSIX_MAX_VECS, GFP_KERNEL);
	if (ret < 0)
		return ret;

	ring->vector = ret;

	ring->irq = pci_irq_vector(ring->nhi->pdev, ring->vector);
	if (ring->irq < 0)
		return ring->irq;

	irqflags = no_suspend ? IRQF_NO_SUSPEND : 0;
	return request_irq(ring->irq, ring_msix, irqflags, "thunderbolt", ring);
}

static void ring_release_msix(struct tb_ring *ring)
{
	if (ring->irq <= 0)
		return;

	free_irq(ring->irq, ring);
	ida_simple_remove(&ring->nhi->msix_ida, ring->vector);
	ring->vector = 0;
	ring->irq = 0;
}

static int nhi_alloc_hop(struct tb_nhi *nhi, struct tb_ring *ring)
{
	int ret = 0;

	spin_lock_irq(&nhi->lock);

	if (ring->hop < 0) {
		unsigned int i;

		/*
		 * Automatically allocate HopID from the non-reserved
		 * range 8 .. hop_count - 1.
		 */
		for (i = RING_FIRST_USABLE_HOPID; i < nhi->hop_count; i++) {
			if (ring->is_tx) {
				if (!nhi->tx_rings[i]) {
					ring->hop = i;
					break;
				}
			} else {
				if (!nhi->rx_rings[i]) {
					ring->hop = i;
					break;
				}
			}
		}
	}

	if (ring->hop < 0 || ring->hop >= nhi->hop_count) {
		dev_warn(&nhi->pdev->dev, "invalid hop: %d\n", ring->hop);
		ret = -EINVAL;
		goto err_unlock;
	}
	if (ring->is_tx && nhi->tx_rings[ring->hop]) {
		dev_warn(&nhi->pdev->dev, "TX hop %d already allocated\n",
			 ring->hop);
		ret = -EBUSY;
		goto err_unlock;
	} else if (!ring->is_tx && nhi->rx_rings[ring->hop]) {
		dev_warn(&nhi->pdev->dev, "RX hop %d already allocated\n",
			 ring->hop);
		ret = -EBUSY;
		goto err_unlock;
	}

	if (ring->is_tx)
		nhi->tx_rings[ring->hop] = ring;
	else
		nhi->rx_rings[ring->hop] = ring;

err_unlock:
	spin_unlock_irq(&nhi->lock);

	return ret;
}

static struct tb_ring *tb_ring_alloc(struct tb_nhi *nhi, u32 hop, int size,
				     bool transmit, unsigned int flags,
				     u16 sof_mask, u16 eof_mask,
				     void (*start_poll)(void *),
				     void *poll_data)
{
	struct tb_ring *ring = NULL;
	dev_info(&nhi->pdev->dev, "allocating %s ring %d of size %d\n",
		 transmit ? "TX" : "RX", hop, size);

	/* Tx Ring 2 is reserved for E2E workaround */
	if (transmit && hop == RING_E2E_UNUSED_HOPID)
		return NULL;

	ring = kzalloc(sizeof(*ring), GFP_KERNEL);
	if (!ring)
		return NULL;

	spin_lock_init(&ring->lock);
	INIT_LIST_HEAD(&ring->queue);
	INIT_LIST_HEAD(&ring->in_flight);
	INIT_WORK(&ring->work, ring_work);

	ring->nhi = nhi;
	ring->hop = hop;
	ring->is_tx = transmit;
	ring->size = size;
	ring->flags = flags;
	ring->sof_mask = sof_mask;
	ring->eof_mask = eof_mask;
	ring->head = 0;
	ring->tail = 0;
	ring->running = false;
	ring->start_poll = start_poll;
	ring->poll_data = poll_data;

	ring->descriptors = dma_alloc_coherent(&ring->nhi->pdev->dev,
			size * sizeof(*ring->descriptors),
			&ring->descriptors_dma, GFP_KERNEL | __GFP_ZERO);
	if (!ring->descriptors)
		goto err_free_ring;

	if (ring_request_msix(ring, flags & RING_FLAG_NO_SUSPEND))
		goto err_free_descs;

	if (nhi_alloc_hop(nhi, ring))
		goto err_release_msix;

	return ring;

err_release_msix:
	ring_release_msix(ring);
err_free_descs:
	dma_free_coherent(&ring->nhi->pdev->dev,
			  ring->size * sizeof(*ring->descriptors),
			  ring->descriptors, ring->descriptors_dma);
err_free_ring:
	kfree(ring);

	return NULL;
}

/**
 * tb_ring_alloc_tx() - Allocate DMA ring for transmit
 * @nhi: Pointer to the NHI the ring is to be allocated
 * @hop: HopID (ring) to allocate
 * @size: Number of entries in the ring
 * @flags: Flags for the ring
 */
struct tb_ring *tb_ring_alloc_tx(struct tb_nhi *nhi, int hop, int size,
				 unsigned int flags)
{
	return tb_ring_alloc(nhi, hop, size, true, flags, 0, 0, NULL, NULL);
}
EXPORT_SYMBOL_GPL(tb_ring_alloc_tx);

/**
 * tb_ring_alloc_rx() - Allocate DMA ring for receive
 * @nhi: Pointer to the NHI the ring is to be allocated
 * @hop: HopID (ring) to allocate. Pass %-1 for automatic allocation.
 * @size: Number of entries in the ring
 * @flags: Flags for the ring
 * @sof_mask: Mask of PDF values that start a frame
 * @eof_mask: Mask of PDF values that end a frame
 * @start_poll: If not %NULL the ring will call this function when an
 *		interrupt is triggered and masked, instead of invoking
 *		the callback for each received frame.
 * @poll_data: Optional data passed to @start_poll
 */
struct tb_ring *tb_ring_alloc_rx(struct tb_nhi *nhi, int hop, int size,
				 unsigned int flags, u16 sof_mask, u16 eof_mask,
				 void (*start_poll)(void *), void *poll_data)
{
	return tb_ring_alloc(nhi, hop, size, false, flags, sof_mask, eof_mask,
			     start_poll, poll_data);
}
EXPORT_SYMBOL_GPL(tb_ring_alloc_rx);
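
/*
 * A short, illustrative allocation sequence (error handling omitted; the
 * HopID value 8, the ring size 256 and the sof/eof masks are examples,
 * and my_start_poll/priv refer to the polling sketch further above):
 *
 *	// fixed HopID, raw mode, 256 descriptors
 *	tx = tb_ring_alloc_tx(nhi, 8, 256, RING_FLAG_NO_SUSPEND);
 *
 *	// automatically allocated HopID, frame mode, E2E workaround,
 *	// polled completion
 *	rx = tb_ring_alloc_rx(nhi, -1, 256, RING_FLAG_FRAME | RING_FLAG_E2E,
 *			      sof_mask, eof_mask, my_start_poll, priv);
 *
 * Both return NULL on failure.  A ring is released with tb_ring_free()
 * once it has been stopped with tb_ring_stop().
 */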

/**
 * tb_ring_start() - enable a ring
 *
 * Must not be invoked in parallel with tb_ring_stop().
 */
void tb_ring_start(struct tb_ring *ring)
{
	u16 frame_size;
	u32 flags;

	spin_lock_irq(&ring->nhi->lock);
	spin_lock(&ring->lock);
	if (ring->nhi->going_away)
		goto err;
	if (ring->running) {
		dev_WARN(&ring->nhi->pdev->dev, "ring already started\n");
		goto err;
	}
	dev_info(&ring->nhi->pdev->dev, "starting %s %d\n",
		 RING_TYPE(ring), ring->hop);

	if (ring->flags & RING_FLAG_FRAME) {
		/* Means 4096 */
		frame_size = 0;
		flags = RING_FLAG_ENABLE;
	} else {
		frame_size = TB_FRAME_SIZE;
		flags = RING_FLAG_ENABLE | RING_FLAG_RAW;
	}

	if (ring->flags & RING_FLAG_E2E && !ring->is_tx) {
		u32 hop;

		/*
		 * In order not to lose Rx packets we enable the end-to-end
		 * workaround, which transfers Rx credits to an unused Tx
		 * HopID.
		 */
		hop = RING_E2E_UNUSED_HOPID << REG_RX_OPTIONS_E2E_HOP_SHIFT;
		hop &= REG_RX_OPTIONS_E2E_HOP_MASK;
		flags |= hop | RING_FLAG_E2E_FLOW_CONTROL;
	}

	ring_iowrite64desc(ring, ring->descriptors_dma, 0);
	if (ring->is_tx) {
		ring_iowrite32desc(ring, ring->size, 12);
		ring_iowrite32options(ring, 0, 4); /* time related? */
		ring_iowrite32options(ring, flags, 0);
	} else {
		u32 sof_eof_mask = ring->sof_mask << 16 | ring->eof_mask;

		ring_iowrite32desc(ring, (frame_size << 16) | ring->size, 12);
		ring_iowrite32options(ring, sof_eof_mask, 4);
		ring_iowrite32options(ring, flags, 0);
	}
	ring_interrupt_active(ring, true);
	ring->running = true;
err:
	spin_unlock(&ring->lock);
	spin_unlock_irq(&ring->nhi->lock);
}
EXPORT_SYMBOL_GPL(tb_ring_start);

/**
 * tb_ring_stop() - shutdown a ring
 *
 * Must not be invoked from a callback.
 *
 * This method will disable the ring. Further calls to
 * tb_ring_tx/tb_ring_rx will return -ESHUTDOWN until the ring is
 * started again with tb_ring_start().
 *
 * All enqueued frames will be canceled and their callbacks will be executed
 * with the canceled argument set to true (on the callback thread). This
 * method returns only after all callback invocations have finished.
 */
void tb_ring_stop(struct tb_ring *ring)
{
	spin_lock_irq(&ring->nhi->lock);
	spin_lock(&ring->lock);
	dev_info(&ring->nhi->pdev->dev, "stopping %s %d\n",
		 RING_TYPE(ring), ring->hop);
	if (ring->nhi->going_away)
		goto err;
	if (!ring->running) {
		dev_WARN(&ring->nhi->pdev->dev, "%s %d already stopped\n",
			 RING_TYPE(ring), ring->hop);
		goto err;
	}
	ring_interrupt_active(ring, false);

	ring_iowrite32options(ring, 0, 0);
	ring_iowrite64desc(ring, 0, 0);
	ring_iowrite16desc(ring, 0, ring->is_tx ? 10 : 8);
	ring_iowrite32desc(ring, 0, 12);
	ring->head = 0;
	ring->tail = 0;
	ring->running = false;

err:
	spin_unlock(&ring->lock);
	spin_unlock_irq(&ring->nhi->lock);

	/*
	 * schedule ring->work to invoke callbacks on all remaining frames.
	 */
	schedule_work(&ring->work);
	flush_work(&ring->work);
}
EXPORT_SYMBOL_GPL(tb_ring_stop);

/**
 * tb_ring_free() - free ring
 *
 * When this method returns all invocations of ring->callback will have
 * finished.
 *
 * Ring must be stopped.
 *
 * Must NOT be called from ring_frame->callback!
 */
void tb_ring_free(struct tb_ring *ring)
{
	spin_lock_irq(&ring->nhi->lock);
	/*
	 * Dissociate the ring from the NHI. This also ensures that
	 * nhi_interrupt_work cannot reschedule ring->work.
	 */
	if (ring->is_tx)
		ring->nhi->tx_rings[ring->hop] = NULL;
	else
		ring->nhi->rx_rings[ring->hop] = NULL;

	if (ring->running) {
		dev_WARN(&ring->nhi->pdev->dev, "%s %d still running\n",
			 RING_TYPE(ring), ring->hop);
	}
	spin_unlock_irq(&ring->nhi->lock);

	ring_release_msix(ring);

	dma_free_coherent(&ring->nhi->pdev->dev,
			  ring->size * sizeof(*ring->descriptors),
			  ring->descriptors, ring->descriptors_dma);

	ring->descriptors = NULL;
	ring->descriptors_dma = 0;

	dev_info(&ring->nhi->pdev->dev, "freeing %s %d\n", RING_TYPE(ring),
		 ring->hop);

	/*
	 * ring->work can no longer be scheduled (it is scheduled only
	 * by nhi_interrupt_work, ring_stop and ring_msix). Wait for it
	 * to finish before freeing the ring.
	 */
	flush_work(&ring->work);
	kfree(ring);
}
EXPORT_SYMBOL_GPL(tb_ring_free);

/**
 * nhi_mailbox_cmd() - Send a command through NHI mailbox
 * @nhi: Pointer to the NHI structure
 * @cmd: Command to send
 * @data: Data to be sent with the command
 *
 * Sends a mailbox command to the firmware running on the NHI. Returns %0
 * in case of success and negative errno in case of failure.
 */
int nhi_mailbox_cmd(struct tb_nhi *nhi, enum nhi_mailbox_cmd cmd, u32 data)
{
	ktime_t timeout;
	u32 val;

	iowrite32(data, nhi->iobase + REG_INMAIL_DATA);

	val = ioread32(nhi->iobase + REG_INMAIL_CMD);
	val &= ~(REG_INMAIL_CMD_MASK | REG_INMAIL_ERROR);
	val |= REG_INMAIL_OP_REQUEST | cmd;
	iowrite32(val, nhi->iobase + REG_INMAIL_CMD);

	timeout = ktime_add_ms(ktime_get(), NHI_MAILBOX_TIMEOUT);
	do {
		val = ioread32(nhi->iobase + REG_INMAIL_CMD);
		if (!(val & REG_INMAIL_OP_REQUEST))
			break;
		usleep_range(10, 20);
	} while (ktime_before(ktime_get(), timeout));

	if (val & REG_INMAIL_OP_REQUEST)
		return -ETIMEDOUT;
	if (val & REG_INMAIL_ERROR)
		return -EIO;

	return 0;
}

/**
 * nhi_mailbox_mode() - Return current firmware operation mode
 * @nhi: Pointer to the NHI structure
 *
 * The function reads the current firmware operation mode using the NHI
 * mailbox registers and returns it to the caller.
 */
enum nhi_fw_mode nhi_mailbox_mode(struct tb_nhi *nhi)
{
	u32 val;

	val = ioread32(nhi->iobase + REG_OUTMAIL_CMD);
	val &= REG_OUTMAIL_CMD_OPMODE_MASK;
	val >>= REG_OUTMAIL_CMD_OPMODE_SHIFT;

	return (enum nhi_fw_mode)val;
}
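
/*
 * These two helpers are the driver's channel to the ICM firmware outside
 * of the normal ring interface.  A rough usage sketch; the command and
 * mode values come from enum nhi_mailbox_cmd and enum nhi_fw_mode in
 * nhi.h, and NHI_MAILBOX_DRV_UNLOADS / NHI_FW_CM_MODE are only examples:
 *
 *	if (nhi_mailbox_mode(nhi) == NHI_FW_CM_MODE) {
 *		// the firmware connection manager is active; e.g. tell
 *		// it that the driver is about to unload
 *		ret = nhi_mailbox_cmd(nhi, NHI_MAILBOX_DRV_UNLOADS, 0);
 *		if (ret)
 *			return ret;	// -ETIMEDOUT or -EIO
 *	}
 */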

static void nhi_interrupt_work(struct work_struct *work)
{
	struct tb_nhi *nhi = container_of(work, typeof(*nhi), interrupt_work);
	int value = 0; /* Suppress uninitialized usage warning. */
	int bit;
	int hop = -1;
	int type = 0; /* current interrupt type 0: TX, 1: RX, 2: RX overflow */
	struct tb_ring *ring;

	spin_lock_irq(&nhi->lock);

	/*
	 * Starting at REG_RING_NOTIFY_BASE there are three status bitfields
	 * (TX, RX, RX overflow). We iterate over the bits and read new
	 * dwords as required. The registers are cleared on read.
	 */
	for (bit = 0; bit < 3 * nhi->hop_count; bit++) {
		if (bit % 32 == 0)
			value = ioread32(nhi->iobase
					 + REG_RING_NOTIFY_BASE
					 + 4 * (bit / 32));
		if (++hop == nhi->hop_count) {
			hop = 0;
			type++;
		}
		if ((value & (1 << (bit % 32))) == 0)
			continue;
		if (type == 2) {
			dev_warn(&nhi->pdev->dev,
				 "RX overflow for ring %d\n",
				 hop);
			continue;
		}
		if (type == 0)
			ring = nhi->tx_rings[hop];
		else
			ring = nhi->rx_rings[hop];
		if (ring == NULL) {
			dev_warn(&nhi->pdev->dev,
				 "got interrupt for inactive %s ring %d\n",
				 type ? "RX" : "TX",
				 hop);
			continue;
		}

		spin_lock(&ring->lock);
		__ring_interrupt(ring);
		spin_unlock(&ring->lock);
	}
	spin_unlock_irq(&nhi->lock);
}

static irqreturn_t nhi_msi(int irq, void *data)
{
	struct tb_nhi *nhi = data;
	schedule_work(&nhi->interrupt_work);
	return IRQ_HANDLED;
}

static int nhi_suspend_noirq(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct tb *tb = pci_get_drvdata(pdev);

	return tb_domain_suspend_noirq(tb);
}

static void nhi_enable_int_throttling(struct tb_nhi *nhi)
{
	/* Throttling is specified in 256ns increments */
	u32 throttle = DIV_ROUND_UP(128 * NSEC_PER_USEC, 256);
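	/* i.e. a 128 us interval: 128000 ns / 256 ns = 500 */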
	unsigned int i;

	/*
	 * Configure interrupt throttling for all vectors even if we
	 * only use a few.
	 */
	for (i = 0; i < MSIX_MAX_VECS; i++) {
		u32 reg = REG_INT_THROTTLING_RATE + i * 4;
		iowrite32(throttle, nhi->iobase + reg);
	}
}

static int nhi_resume_noirq(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct tb *tb = pci_get_drvdata(pdev);

	/*
	 * Check that the device is still there. It may be that the user
	 * unplugged the last device which causes the host controller to go
	 * away on PCs.
	 */
	if (!pci_device_is_present(pdev))
		tb->nhi->going_away = true;
	else
		nhi_enable_int_throttling(tb->nhi);

	return tb_domain_resume_noirq(tb);
}

static int nhi_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct tb *tb = pci_get_drvdata(pdev);

	return tb_domain_suspend(tb);
}

static void nhi_complete(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct tb *tb = pci_get_drvdata(pdev);

	/*
	 * If we were runtime suspended when system suspend started,
	 * schedule runtime resume now. It should bring the domain back
	 * to functional state.
	 */
	if (pm_runtime_suspended(&pdev->dev))
		pm_runtime_resume(&pdev->dev);
	else
		tb_domain_complete(tb);
}

static int nhi_runtime_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct tb *tb = pci_get_drvdata(pdev);

	return tb_domain_runtime_suspend(tb);
}

static int nhi_runtime_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct tb *tb = pci_get_drvdata(pdev);

	nhi_enable_int_throttling(tb->nhi);
	return tb_domain_runtime_resume(tb);
}

static void nhi_shutdown(struct tb_nhi *nhi)
{
	int i;
	dev_info(&nhi->pdev->dev, "shutdown\n");

	for (i = 0; i < nhi->hop_count; i++) {
		if (nhi->tx_rings[i])
			dev_WARN(&nhi->pdev->dev,
				 "TX ring %d is still active\n", i);
		if (nhi->rx_rings[i])
			dev_WARN(&nhi->pdev->dev,
				 "RX ring %d is still active\n", i);
	}
	nhi_disable_interrupts(nhi);
	/*
	 * We have to release the irq before calling flush_work. Otherwise an
	 * already executing IRQ handler could call schedule_work again.
	 */
	if (!nhi->pdev->msix_enabled) {
		devm_free_irq(&nhi->pdev->dev, nhi->pdev->irq, nhi);
		flush_work(&nhi->interrupt_work);
	}
	ida_destroy(&nhi->msix_ida);
}

static int nhi_init_msi(struct tb_nhi *nhi)
{
	struct pci_dev *pdev = nhi->pdev;
	int res, irq, nvec;

	/* In case someone left them on. */
	nhi_disable_interrupts(nhi);

	nhi_enable_int_throttling(nhi);

	ida_init(&nhi->msix_ida);

	/*
	 * The NHI has 16 MSI-X vectors or a single MSI. We first try to
	 * get all MSI-X vectors and if we succeed, each ring will have
	 * one MSI-X. If for some reason that does not work out, we
	 * fall back to a single MSI.
	 */
	nvec = pci_alloc_irq_vectors(pdev, MSIX_MIN_VECS, MSIX_MAX_VECS,
				     PCI_IRQ_MSIX);
	if (nvec < 0) {
		nvec = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI);
		if (nvec < 0)
			return nvec;

		INIT_WORK(&nhi->interrupt_work, nhi_interrupt_work);

		irq = pci_irq_vector(nhi->pdev, 0);
		if (irq < 0)
			return irq;

		res = devm_request_irq(&pdev->dev, irq, nhi_msi,
				       IRQF_NO_SUSPEND, "thunderbolt", nhi);
		if (res) {
			dev_err(&pdev->dev, "request_irq failed, aborting\n");
			return res;
		}
	}

	return 0;
}

static int nhi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct tb_nhi *nhi;
	struct tb *tb;
	int res;

	res = pcim_enable_device(pdev);
	if (res) {
		dev_err(&pdev->dev, "cannot enable PCI device, aborting\n");
		return res;
	}

	res = pcim_iomap_regions(pdev, 1 << 0, "thunderbolt");
	if (res) {
		dev_err(&pdev->dev, "cannot obtain PCI resources, aborting\n");
		return res;
	}

	nhi = devm_kzalloc(&pdev->dev, sizeof(*nhi), GFP_KERNEL);
	if (!nhi)
		return -ENOMEM;

	nhi->pdev = pdev;
	/* cannot fail - table is allocated in pcim_iomap_regions */
	nhi->iobase = pcim_iomap_table(pdev)[0];
	nhi->hop_count = ioread32(nhi->iobase + REG_HOP_COUNT) & 0x3ff;
	if (nhi->hop_count != 12 && nhi->hop_count != 32)
		dev_warn(&pdev->dev, "unexpected hop count: %d\n",
			 nhi->hop_count);

	nhi->tx_rings = devm_kcalloc(&pdev->dev, nhi->hop_count,
				     sizeof(*nhi->tx_rings), GFP_KERNEL);
	nhi->rx_rings = devm_kcalloc(&pdev->dev, nhi->hop_count,
				     sizeof(*nhi->rx_rings), GFP_KERNEL);
	if (!nhi->tx_rings || !nhi->rx_rings)
		return -ENOMEM;

	res = nhi_init_msi(nhi);
	if (res) {
		dev_err(&pdev->dev, "cannot enable MSI, aborting\n");
		return res;
	}

	spin_lock_init(&nhi->lock);

	res = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (res)
		res = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	if (res) {
		dev_err(&pdev->dev, "failed to set DMA mask\n");
		return res;
	}

	pci_set_master(pdev);

	tb = icm_probe(nhi);
	if (!tb)
		tb = tb_probe(nhi);
	if (!tb) {
		dev_err(&nhi->pdev->dev,
			"failed to determine connection manager, aborting\n");
		return -ENODEV;
	}

	dev_info(&nhi->pdev->dev, "NHI initialized, starting thunderbolt\n");

	res = tb_domain_add(tb);
	if (res) {
		/*
		 * At this point the RX/TX rings might already have been
		 * activated. Do a proper shutdown.
		 */
		tb_domain_put(tb);
		nhi_shutdown(nhi);
		return res;
	}
	pci_set_drvdata(pdev, tb);

	pm_runtime_allow(&pdev->dev);
	pm_runtime_set_autosuspend_delay(&pdev->dev, TB_AUTOSUSPEND_DELAY);
	pm_runtime_use_autosuspend(&pdev->dev);
	pm_runtime_put_autosuspend(&pdev->dev);

	return 0;
}

static void nhi_remove(struct pci_dev *pdev)
{
	struct tb *tb = pci_get_drvdata(pdev);
	struct tb_nhi *nhi = tb->nhi;

	pm_runtime_get_sync(&pdev->dev);
	pm_runtime_dont_use_autosuspend(&pdev->dev);
	pm_runtime_forbid(&pdev->dev);

	tb_domain_remove(tb);
	nhi_shutdown(nhi);
}

/*
 * The tunneled PCI bridges are siblings of us. Use resume_noirq to
 * re-enable the tunnels as early as possible. A corresponding PCI quirk
 * blocks the downstream bridges' resume_noirq until we are done.
 */
static const struct dev_pm_ops nhi_pm_ops = {
	.suspend_noirq = nhi_suspend_noirq,
	.resume_noirq = nhi_resume_noirq,
	.freeze_noirq = nhi_suspend_noirq, /*
					    * we just disable hotplug, the
					    * pci-tunnels stay alive.
					    */
	.thaw_noirq = nhi_resume_noirq,
	.restore_noirq = nhi_resume_noirq,
	.suspend = nhi_suspend,
	.freeze = nhi_suspend,
	.poweroff = nhi_suspend,
	.complete = nhi_complete,
	.runtime_suspend = nhi_runtime_suspend,
	.runtime_resume = nhi_runtime_resume,
};
| 1118 | |
Sachin Kamat | 620863f | 2014-06-20 14:32:34 +0530 | [diff] [blame] | 1119 | static struct pci_device_id nhi_ids[] = { |
Andreas Noever | 1660315 | 2014-06-03 22:03:58 +0200 | [diff] [blame] | 1120 | /* |
| 1121 | * We have to specify class, the TB bridges use the same device and |
Lukas Wunner | 1d11140 | 2016-03-20 13:57:20 +0100 | [diff] [blame] | 1122 | * vendor (sub)id on gen 1 and gen 2 controllers. |
Andreas Noever | 1660315 | 2014-06-03 22:03:58 +0200 | [diff] [blame] | 1123 | */ |
| 1124 | { |
| 1125 | .class = PCI_CLASS_SYSTEM_OTHER << 8, .class_mask = ~0, |
Lukas Wunner | 1d11140 | 2016-03-20 13:57:20 +0100 | [diff] [blame] | 1126 | .vendor = PCI_VENDOR_ID_INTEL, |
Lukas Wunner | 19bf4d4 | 2016-03-20 13:57:20 +0100 | [diff] [blame] | 1127 | .device = PCI_DEVICE_ID_INTEL_LIGHT_RIDGE, |
| 1128 | .subvendor = 0x2222, .subdevice = 0x1111, |
| 1129 | }, |
| 1130 | { |
| 1131 | .class = PCI_CLASS_SYSTEM_OTHER << 8, .class_mask = ~0, |
| 1132 | .vendor = PCI_VENDOR_ID_INTEL, |
Lukas Wunner | 1d11140 | 2016-03-20 13:57:20 +0100 | [diff] [blame] | 1133 | .device = PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C, |
Andreas Noever | 1660315 | 2014-06-03 22:03:58 +0200 | [diff] [blame] | 1134 | .subvendor = 0x2222, .subdevice = 0x1111, |
| 1135 | }, |
| 1136 | { |
| 1137 | .class = PCI_CLASS_SYSTEM_OTHER << 8, .class_mask = ~0, |
Lukas Wunner | 1d11140 | 2016-03-20 13:57:20 +0100 | [diff] [blame] | 1138 | .vendor = PCI_VENDOR_ID_INTEL, |
Xavier Gnata | 82a6a81 | 2016-07-26 18:40:38 +0200 | [diff] [blame] | 1139 | .device = PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_NHI, |
| 1140 | .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, |
| 1141 | }, |
| 1142 | { |
| 1143 | .class = PCI_CLASS_SYSTEM_OTHER << 8, .class_mask = ~0, |
| 1144 | .vendor = PCI_VENDOR_ID_INTEL, |
Lukas Wunner | 1d11140 | 2016-03-20 13:57:20 +0100 | [diff] [blame] | 1145 | .device = PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_NHI, |
Knuth Posern | a42fb35 | 2015-09-20 21:25:22 +0200 | [diff] [blame] | 1146 | .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, |
Andreas Noever | 1660315 | 2014-06-03 22:03:58 +0200 | [diff] [blame] | 1147 | }, |
Mika Westerberg | 5e2781b | 2017-06-06 15:25:11 +0300 | [diff] [blame] | 1148 | |
| 1149 | /* Thunderbolt 3 */ |
| 1150 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_NHI) }, |
| 1151 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_NHI) }, |
| 1152 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_USBONLY_NHI) }, |
| 1153 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_NHI) }, |
| 1154 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_USBONLY_NHI) }, |
| 1155 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_NHI) }, |
| 1156 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_NHI) }, |
| 1157 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_USBONLY_NHI) }, |
Radion Mirchevsky | 4bac471 | 2017-10-04 16:43:43 +0300 | [diff] [blame] | 1158 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_TITAN_RIDGE_2C_NHI) }, |
| 1159 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_NHI) }, |
Mika Westerberg | 5e2781b | 2017-06-06 15:25:11 +0300 | [diff] [blame] | 1160 | |
Andreas Noever | 1660315 | 2014-06-03 22:03:58 +0200 | [diff] [blame] | 1161 | { 0,} |
| 1162 | }; |
| 1163 | |
| 1164 | MODULE_DEVICE_TABLE(pci, nhi_ids); |
| 1165 | MODULE_LICENSE("GPL"); |
| 1166 | |
| 1167 | static struct pci_driver nhi_driver = { |
| 1168 | .name = "thunderbolt", |
| 1169 | .id_table = nhi_ids, |
| 1170 | .probe = nhi_probe, |
| 1171 | .remove = nhi_remove, |
Andreas Noever | 23dd5bb | 2014-06-03 22:04:12 +0200 | [diff] [blame] | 1172 | .driver.pm = &nhi_pm_ops, |
Andreas Noever | 1660315 | 2014-06-03 22:03:58 +0200 | [diff] [blame] | 1173 | }; |
| 1174 | |
| 1175 | static int __init nhi_init(void) |
| 1176 | { |
Mika Westerberg | 9d3cce0 | 2017-06-06 15:25:00 +0300 | [diff] [blame] | 1177 | int ret; |
| 1178 | |
Mika Westerberg | 9d3cce0 | 2017-06-06 15:25:00 +0300 | [diff] [blame] | 1179 | ret = tb_domain_init(); |
| 1180 | if (ret) |
| 1181 | return ret; |
| 1182 | ret = pci_register_driver(&nhi_driver); |
| 1183 | if (ret) |
| 1184 | tb_domain_exit(); |
| 1185 | return ret; |
Andreas Noever | 1660315 | 2014-06-03 22:03:58 +0200 | [diff] [blame] | 1186 | } |
| 1187 | |
| 1188 | static void __exit nhi_unload(void) |
| 1189 | { |
| 1190 | pci_unregister_driver(&nhi_driver); |
Mika Westerberg | 9d3cce0 | 2017-06-06 15:25:00 +0300 | [diff] [blame] | 1191 | tb_domain_exit(); |
Andreas Noever | 1660315 | 2014-06-03 22:03:58 +0200 | [diff] [blame] | 1192 | } |
| 1193 | |
Mika Westerberg | acb40d8 | 2017-10-09 16:22:34 +0300 | [diff] [blame] | 1194 | fs_initcall(nhi_init); |
Andreas Noever | 1660315 | 2014-06-03 22:03:58 +0200 | [diff] [blame] | 1195 | module_exit(nhi_unload); |