Andreas Noever | 1660315 | 2014-06-03 22:03:58 +0200 | [diff] [blame] | 1 | /* |
| 2 | * Thunderbolt Cactus Ridge driver - NHI driver |
| 3 | * |
| 4 | * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com> |
| 5 | */ |
| 6 | |
| 7 | #ifndef DSL3510_H_ |
| 8 | #define DSL3510_H_ |
| 9 | |
Mika Westerberg | 046bee1 | 2017-06-06 15:24:57 +0300 | [diff] [blame] | 10 | #include <linux/idr.h> |
Andreas Noever | 1660315 | 2014-06-03 22:03:58 +0200 | [diff] [blame] | 11 | #include <linux/mutex.h> |
| 12 | #include <linux/workqueue.h> |
| 13 | |
/**
 * struct tb_nhi - thunderbolt native host interface
 * @lock: Must be held during ring creation/destruction. Is acquired by
 *	  interrupt_work when dispatching interrupts to individual rings.
 * @pdev: Pointer to the PCI device
 * @iobase: MMIO space of the NHI
 * @tx_rings: All Tx rings available on this host controller
 *	      (NOTE(review): appears to hold one slot per hop, i.e.
 *	      @hop_count entries — confirm against the allocation in nhi.c)
 * @rx_rings: All Rx rings available on this host controller
 *	      (same presumed sizing as @tx_rings)
 * @msix_ida: Used to allocate MSI-X vectors for rings
 * @going_away: The host controller device is about to disappear so when
 *		this flag is set, avoid touching the hardware anymore.
 * @interrupt_work: Work scheduled to handle ring interrupt when no
 *		    MSI-X is used.
 * @hop_count: Number of rings (end point hops) supported by NHI.
 */
struct tb_nhi {
	struct mutex lock;
	struct pci_dev *pdev;
	void __iomem *iobase;
	struct tb_ring **tx_rings;
	struct tb_ring **rx_rings;
	struct ida msix_ida;
	bool going_away;
	struct work_struct interrupt_work;
	u32 hop_count;
};
| 40 | |
/**
 * struct tb_ring - thunderbolt TX or RX ring associated with a NHI
 * @lock: Lock serializing actions to this ring. Must be acquired after
 *	  nhi->lock.
 * @nhi: Pointer to the native host controller interface
 * @size: Size of the ring
 * @hop: Hop (DMA channel) associated with this ring
 * @head: Head of the ring (write next descriptor here)
 * @tail: Tail of the ring (complete next descriptor here)
 * @descriptors: Allocated descriptors for this ring
 * @descriptors_dma: DMA address of @descriptors
 * @queue: Queue holding frames to be transferred over this ring
 * @in_flight: Queue holding frames that are currently in flight
 * @work: Interrupt work structure
 * @is_tx: Is the ring Tx or Rx
 * @running: Is the ring running
 * @irq: MSI-X irq number if the ring uses MSI-X. %0 otherwise.
 * @vector: MSI-X vector number the ring uses (only set if @irq is > 0)
 * @flags: Ring specific flags
 */
struct tb_ring {
	struct mutex lock;
	struct tb_nhi *nhi;
	int size;
	int hop;
	int head;
	int tail;
	struct ring_desc *descriptors;
	dma_addr_t descriptors_dma;
	struct list_head queue;
	struct list_head in_flight;
	struct work_struct work;
	bool is_tx:1;
	bool running:1;
	int irq;
	u8 vector;
	unsigned int flags;
};
| 78 | |
Mika Westerberg | 046bee1 | 2017-06-06 15:24:57 +0300 | [diff] [blame] | 79 | /* Leave ring interrupt enabled on suspend */ |
| 80 | #define RING_FLAG_NO_SUSPEND BIT(0) |
| 81 | |
struct ring_frame;
/*
 * Completion callback for a frame enqueued via ring_rx()/ring_tx().
 * @canceled is true when the frame was flushed because ring_stop() was
 * called before it completed (see the ring_rx()/ring_tx() kernel-doc).
 */
typedef void (*ring_cb)(struct tb_ring*, struct ring_frame*, bool canceled);
| 84 | |
/**
 * struct ring_frame - for use with ring_rx/ring_tx
 * @buffer_phy: DMA address of the buffer holding the frame data
 * @callback: Invoked once the frame completes (or with canceled=true if
 *	      the ring was stopped first)
 * @list: Links the frame into the ring's @queue / @in_flight lists
 * @size: Frame size in bytes (TX: in, RX: out). 12-bit field.
 * @flags: Frame flags (RX: out). 12-bit field.
 * @eof: EOF descriptor field (TX: in, RX: out). 4-bit field.
 * @sof: SOF descriptor field (TX: in, RX: out). 4-bit field.
 */
struct ring_frame {
	dma_addr_t buffer_phy;
	ring_cb callback;
	struct list_head list;
	u32 size:12; /* TX: in, RX: out */
	u32 flags:12; /* RX: out */
	u32 eof:4; /* TX: in, RX: out */
	u32 sof:4; /* TX: in, RX: out */
};
| 97 | |
| 98 | #define TB_FRAME_SIZE 0x100 /* minimum size for ring_rx */ |
| 99 | |
/*
 * Ring lifecycle: allocate a Tx/Rx ring for a given hop, ring_start() it,
 * ring_stop() it when done and release it with ring_free(). Implemented
 * in nhi.c.
 */
struct tb_ring *ring_alloc_tx(struct tb_nhi *nhi, int hop, int size,
			      unsigned int flags);
struct tb_ring *ring_alloc_rx(struct tb_nhi *nhi, int hop, int size,
			      unsigned int flags);
void ring_start(struct tb_ring *ring);
void ring_stop(struct tb_ring *ring);
void ring_free(struct tb_ring *ring);

/* Internal enqueue helper; use the ring_rx()/ring_tx() wrappers below. */
int __ring_enqueue(struct tb_ring *ring, struct ring_frame *frame);
| 109 | |
/**
 * ring_rx() - enqueue a frame on an RX ring
 * @ring: Ring to enqueue the frame on (must be an RX ring)
 * @frame: Frame to receive into
 *
 * frame->buffer, frame->buffer_phy and frame->callback have to be set. The
 * buffer must contain at least TB_FRAME_SIZE bytes.
 *
 * frame->callback will be invoked with frame->size, frame->flags, frame->eof,
 * frame->sof set once the frame has been received.
 *
 * If ring_stop is called after the packet has been enqueued frame->callback
 * will be called with canceled set to true.
 *
 * Return: Returns ESHUTDOWN if ring_stop has been called. Zero otherwise.
 */
static inline int ring_rx(struct tb_ring *ring, struct ring_frame *frame)
{
	WARN_ON(ring->is_tx); /* catch misuse: this wrapper is RX-only */
	return __ring_enqueue(ring, frame);
}
| 129 | |
/**
 * ring_tx() - enqueue a frame on a TX ring
 * @ring: Ring to enqueue the frame on (must be a TX ring)
 * @frame: Frame to transmit
 *
 * frame->buffer, frame->buffer_phy, frame->callback, frame->size, frame->eof
 * and frame->sof have to be set.
 *
 * frame->callback will be invoked once the frame has been transmitted.
 *
 * If ring_stop is called after the packet has been enqueued frame->callback
 * will be called with canceled set to true.
 *
 * Return: Returns ESHUTDOWN if ring_stop has been called. Zero otherwise.
 */
static inline int ring_tx(struct tb_ring *ring, struct ring_frame *frame)
{
	WARN_ON(!ring->is_tx); /* catch misuse: this wrapper is TX-only */
	return __ring_enqueue(ring, frame);
}
| 148 | |
/**
 * enum nhi_fw_mode - Firmware operation mode, as returned by
 *		      nhi_mailbox_mode()
 * @NHI_FW_SAFE_MODE: Safe mode
 * @NHI_FW_AUTH_MODE: Authentication mode
 * @NHI_FW_EP_MODE: Endpoint (EP) mode
 * @NHI_FW_CM_MODE: Connection manager (CM) mode
 */
enum nhi_fw_mode {
	NHI_FW_SAFE_MODE,
	NHI_FW_AUTH_MODE,
	NHI_FW_EP_MODE,
	NHI_FW_CM_MODE,
};
| 155 | |
/*
 * Commands sent to the NHI firmware through the mailbox (see
 * nhi_mailbox_cmd() below). The values are the raw command codes the
 * firmware expects.
 */
enum nhi_mailbox_cmd {
	NHI_MAILBOX_SAVE_DEVS = 0x05,
	NHI_MAILBOX_DRV_UNLOADS = 0x07,
	NHI_MAILBOX_ALLOW_ALL_DEVS = 0x23,
};

/* Issue @cmd (with @data) via the mailbox; implemented in nhi.c. */
int nhi_mailbox_cmd(struct tb_nhi *nhi, enum nhi_mailbox_cmd cmd, u32 data);
/* Read back the current firmware operation mode; implemented in nhi.c. */
enum nhi_fw_mode nhi_mailbox_mode(struct tb_nhi *nhi);
| 164 | |
Mika Westerberg | 5e2781b | 2017-06-06 15:25:11 +0300 | [diff] [blame] | 165 | /* |
| 166 | * PCI IDs used in this driver from Win Ridge forward. There is no |
| 167 | * need for the PCI quirk anymore as we will use ICM also on Apple |
| 168 | * hardware. |
| 169 | */ |
| 170 | #define PCI_DEVICE_ID_INTEL_WIN_RIDGE_2C_NHI 0x157d |
| 171 | #define PCI_DEVICE_ID_INTEL_WIN_RIDGE_2C_BRIDGE 0x157e |
| 172 | #define PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_NHI 0x15bf |
| 173 | #define PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_BRIDGE 0x15c0 |
| 174 | #define PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_NHI 0x15d2 |
| 175 | #define PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_BRIDGE 0x15d3 |
| 176 | #define PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_NHI 0x15d9 |
| 177 | #define PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_BRIDGE 0x15da |
| 178 | #define PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_USBONLY_NHI 0x15dc |
| 179 | #define PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_USBONLY_NHI 0x15dd |
| 180 | #define PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_USBONLY_NHI 0x15de |
| 181 | |
Andreas Noever | 1660315 | 2014-06-03 22:03:58 +0200 | [diff] [blame] | 182 | #endif |