Andreas Noever | 1660315 | 2014-06-03 22:03:58 +0200 | [diff] [blame] | 1 | /* |
| 2 | * Thunderbolt Cactus Ridge driver - NHI driver |
| 3 | * |
| 4 | * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com> |
| 5 | */ |
| 6 | |
| 7 | #ifndef DSL3510_H_ |
| 8 | #define DSL3510_H_ |
| 9 | |
Mika Westerberg | 046bee1 | 2017-06-06 15:24:57 +0300 | [diff] [blame^] | 10 | #include <linux/idr.h> |
Andreas Noever | 1660315 | 2014-06-03 22:03:58 +0200 | [diff] [blame] | 11 | #include <linux/mutex.h> |
| 12 | #include <linux/workqueue.h> |
| 13 | |
/**
 * struct tb_nhi - thunderbolt native host interface
 * @lock: Must be held during ring creation/destruction. Is acquired by
 *	  interrupt_work when dispatching interrupts to individual rings.
 * @pdev: Pointer to the PCI device
 * @iobase: MMIO space of the NHI
 * @tx_rings: All Tx rings available on this host controller
 * @rx_rings: All Rx rings available on this host controller
 * @msix_ida: Used to allocate MSI-X vectors for rings
 * @interrupt_work: Work scheduled to handle ring interrupt when no
 *		    MSI-X is used.
 * @hop_count: Number of rings (end point hops) supported by NHI.
 */
struct tb_nhi {
	struct mutex lock;
	struct pci_dev *pdev;
	void __iomem *iobase;
	/* NOTE(review): presumably one slot per hop (hop_count entries) —
	 * confirm against the allocation in nhi.c. */
	struct tb_ring **tx_rings;
	struct tb_ring **rx_rings;
	struct ida msix_ida;
	struct work_struct interrupt_work;
	u32 hop_count;
};
| 37 | |
/**
 * struct tb_ring - thunderbolt TX or RX ring associated with a NHI
 * @lock: Lock serializing actions to this ring. Must be acquired after
 *	  nhi->lock.
 * @nhi: Pointer to the native host controller interface
 * @size: Size of the ring
 * @hop: Hop (DMA channel) associated with this ring
 * @head: Head of the ring (write next descriptor here)
 * @tail: Tail of the ring (complete next descriptor here)
 * @descriptors: Allocated descriptors for this ring
 * @descriptors_dma: DMA address of @descriptors
 * @queue: Queue holding frames to be transferred over this ring
 * @in_flight: Queue holding frames that are currently in flight
 * @work: Interrupt work structure
 * @is_tx: Is the ring Tx or Rx
 * @running: Is the ring running
 * @irq: MSI-X irq number if the ring uses MSI-X. %0 otherwise.
 * @vector: MSI-X vector number the ring uses (only set if @irq is > 0)
 * @flags: Ring specific flags
 */
struct tb_ring {
	struct mutex lock;
	struct tb_nhi *nhi;
	int size;
	int hop;
	int head;
	int tail;
	struct ring_desc *descriptors;
	dma_addr_t descriptors_dma;
	struct list_head queue;
	struct list_head in_flight;
	struct work_struct work;
	bool is_tx:1;
	bool running:1;
	int irq;
	u8 vector;
	unsigned int flags;
};
| 75 | |
Mika Westerberg | 046bee1 | 2017-06-06 15:24:57 +0300 | [diff] [blame^] | 76 | /* Leave ring interrupt enabled on suspend */ |
| 77 | #define RING_FLAG_NO_SUSPEND BIT(0) |
| 78 | |
Andreas Noever | 1660315 | 2014-06-03 22:03:58 +0200 | [diff] [blame] | 79 | struct ring_frame; |
| 80 | typedef void (*ring_cb)(struct tb_ring*, struct ring_frame*, bool canceled); |
| 81 | |
/**
 * struct ring_frame - for use with ring_rx/ring_tx
 * @buffer_phy: DMA address of the buffer to transfer to/from
 * @callback: Invoked when the frame completes; called with canceled set
 *	      to true if the frame was flushed by ring_stop() instead of
 *	      being transferred
 * @list: Used internally to link the frame into the ring's lists
 * @size: Frame size in bytes (TX: in, RX: out)
 * @flags: Frame flags (RX: out)
 * @eof: End of frame value (TX: in, RX: out)
 * @sof: Start of frame value (TX: in, RX: out)
 */
struct ring_frame {
	dma_addr_t buffer_phy;
	ring_cb callback;
	struct list_head list;
	u32 size:12;	/* TX: in, RX: out */
	u32 flags:12;	/* RX: out */
	u32 eof:4;	/* TX: in, RX: out */
	u32 sof:4;	/* TX: in, RX: out */
};
| 94 | |
| 95 | #define TB_FRAME_SIZE 0x100 /* minimum size for ring_rx */ |
| 96 | |
Mika Westerberg | 046bee1 | 2017-06-06 15:24:57 +0300 | [diff] [blame^] | 97 | struct tb_ring *ring_alloc_tx(struct tb_nhi *nhi, int hop, int size, |
| 98 | unsigned int flags); |
| 99 | struct tb_ring *ring_alloc_rx(struct tb_nhi *nhi, int hop, int size, |
| 100 | unsigned int flags); |
Andreas Noever | 1660315 | 2014-06-03 22:03:58 +0200 | [diff] [blame] | 101 | void ring_start(struct tb_ring *ring); |
| 102 | void ring_stop(struct tb_ring *ring); |
| 103 | void ring_free(struct tb_ring *ring); |
| 104 | |
| 105 | int __ring_enqueue(struct tb_ring *ring, struct ring_frame *frame); |
| 106 | |
/**
 * ring_rx() - enqueue a frame on an RX ring
 * @ring: Ring to enqueue the frame on (must be an RX ring)
 * @frame: Frame to enqueue
 *
 * frame->buffer, frame->buffer_phy and frame->callback have to be set. The
 * buffer must contain at least TB_FRAME_SIZE bytes.
 *
 * frame->callback will be invoked with frame->size, frame->flags, frame->eof,
 * frame->sof set once the frame has been received.
 *
 * If ring_stop() is called after the packet has been enqueued frame->callback
 * will be called with canceled set to true.
 *
 * Return: Returns ESHUTDOWN if ring_stop() has been called. Zero otherwise.
 */
static inline int ring_rx(struct tb_ring *ring, struct ring_frame *frame)
{
	/* Sanity check only: WARN_ON() does not abort the enqueue. */
	WARN_ON(ring->is_tx);
	return __ring_enqueue(ring, frame);
}
| 126 | |
/**
 * ring_tx() - enqueue a frame on a TX ring
 * @ring: Ring to enqueue the frame on (must be a TX ring)
 * @frame: Frame to enqueue
 *
 * frame->buffer, frame->buffer_phy, frame->callback, frame->size, frame->eof
 * and frame->sof have to be set.
 *
 * frame->callback will be invoked once the frame has been transmitted.
 *
 * If ring_stop() is called after the packet has been enqueued frame->callback
 * will be called with canceled set to true.
 *
 * Return: Returns ESHUTDOWN if ring_stop() has been called. Zero otherwise.
 */
static inline int ring_tx(struct tb_ring *ring, struct ring_frame *frame)
{
	/* Sanity check only: WARN_ON() does not abort the enqueue. */
	WARN_ON(!ring->is_tx);
	return __ring_enqueue(ring, frame);
}
| 145 | |
| 146 | #endif |