/*
 * Thunderbolt Cactus Ridge driver - NHI driver
 *
 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
 */

#ifndef DSL3510_H_
#define DSL3510_H_

#include <linux/idr.h>
#include <linux/mutex.h>
#include <linux/workqueue.h>

/**
 * struct tb_nhi - thunderbolt native host interface
 * @lock: Must be held during ring creation/destruction. Is acquired by
 *	  interrupt_work when dispatching interrupts to individual rings.
 * @pdev: Pointer to the PCI device
 * @iobase: MMIO space of the NHI
 * @tx_rings: All Tx rings available on this host controller
 * @rx_rings: All Rx rings available on this host controller
 * @msix_ida: Used to allocate MSI-X vectors for rings
 * @going_away: The host controller device is about to disappear so when
 *		this flag is set, avoid touching the hardware anymore.
 * @interrupt_work: Work scheduled to handle ring interrupt when no
 *		    MSI-X is used.
 * @hop_count: Number of rings (end point hops) supported by NHI.
 */
struct tb_nhi {
	struct mutex lock;
	struct pci_dev *pdev;
	void __iomem *iobase;
	struct tb_ring **tx_rings;
	struct tb_ring **rx_rings;
	struct ida msix_ida;
	bool going_away;
	struct work_struct interrupt_work;
	u32 hop_count;
};

/**
 * struct tb_ring - thunderbolt TX or RX ring associated with an NHI
 * @lock: Lock serializing actions to this ring. Must be acquired after
 *	  nhi->lock.
 * @nhi: Pointer to the native host controller interface
 * @size: Size of the ring
 * @hop: Hop (DMA channel) associated with this ring
 * @head: Head of the ring (write next descriptor here)
 * @tail: Tail of the ring (complete next descriptor here)
 * @descriptors: Allocated descriptors for this ring
 * @descriptors_dma: DMA address of the descriptors
 * @queue: Queue holding frames to be transferred over this ring
 * @in_flight: Queue holding frames that are currently in flight
 * @work: Interrupt work structure
 * @is_tx: Is the ring Tx or Rx
 * @running: Is the ring running
 * @irq: MSI-X irq number if the ring uses MSI-X. %0 otherwise.
 * @vector: MSI-X vector number the ring uses (only set if @irq is > 0)
 * @flags: Ring specific flags
 */
struct tb_ring {
	struct mutex lock;
	struct tb_nhi *nhi;
	int size;
	int hop;
	int head;
	int tail;
	struct ring_desc *descriptors;
	dma_addr_t descriptors_dma;
	struct list_head queue;
	struct list_head in_flight;
	struct work_struct work;
	bool is_tx:1;
	bool running:1;
	int irq;
	u8 vector;
	unsigned int flags;
};
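
/*
 * Lock ordering sketch (hypothetical caller code, not part of this
 * header): when both locks are needed, nhi->lock must be taken before
 * ring->lock, matching the rules documented on the structures above.
 *
 *	static bool example_ring_is_running(struct tb_ring *ring)
 *	{
 *		bool running;
 *
 *		mutex_lock(&ring->nhi->lock);
 *		mutex_lock(&ring->lock);
 *		running = ring->running;
 *		mutex_unlock(&ring->lock);
 *		mutex_unlock(&ring->nhi->lock);
 *		return running;
 *	}
 */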

/* Leave ring interrupt enabled on suspend */
#define RING_FLAG_NO_SUSPEND	BIT(0)

struct ring_frame;
typedef void (*ring_cb)(struct tb_ring*, struct ring_frame*, bool canceled);

/**
 * struct ring_frame - for use with ring_rx/ring_tx
 * @buffer_phy: DMA mapped address of the frame buffer
 * @callback: Callback invoked once the frame has completed or was canceled
 * @list: Frame is linked to a ring queue using this
 * @size: Size of the frame in bytes (TX: in, RX: out)
 * @flags: Flags for the frame (RX: out)
 * @eof: End of frame protocol defined field (TX: in, RX: out)
 * @sof: Start of frame protocol defined field (TX: in, RX: out)
 */
struct ring_frame {
	dma_addr_t buffer_phy;
	ring_cb callback;
	struct list_head list;
	u32 size:12;	/* TX: in, RX: out */
	u32 flags:12;	/* RX: out */
	u32 eof:4;	/* TX: in, RX: out */
	u32 sof:4;	/* TX: in, RX: out */
};

#define TB_FRAME_SIZE 0x100	/* minimum size for ring_rx */

struct tb_ring *ring_alloc_tx(struct tb_nhi *nhi, int hop, int size,
			      unsigned int flags);
struct tb_ring *ring_alloc_rx(struct tb_nhi *nhi, int hop, int size,
			      unsigned int flags);
void ring_start(struct tb_ring *ring);
void ring_stop(struct tb_ring *ring);
void ring_free(struct tb_ring *ring);
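
/*
 * Typical ring lifetime, as a hypothetical usage sketch (the hop and
 * size values below are examples only): allocate, start, enqueue frames,
 * then stop the ring before freeing it.
 *
 *	static struct tb_ring *example_ring_setup(struct tb_nhi *nhi)
 *	{
 *		struct tb_ring *ring;
 *
 *		ring = ring_alloc_tx(nhi, 0, 256, RING_FLAG_NO_SUSPEND);
 *		if (!ring)
 *			return NULL;
 *		ring_start(ring);
 *		return ring;
 *	}
 *
 *	static void example_ring_teardown(struct tb_ring *ring)
 *	{
 *		ring_stop(ring);
 *		ring_free(ring);
 *	}
 */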

int __ring_enqueue(struct tb_ring *ring, struct ring_frame *frame);

/**
 * ring_rx() - enqueue a frame on an RX ring
 * @ring: Ring to enqueue the frame on
 * @frame: Frame to enqueue
 *
 * frame->buffer_phy (the DMA address of the receive buffer) and
 * frame->callback have to be set. The buffer must contain at least
 * TB_FRAME_SIZE bytes.
 *
 * frame->callback will be invoked with frame->size, frame->flags, frame->eof,
 * frame->sof set once the frame has been received.
 *
 * If ring_stop() is called after the packet has been enqueued,
 * frame->callback will be called with canceled set to true.
 *
 * Return: %-ESHUTDOWN if ring_stop() has been called. Zero otherwise.
 */
static inline int ring_rx(struct tb_ring *ring, struct ring_frame *frame)
{
	WARN_ON(ring->is_tx);
	return __ring_enqueue(ring, frame);
}
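
/*
 * Example Rx submission, as a hypothetical sketch (my_rx_done(),
 * my_post_rx_buffer() and the caller-provided DMA mapping of at least
 * TB_FRAME_SIZE bytes are assumptions, not part of this header):
 *
 *	static void my_rx_done(struct tb_ring *ring, struct ring_frame *frame,
 *			       bool canceled)
 *	{
 *		if (!canceled)
 *			pr_info("received %u bytes, eof %u sof %u\n",
 *				frame->size, frame->eof, frame->sof);
 *	}
 *
 *	static int my_post_rx_buffer(struct tb_ring *ring,
 *				     struct ring_frame *frame, dma_addr_t dma)
 *	{
 *		frame->buffer_phy = dma;
 *		frame->callback = my_rx_done;
 *		return ring_rx(ring, frame);
 *	}
 */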

/**
 * ring_tx() - enqueue a frame on a TX ring
 * @ring: Ring to enqueue the frame on
 * @frame: Frame to enqueue
 *
 * frame->buffer_phy (the DMA address of the frame to transmit),
 * frame->callback, frame->size, frame->eof and frame->sof have to be set.
 *
 * frame->callback will be invoked once the frame has been transmitted.
 *
 * If ring_stop() is called after the packet has been enqueued,
 * frame->callback will be called with canceled set to true.
 *
 * Return: %-ESHUTDOWN if ring_stop() has been called. Zero otherwise.
 */
static inline int ring_tx(struct tb_ring *ring, struct ring_frame *frame)
{
	WARN_ON(!ring->is_tx);
	return __ring_enqueue(ring, frame);
}
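
/*
 * Example Tx submission, as a hypothetical sketch (all names are
 * illustrative; the done callback has the ring_cb signature and pdf
 * stands in for the protocol defined eof/sof values):
 *
 *	static int my_send_frame(struct tb_ring *ring, struct ring_frame *frame,
 *				 dma_addr_t dma, u32 len, u32 pdf, ring_cb done)
 *	{
 *		frame->buffer_phy = dma;
 *		frame->callback = done;
 *		frame->size = len;
 *		frame->eof = pdf;
 *		frame->sof = pdf;
 *		return ring_tx(ring, frame);
 *	}
 */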

enum nhi_fw_mode {
	NHI_FW_SAFE_MODE,
	NHI_FW_AUTH_MODE,
	NHI_FW_EP_MODE,
	NHI_FW_CM_MODE,
};

enum nhi_mailbox_cmd {
	NHI_MAILBOX_SAVE_DEVS = 0x05,
	NHI_MAILBOX_DRV_UNLOADS = 0x07,
	NHI_MAILBOX_ALLOW_ALL_DEVS = 0x23,
};

int nhi_mailbox_cmd(struct tb_nhi *nhi, enum nhi_mailbox_cmd cmd, u32 data);
enum nhi_fw_mode nhi_mailbox_mode(struct tb_nhi *nhi);
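
/*
 * Example mailbox use, as a hypothetical sketch (the command and data
 * values are illustrative; error handling is minimal):
 *
 *	static int example_save_devices(struct tb_nhi *nhi)
 *	{
 *		int ret;
 *
 *		ret = nhi_mailbox_cmd(nhi, NHI_MAILBOX_SAVE_DEVS, 0);
 *		if (ret)
 *			return ret;
 *		if (nhi_mailbox_mode(nhi) != NHI_FW_CM_MODE)
 *			dev_warn(&nhi->pdev->dev, "firmware CM not active\n");
 *		return 0;
 *	}
 */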

/*
 * PCI IDs used in this driver from Win Ridge forward. There is no
 * need for the PCI quirk anymore as we will use ICM also on Apple
 * hardware.
 */
#define PCI_DEVICE_ID_INTEL_WIN_RIDGE_2C_NHI		0x157d
#define PCI_DEVICE_ID_INTEL_WIN_RIDGE_2C_BRIDGE		0x157e
#define PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_NHI		0x15bf
#define PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_BRIDGE	0x15c0
#define PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_NHI	0x15d2
#define PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_BRIDGE	0x15d3
#define PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_NHI	0x15d9
#define PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_BRIDGE	0x15da
#define PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_USBONLY_NHI	0x15dc
#define PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_USBONLY_NHI	0x15dd
#define PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_USBONLY_NHI	0x15de

#endif