blob: 630f44140530606149dd2af502c3809c989d6703 [file] [log] [blame]
/*
 * Thunderbolt Cactus Ridge driver - NHI driver
 *
 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
 */
6
7#ifndef DSL3510_H_
8#define DSL3510_H_
9
Mika Westerberg046bee12017-06-06 15:24:57 +030010#include <linux/idr.h>
Andreas Noever16603152014-06-03 22:03:58 +020011#include <linux/mutex.h>
12#include <linux/workqueue.h>
13
/**
 * struct tb_nhi - thunderbolt native host interface
 * @lock: Must be held during ring creation/destruction. Is acquired by
 *	  interrupt_work when dispatching interrupts to individual rings.
 * @pdev: Pointer to the PCI device
 * @iobase: MMIO space of the NHI
 * @tx_rings: All Tx rings available on this host controller
 * @rx_rings: All Rx rings available on this host controller
 * @msix_ida: Used to allocate MSI-X vectors for rings
 * @interrupt_work: Work scheduled to handle ring interrupt when no
 *		    MSI-X is used.
 * @hop_count: Number of rings (end point hops) supported by NHI.
 */
struct tb_nhi {
	struct mutex lock;
	struct pci_dev *pdev;
	void __iomem *iobase;
	struct tb_ring **tx_rings;	/* presumably hop_count entries, indexed by hop — confirm at alloc site */
	struct tb_ring **rx_rings;
	struct ida msix_ida;
	struct work_struct interrupt_work;
	u32 hop_count;
};
37
/**
 * struct tb_ring - thunderbolt TX or RX ring associated with a NHI
 * @lock: Lock serializing actions to this ring. Must be acquired after
 *	  nhi->lock.
 * @nhi: Pointer to the native host controller interface
 * @size: Size of the ring
 * @hop: Hop (DMA channel) associated with this ring
 * @head: Head of the ring (write next descriptor here)
 * @tail: Tail of the ring (complete next descriptor here)
 * @descriptors: Allocated descriptors for this ring
 * @descriptors_dma: DMA (bus) address of @descriptors handed to the NHI
 * @queue: Queue holding frames to be transferred over this ring
 * @in_flight: Queue holding frames that are currently in flight
 * @work: Interrupt work structure
 * @is_tx: Is the ring Tx or Rx
 * @running: Is the ring running
 * @irq: MSI-X irq number if the ring uses MSI-X. %0 otherwise.
 * @vector: MSI-X vector number the ring uses (only set if @irq is > 0)
 * @flags: Ring specific flags
 */
struct tb_ring {
	struct mutex lock;
	struct tb_nhi *nhi;
	int size;
	int hop;
	int head;
	int tail;
	struct ring_desc *descriptors;
	dma_addr_t descriptors_dma;
	struct list_head queue;
	struct list_head in_flight;
	struct work_struct work;
	bool is_tx:1;
	bool running:1;
	int irq;
	u8 vector;
	unsigned int flags;
};
75
Mika Westerberg046bee12017-06-06 15:24:57 +030076/* Leave ring interrupt enabled on suspend */
77#define RING_FLAG_NO_SUSPEND BIT(0)
78
Andreas Noever16603152014-06-03 22:03:58 +020079struct ring_frame;
80typedef void (*ring_cb)(struct tb_ring*, struct ring_frame*, bool canceled);
81
/**
 * struct ring_frame - for use with ring_rx/ring_tx
 * @buffer_phy: DMA address of the buffer to be transferred
 * @callback: Invoked once the frame completes (or is canceled by ring_stop)
 * @list: Linkage into the ring's queue/in_flight lists
 * @size: Frame size. TX: in, RX: out
 * @flags: Frame flags. RX: out
 * @eof: End-of-frame PDF value. TX: in, RX: out
 * @sof: Start-of-frame PDF value. TX: in, RX: out
 */
struct ring_frame {
	dma_addr_t buffer_phy;
	ring_cb callback;
	struct list_head list;
	u32 size:12; /* TX: in, RX: out*/
	u32 flags:12; /* RX: out */
	u32 eof:4; /* TX:in, RX: out */
	u32 sof:4; /* TX:in, RX: out */
};
94
95#define TB_FRAME_SIZE 0x100 /* minimum size for ring_rx */
96
Mika Westerberg046bee12017-06-06 15:24:57 +030097struct tb_ring *ring_alloc_tx(struct tb_nhi *nhi, int hop, int size,
98 unsigned int flags);
99struct tb_ring *ring_alloc_rx(struct tb_nhi *nhi, int hop, int size,
100 unsigned int flags);
Andreas Noever16603152014-06-03 22:03:58 +0200101void ring_start(struct tb_ring *ring);
102void ring_stop(struct tb_ring *ring);
103void ring_free(struct tb_ring *ring);
104
105int __ring_enqueue(struct tb_ring *ring, struct ring_frame *frame);
106
/**
 * ring_rx() - enqueue a frame on an RX ring
 *
 * frame->buffer, frame->buffer_phy and frame->callback have to be set. The
 * buffer must contain at least TB_FRAME_SIZE bytes.
 *
 * NOTE(review): struct ring_frame has no @buffer member in this header;
 * presumably the caller retains the virtual address and only @buffer_phy
 * is given to the hardware — confirm against the implementation.
 *
 * frame->callback will be invoked with frame->size, frame->flags, frame->eof,
 * frame->sof set once the frame has been received.
 *
 * If ring_stop is called after the packet has been enqueued frame->callback
 * will be called with canceled set to true.
 *
 * Return: Returns ESHUTDOWN if ring_stop has been called. Zero otherwise.
 */
static inline int ring_rx(struct tb_ring *ring, struct ring_frame *frame)
{
	/* Catch misuse: this helper is only valid on RX rings. */
	WARN_ON(ring->is_tx);
	return __ring_enqueue(ring, frame);
}
126
/**
 * ring_tx() - enqueue a frame on a TX ring
 *
 * frame->buffer, frame->buffer_phy, frame->callback, frame->size, frame->eof
 * and frame->sof have to be set.
 *
 * frame->callback will be invoked once the frame has been transmitted.
 *
 * If ring_stop is called after the packet has been enqueued frame->callback
 * will be called with canceled set to true.
 *
 * Return: Returns ESHUTDOWN if ring_stop has been called. Zero otherwise.
 */
static inline int ring_tx(struct tb_ring *ring, struct ring_frame *frame)
{
	/* Catch misuse: this helper is only valid on TX rings. */
	WARN_ON(!ring->is_tx);
	return __ring_enqueue(ring, frame);
}
145
146#endif