/*
 * Thunderbolt Cactus Ridge driver - bus logic (NHI independent)
 *
 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
 */
6
7#ifndef TB_H_
8#define TB_H_
9
Andreas Noevera25c8b22014-06-03 22:04:02 +020010#include <linux/pci.h>
11
12#include "tb_regs.h"
Andreas Noeverd6cc51c2014-06-03 22:04:00 +020013#include "ctl.h"
14
/**
 * struct tb_switch - a thunderbolt switch
 * @config: Switch configuration header
 * @ports: Ports in this switch, indexed by port number
 *	   (see tb_upstream_port())
 * @tb: Pointer to the domain the switch belongs to
 * @uid: Unique ID of the switch
 * @cap_plug_events: Offset of the plug events capability (zero if not found)
 * @is_unplugged: The switch is unplugged and will go away
 * @drom: DROM of the switch (read by tb_drom_read())
 */
struct tb_switch {
	struct tb_regs_switch_header config;
	struct tb_port *ports;
	struct tb *tb;
	u64 uid;
	int cap_plug_events; /* offset, zero if not found */
	bool is_unplugged; /* unplugged, will go away */
	u8 *drom;
};
27
/**
 * struct tb_port - a thunderbolt port, part of a tb_switch
 * @config: Port configuration header
 * @sw: Switch this port belongs to
 * @remote: Remote port (%NULL if not connected)
 * @cap_phy: Offset of the PHY capability (zero if not found)
 * @port: Port number on switch
 * @disabled: Disabled by eeprom
 * @dual_link_port: Other port of a dual link pair, if any
 *		    (NOTE(review): inferred from the field name — confirm)
 * @link_nr: Which half of the dual link this port is (single bit)
 */
struct tb_port {
	struct tb_regs_port_header config;
	struct tb_switch *sw;
	struct tb_port *remote; /* remote port, NULL if not connected */
	int cap_phy; /* offset, zero if not found */
	u8 port; /* port number on switch */
	bool disabled; /* disabled by eeprom */
	struct tb_port *dual_link_port;
	u8 link_nr:1;
};
41
/**
 * struct tb_path_hop - routing information for a tb_path
 *
 * Hop configuration is always done on the IN port of a switch.
 * @in_port and @out_port have to be on the same switch. Packets arriving on
 * @in_port with "hop" = @in_hop_index will get routed through @out_port. The
 * next hop to take (on out_port->remote) is determined by @next_hop_index.
 *
 * @in_counter_index is the index of a counter (in TB_CFG_COUNTERS) on the in
 * port.
 */
struct tb_path_hop {
	struct tb_port *in_port;
	struct tb_port *out_port;
	int in_hop_index;
	int in_counter_index; /* write -1 to disable counters for this hop. */
	int next_hop_index;
};
60
/**
 * enum tb_path_port - path options mask
 *
 * Bit flags selecting on which hops of a path an option is applied
 * (values combine with bitwise OR; %TB_PATH_ALL covers every hop).
 */
enum tb_path_port {
	TB_PATH_NONE = 0,
	TB_PATH_SOURCE = 1, /* activate on the first hop (out of src) */
	TB_PATH_INTERNAL = 2, /* activate on other hops (not the first/last) */
	TB_PATH_DESTINATION = 4, /* activate on the last hop (into dst) */
	TB_PATH_ALL = 7,
};
71
/**
 * struct tb_path - a unidirectional path between two ports
 *
 * A path consists of a number of hops (see tb_path_hop). To establish a PCIe
 * tunnel two paths have to be created between the two PCIe ports.
 *
 * @nfc_credits: Non flow controlled credits.
 * @priority/@weight: Small signed bitfields; NOTE(review): valid ranges are
 *	presumably dictated by the hop config registers — confirm against
 *	tb_regs.h.
 * @hops: Array of @path_length hops making up the path.
 * @activated: Path has been written to the hardware (set by path activation).
 */
struct tb_path {
	struct tb *tb;
	int nfc_credits; /* non flow controlled credits */
	enum tb_path_port ingress_shared_buffer;
	enum tb_path_port egress_shared_buffer;
	enum tb_path_port ingress_fc_enable;
	enum tb_path_port egress_fc_enable;

	int priority:3;
	int weight:4;
	bool drop_packages;
	bool activated;
	struct tb_path_hop *hops;
	int path_length; /* number of hops */
};
94
/**
 * struct tb_cm_ops - Connection manager specific operations vector
 * @start: Starts the domain
 * @stop: Stops the domain
 * @suspend_noirq: Connection manager specific suspend_noirq
 * @resume_noirq: Connection manager specific resume_noirq
 * @hotplug: Handle hotplug event
 *
 * Each connection manager implementation provides one of these; the domain
 * core calls through @cm_ops (see struct tb).
 */
struct tb_cm_ops {
	int (*start)(struct tb *tb);
	void (*stop)(struct tb *tb);
	int (*suspend_noirq)(struct tb *tb);
	int (*resume_noirq)(struct tb *tb);
	hotplug_cb hotplug;
};
Andreas Noever520b6702014-06-03 22:04:07 +0200110
111/**
Andreas Noeverd6cc51c2014-06-03 22:04:00 +0200112 * struct tb - main thunderbolt bus structure
Mika Westerberg9d3cce02017-06-06 15:25:00 +0300113 * @dev: Domain device
114 * @lock: Big lock. Must be held when accessing cfg or any struct
115 * tb_switch / struct tb_port.
116 * @nhi: Pointer to the NHI structure
117 * @ctl: Control channel for this domain
118 * @wq: Ordered workqueue for all domain specific work
119 * @root_switch: Root switch of this domain
120 * @cm_ops: Connection manager specific operations vector
121 * @index: Linux assigned domain number
122 * @privdata: Private connection manager specific data
Andreas Noeverd6cc51c2014-06-03 22:04:00 +0200123 */
124struct tb {
Mika Westerberg9d3cce02017-06-06 15:25:00 +0300125 struct device dev;
126 struct mutex lock;
Andreas Noeverd6cc51c2014-06-03 22:04:00 +0200127 struct tb_nhi *nhi;
128 struct tb_ctl *ctl;
Mika Westerberg9d3cce02017-06-06 15:25:00 +0300129 struct workqueue_struct *wq;
Andreas Noevera25c8b22014-06-03 22:04:02 +0200130 struct tb_switch *root_switch;
Mika Westerberg9d3cce02017-06-06 15:25:00 +0300131 const struct tb_cm_ops *cm_ops;
132 int index;
133 unsigned long privdata[0];
Andreas Noeverd6cc51c2014-06-03 22:04:00 +0200134};
135
Mika Westerberg9d3cce02017-06-06 15:25:00 +0300136static inline void *tb_priv(struct tb *tb)
137{
138 return (void *)tb->privdata;
139}
140
Andreas Noevera25c8b22014-06-03 22:04:02 +0200141/* helper functions & macros */
142
143/**
144 * tb_upstream_port() - return the upstream port of a switch
145 *
146 * Every switch has an upstream port (for the root switch it is the NHI).
147 *
148 * During switch alloc/init tb_upstream_port()->remote may be NULL, even for
149 * non root switches (on the NHI port remote is always NULL).
150 *
151 * Return: Returns the upstream port of the switch.
152 */
153static inline struct tb_port *tb_upstream_port(struct tb_switch *sw)
154{
155 return &sw->ports[sw->config.upstream_port_number];
156}
157
158static inline u64 tb_route(struct tb_switch *sw)
159{
160 return ((u64) sw->config.route_hi) << 32 | sw->config.route_lo;
161}
162
163static inline int tb_sw_read(struct tb_switch *sw, void *buffer,
164 enum tb_cfg_space space, u32 offset, u32 length)
165{
166 return tb_cfg_read(sw->tb->ctl,
167 buffer,
168 tb_route(sw),
169 0,
170 space,
171 offset,
172 length);
173}
174
175static inline int tb_sw_write(struct tb_switch *sw, void *buffer,
176 enum tb_cfg_space space, u32 offset, u32 length)
177{
178 return tb_cfg_write(sw->tb->ctl,
179 buffer,
180 tb_route(sw),
181 0,
182 space,
183 offset,
184 length);
185}
186
187static inline int tb_port_read(struct tb_port *port, void *buffer,
188 enum tb_cfg_space space, u32 offset, u32 length)
189{
190 return tb_cfg_read(port->sw->tb->ctl,
191 buffer,
192 tb_route(port->sw),
193 port->port,
194 space,
195 offset,
196 length);
197}
198
Mika Westerberg16a12582017-06-06 15:24:53 +0300199static inline int tb_port_write(struct tb_port *port, const void *buffer,
Andreas Noevera25c8b22014-06-03 22:04:02 +0200200 enum tb_cfg_space space, u32 offset, u32 length)
201{
202 return tb_cfg_write(port->sw->tb->ctl,
203 buffer,
204 tb_route(port->sw),
205 port->port,
206 space,
207 offset,
208 length);
209}
210
/*
 * Domain level logging: messages are emitted against the NHI PCI device so
 * they can be attributed to a specific host controller.
 */
#define tb_err(tb, fmt, arg...) dev_err(&(tb)->nhi->pdev->dev, fmt, ## arg)
#define tb_WARN(tb, fmt, arg...) dev_WARN(&(tb)->nhi->pdev->dev, fmt, ## arg)
#define tb_warn(tb, fmt, arg...) dev_warn(&(tb)->nhi->pdev->dev, fmt, ## arg)
#define tb_info(tb, fmt, arg...) dev_info(&(tb)->nhi->pdev->dev, fmt, ## arg)
215
216
/*
 * Switch level logging: prefixes the message with the route string of the
 * switch.  @sw is expanded into a local so it is evaluated only once.
 */
#define __TB_SW_PRINT(level, sw, fmt, arg...)           \
	do {                                            \
		struct tb_switch *__sw = (sw);          \
		level(__sw->tb, "%llx: " fmt,           \
		      tb_route(__sw), ## arg);          \
	} while (0)
#define tb_sw_WARN(sw, fmt, arg...) __TB_SW_PRINT(tb_WARN, sw, fmt, ##arg)
#define tb_sw_warn(sw, fmt, arg...) __TB_SW_PRINT(tb_warn, sw, fmt, ##arg)
#define tb_sw_info(sw, fmt, arg...) __TB_SW_PRINT(tb_info, sw, fmt, ##arg)
226
227
/*
 * Port level logging: prefixes the message with the route string of the
 * switch and the port number.  @_port is expanded into a local so it is
 * evaluated only once.
 */
#define __TB_PORT_PRINT(level, _port, fmt, arg...)                      \
	do {                                                            \
		struct tb_port *__port = (_port);                       \
		level(__port->sw->tb, "%llx:%x: " fmt,                  \
		      tb_route(__port->sw), __port->port, ## arg);      \
	} while (0)
#define tb_port_WARN(port, fmt, arg...) \
	__TB_PORT_PRINT(tb_WARN, port, fmt, ##arg)
#define tb_port_warn(port, fmt, arg...) \
	__TB_PORT_PRINT(tb_warn, port, fmt, ##arg)
#define tb_port_info(port, fmt, arg...) \
	__TB_PORT_PRINT(tb_info, port, fmt, ##arg)
240
/* Probe the NHI and return a domain for it (implemented in tb.c). */
struct tb *tb_probe(struct tb_nhi *nhi);

extern struct bus_type tb_bus_type;
extern struct device_type tb_domain_type;

/* Module wide init/exit of the thunderbolt bus. */
int tb_domain_init(void);
void tb_domain_exit(void);

/* Domain lifecycle and power management. */
struct tb *tb_domain_alloc(struct tb_nhi *nhi, size_t privsize);
int tb_domain_add(struct tb *tb);
void tb_domain_remove(struct tb *tb);
int tb_domain_suspend_noirq(struct tb *tb);
int tb_domain_resume_noirq(struct tb *tb);

/**
 * tb_domain_put() - drop a reference to a domain
 *
 * Releases the reference on the underlying domain device; the device core
 * frees the domain when the last reference is gone.
 */
static inline void tb_domain_put(struct tb *tb)
{
	put_device(&tb->dev);
}
Andreas Noeverd6cc51c2014-06-03 22:04:00 +0200259
/* Switch management. */
struct tb_switch *tb_switch_alloc(struct tb *tb, u64 route);
void tb_switch_free(struct tb_switch *sw);
void tb_switch_suspend(struct tb_switch *sw);
int tb_switch_resume(struct tb_switch *sw);
int tb_switch_reset(struct tb *tb, u64 route);
void tb_sw_set_unplugged(struct tb_switch *sw);
struct tb_switch *get_switch_at_route(struct tb_switch *sw, u64 route);

/* Port helpers. */
int tb_wait_for_port(struct tb_port *port, bool wait_if_unplugged);
int tb_port_add_nfc_credits(struct tb_port *port, int credits);
int tb_port_clear_counter(struct tb_port *port, int counter);

/* Capability lookup (return offset, negative errno style on failure —
 * NOTE(review): confirm return convention in cap.c). */
int tb_switch_find_vse_cap(struct tb_switch *sw, enum tb_switch_vse_cap vsec);
int tb_port_find_cap(struct tb_port *port, enum tb_port_cap cap);

/* Path management (see struct tb_path). */
struct tb_path *tb_path_alloc(struct tb *tb, int num_hops);
void tb_path_free(struct tb_path *path);
int tb_path_activate(struct tb_path *path);
void tb_path_deactivate(struct tb_path *path);
bool tb_path_is_invalid(struct tb_path *path);

/* DROM parsing. */
int tb_drom_read(struct tb_switch *sw);
int tb_drom_read_uid_only(struct tb_switch *sw, u64 *uid);
Andreas Noeverc90553b2014-06-03 22:04:11 +0200283
Andreas Noevera25c8b22014-06-03 22:04:02 +0200284
/* Number of TB_ROUTE_SHIFT-bit fields needed to hold @route (ceiling div). */
static inline int tb_route_length(u64 route)
{
	return (fls64(route) + TB_ROUTE_SHIFT - 1) / TB_ROUTE_SHIFT;
}
289
290static inline bool tb_is_upstream_port(struct tb_port *port)
291{
292 return port == tb_upstream_port(port->sw);
293}
294
Andreas Noever9da672a2014-06-03 22:04:05 +0200295/**
296 * tb_downstream_route() - get route to downstream switch
297 *
298 * Port must not be the upstream port (otherwise a loop is created).
299 *
300 * Return: Returns a route to the switch behind @port.
301 */
302static inline u64 tb_downstream_route(struct tb_port *port)
303{
304 return tb_route(port->sw)
305 | ((u64) port->port << (port->sw->config.depth * 8));
306}
307
Andreas Noeverd6cc51c2014-06-03 22:04:00 +0200308#endif