blob: 71c712300326becfec4259781a61a2dbb1533b61 [file] [log] [blame]
Greg Kroah-Hartmanb2441312017-11-01 15:07:57 +01001// SPDX-License-Identifier: GPL-2.0
Andreas Noever3364f0c2014-06-03 22:04:08 +02002/*
Mika Westerberg93f36ad2017-02-19 13:48:29 +02003 * Thunderbolt driver - Tunneling support
Andreas Noever3364f0c2014-06-03 22:04:08 +02004 *
5 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
Mika Westerberg93f36ad2017-02-19 13:48:29 +02006 * Copyright (C) 2019, Intel Corporation
Andreas Noever3364f0c2014-06-03 22:04:08 +02007 */
8
9#include <linux/slab.h>
10#include <linux/list.h>
11
Mika Westerberg1752b9f2017-02-19 10:58:35 +020012#include "tunnel.h"
Andreas Noever3364f0c2014-06-03 22:04:08 +020013#include "tb.h"
14
/* PCIe adapters use always HopID of 8 for both directions */
#define TB_PCI_HOPID			8

/* Indices of the two unidirectional paths in tunnel->paths[] */
#define TB_PCI_PATH_DOWN		0
#define TB_PCI_PATH_UP			1

/*
 * Logging helper: prints both tunnel end points as "route:port <->
 * route:port" followed by the caller supplied format string, using the
 * given tb_* log level macro.
 *
 * NOTE(review): the "(PCI)" tag is hard-coded into the format string even
 * though the macro itself is otherwise generic — revisit if other tunnel
 * types start using these wrappers.
 */
#define __TB_TUNNEL_PRINT(level, tunnel, fmt, arg...)                   \
	do {                                                            \
		struct tb_tunnel *__tunnel = (tunnel);                  \
		level(__tunnel->tb, "%llx:%x <-> %llx:%x (PCI): " fmt,  \
		      tb_route(__tunnel->src_port->sw),                 \
		      __tunnel->src_port->port,                         \
		      tb_route(__tunnel->dst_port->sw),                 \
		      __tunnel->dst_port->port,                         \
		      ## arg);                                          \
	} while (0)

/* Convenience wrappers, one per log level */
#define tb_tunnel_WARN(tunnel, fmt, arg...) \
	__TB_TUNNEL_PRINT(tb_WARN, tunnel, fmt, ##arg)
#define tb_tunnel_warn(tunnel, fmt, arg...) \
	__TB_TUNNEL_PRINT(tb_warn, tunnel, fmt, ##arg)
#define tb_tunnel_info(tunnel, fmt, arg...) \
	__TB_TUNNEL_PRINT(tb_info, tunnel, fmt, ##arg)
#define tb_tunnel_dbg(tunnel, fmt, arg...) \
	__TB_TUNNEL_PRINT(tb_dbg, tunnel, fmt, ##arg)
Andreas Noever3364f0c2014-06-03 22:04:08 +020040
Mika Westerberg93f36ad2017-02-19 13:48:29 +020041static struct tb_tunnel *tb_tunnel_alloc(struct tb *tb, size_t npaths)
42{
43 struct tb_tunnel *tunnel;
44
45 tunnel = kzalloc(sizeof(*tunnel), GFP_KERNEL);
46 if (!tunnel)
47 return NULL;
48
49 tunnel->paths = kcalloc(npaths, sizeof(tunnel->paths[0]), GFP_KERNEL);
50 if (!tunnel->paths) {
51 tb_tunnel_free(tunnel);
52 return NULL;
53 }
54
55 INIT_LIST_HEAD(&tunnel->list);
56 tunnel->tb = tb;
57 tunnel->npaths = npaths;
58
59 return tunnel;
60}
61
62static int tb_pci_activate(struct tb_tunnel *tunnel, bool activate)
63{
64 int res;
65
66 res = tb_pci_port_enable(tunnel->src_port, activate);
67 if (res)
68 return res;
69
Mika Westerberg0414bec2017-02-19 23:43:26 +020070 if (tb_port_is_pcie_up(tunnel->dst_port))
71 return tb_pci_port_enable(tunnel->dst_port, activate);
72
73 return 0;
Mika Westerberg93f36ad2017-02-19 13:48:29 +020074}
75
Andreas Noever3364f0c2014-06-03 22:04:08 +020076static void tb_pci_init_path(struct tb_path *path)
77{
78 path->egress_fc_enable = TB_PATH_SOURCE | TB_PATH_INTERNAL;
79 path->egress_shared_buffer = TB_PATH_NONE;
80 path->ingress_fc_enable = TB_PATH_ALL;
81 path->ingress_shared_buffer = TB_PATH_NONE;
82 path->priority = 3;
83 path->weight = 1;
84 path->drop_packages = 0;
85 path->nfc_credits = 0;
Mika Westerberg0414bec2017-02-19 23:43:26 +020086 path->hops[0].initial_credits = 7;
87 path->hops[1].initial_credits = 16;
88}
89
/**
 * tb_tunnel_discover_pci() - Discover existing PCIe tunnels
 * @tb: Pointer to the domain structure
 * @down: PCIe downstream adapter
 *
 * If @down adapter is active, follows the tunnel to the PCIe upstream
 * adapter and back. Returns the discovered tunnel or %NULL if there was
 * no tunnel.
 */
struct tb_tunnel *tb_tunnel_discover_pci(struct tb *tb, struct tb_port *down)
{
	struct tb_tunnel *tunnel;
	struct tb_path *path;

	/* An inactive downstream adapter means there is nothing to discover */
	if (!tb_pci_port_is_enabled(down))
		return NULL;

	tunnel = tb_tunnel_alloc(tb, 2);
	if (!tunnel)
		return NULL;

	tunnel->activate = tb_pci_activate;
	tunnel->src_port = down;

	/*
	 * Discover both paths even if they are not complete. We will
	 * clean them up by calling tb_tunnel_deactivate() below in that
	 * case.
	 */
	path = tb_path_discover(down, TB_PCI_HOPID, NULL, -1,
				&tunnel->dst_port, "PCIe Up");
	if (!path) {
		/* Just disable the downstream port */
		tb_pci_port_enable(down, false);
		goto err_free;
	}
	tunnel->paths[TB_PCI_PATH_UP] = path;
	tb_pci_init_path(tunnel->paths[TB_PCI_PATH_UP]);

	/* Walk the opposite direction back from the discovered end point */
	path = tb_path_discover(tunnel->dst_port, -1, down, TB_PCI_HOPID, NULL,
				"PCIe Down");
	if (!path)
		goto err_deactivate;
	tunnel->paths[TB_PCI_PATH_DOWN] = path;
	tb_pci_init_path(tunnel->paths[TB_PCI_PATH_DOWN]);

	/* Validate that the tunnel is complete */
	if (!tb_port_is_pcie_up(tunnel->dst_port)) {
		tb_port_warn(tunnel->dst_port,
			     "path does not end on a PCIe adapter, cleaning up\n");
		goto err_deactivate;
	}

	/*
	 * NOTE(review): src_port is assigned @down above and nothing in
	 * between writes it, so this check looks vestigial — confirm
	 * whether tb_path_discover() was ever expected to update it.
	 */
	if (down != tunnel->src_port) {
		tb_tunnel_warn(tunnel, "path is not complete, cleaning up\n");
		goto err_deactivate;
	}

	/* Both ends must be enabled for the tunnel to count as active */
	if (!tb_pci_port_is_enabled(tunnel->dst_port)) {
		tb_tunnel_warn(tunnel,
			       "tunnel is not fully activated, cleaning up\n");
		goto err_deactivate;
	}

	tb_tunnel_dbg(tunnel, "discovered\n");
	return tunnel;

err_deactivate:
	tb_tunnel_deactivate(tunnel);
err_free:
	tb_tunnel_free(tunnel);

	return NULL;
}
164
165/**
Mika Westerberg93f36ad2017-02-19 13:48:29 +0200166 * tb_tunnel_alloc_pci() - allocate a pci tunnel
167 * @tb: Pointer to the domain structure
168 * @up: PCIe upstream adapter port
169 * @down: PCIe downstream adapter port
Andreas Noever3364f0c2014-06-03 22:04:08 +0200170 *
171 * Allocate a PCI tunnel. The ports must be of type TB_TYPE_PCIE_UP and
172 * TB_TYPE_PCIE_DOWN.
173 *
Mika Westerberg93f36ad2017-02-19 13:48:29 +0200174 * Return: Returns a tb_tunnel on success or NULL on failure.
Andreas Noever3364f0c2014-06-03 22:04:08 +0200175 */
Mika Westerberg93f36ad2017-02-19 13:48:29 +0200176struct tb_tunnel *tb_tunnel_alloc_pci(struct tb *tb, struct tb_port *up,
177 struct tb_port *down)
Andreas Noever3364f0c2014-06-03 22:04:08 +0200178{
Mika Westerberg93f36ad2017-02-19 13:48:29 +0200179 struct tb_tunnel *tunnel;
Mika Westerberg8c7acaaf2017-02-19 22:11:41 +0200180 struct tb_path *path;
Mika Westerberg93f36ad2017-02-19 13:48:29 +0200181
182 tunnel = tb_tunnel_alloc(tb, 2);
Andreas Noever3364f0c2014-06-03 22:04:08 +0200183 if (!tunnel)
Mika Westerberg93f36ad2017-02-19 13:48:29 +0200184 return NULL;
Andreas Noever3364f0c2014-06-03 22:04:08 +0200185
Mika Westerberg93f36ad2017-02-19 13:48:29 +0200186 tunnel->activate = tb_pci_activate;
187 tunnel->src_port = down;
188 tunnel->dst_port = up;
Andreas Noever3364f0c2014-06-03 22:04:08 +0200189
Mika Westerberg8c7acaaf2017-02-19 22:11:41 +0200190 path = tb_path_alloc(tb, down, TB_PCI_HOPID, up, TB_PCI_HOPID, 0,
191 "PCIe Down");
192 if (!path) {
Mika Westerberg93f36ad2017-02-19 13:48:29 +0200193 tb_tunnel_free(tunnel);
194 return NULL;
Andreas Noever3364f0c2014-06-03 22:04:08 +0200195 }
Mika Westerberg8c7acaaf2017-02-19 22:11:41 +0200196 tb_pci_init_path(path);
197 tunnel->paths[TB_PCI_PATH_UP] = path;
Mika Westerberg93f36ad2017-02-19 13:48:29 +0200198
Mika Westerberg8c7acaaf2017-02-19 22:11:41 +0200199 path = tb_path_alloc(tb, up, TB_PCI_HOPID, down, TB_PCI_HOPID, 0,
200 "PCIe Up");
201 if (!path) {
Mika Westerberg93f36ad2017-02-19 13:48:29 +0200202 tb_tunnel_free(tunnel);
203 return NULL;
204 }
Mika Westerberg8c7acaaf2017-02-19 22:11:41 +0200205 tb_pci_init_path(path);
206 tunnel->paths[TB_PCI_PATH_DOWN] = path;
Mika Westerberg93f36ad2017-02-19 13:48:29 +0200207
208 return tunnel;
Andreas Noever3364f0c2014-06-03 22:04:08 +0200209}
210
211/**
Mika Westerberg93f36ad2017-02-19 13:48:29 +0200212 * tb_tunnel_free() - free a tunnel
213 * @tunnel: Tunnel to be freed
Andreas Noever3364f0c2014-06-03 22:04:08 +0200214 *
215 * The tunnel must have been deactivated.
216 */
Mika Westerberg93f36ad2017-02-19 13:48:29 +0200217void tb_tunnel_free(struct tb_tunnel *tunnel)
Andreas Noever3364f0c2014-06-03 22:04:08 +0200218{
Mika Westerberg93f36ad2017-02-19 13:48:29 +0200219 int i;
220
221 if (!tunnel)
Andreas Noever3364f0c2014-06-03 22:04:08 +0200222 return;
Mika Westerberg93f36ad2017-02-19 13:48:29 +0200223
224 for (i = 0; i < tunnel->npaths; i++) {
225 if (tunnel->paths[i] && tunnel->paths[i]->activated) {
226 tb_tunnel_WARN(tunnel,
227 "trying to free an activated tunnel\n");
228 return;
229 }
Andreas Noever3364f0c2014-06-03 22:04:08 +0200230 }
Mika Westerberg93f36ad2017-02-19 13:48:29 +0200231
232 for (i = 0; i < tunnel->npaths; i++) {
233 if (tunnel->paths[i])
234 tb_path_free(tunnel->paths[i]);
235 }
236
237 kfree(tunnel->paths);
Andreas Noever3364f0c2014-06-03 22:04:08 +0200238 kfree(tunnel);
239}
240
241/**
Mika Westerberg93f36ad2017-02-19 13:48:29 +0200242 * tb_tunnel_is_invalid - check whether an activated path is still valid
243 * @tunnel: Tunnel to check
Andreas Noever3364f0c2014-06-03 22:04:08 +0200244 */
Mika Westerberg93f36ad2017-02-19 13:48:29 +0200245bool tb_tunnel_is_invalid(struct tb_tunnel *tunnel)
Andreas Noever3364f0c2014-06-03 22:04:08 +0200246{
Mika Westerberg93f36ad2017-02-19 13:48:29 +0200247 int i;
Andreas Noever3364f0c2014-06-03 22:04:08 +0200248
Mika Westerberg93f36ad2017-02-19 13:48:29 +0200249 for (i = 0; i < tunnel->npaths; i++) {
250 WARN_ON(!tunnel->paths[i]->activated);
251 if (tb_path_is_invalid(tunnel->paths[i]))
252 return true;
253 }
254
255 return false;
Andreas Noever3364f0c2014-06-03 22:04:08 +0200256}
257
258/**
Mika Westerberg93f36ad2017-02-19 13:48:29 +0200259 * tb_tunnel_restart() - activate a tunnel after a hardware reset
260 * @tunnel: Tunnel to restart
Andreas Noever3364f0c2014-06-03 22:04:08 +0200261 *
Mika Westerberg93f36ad2017-02-19 13:48:29 +0200262 * Return: 0 on success and negative errno in case if failure
Andreas Noever3364f0c2014-06-03 22:04:08 +0200263 */
Mika Westerberg93f36ad2017-02-19 13:48:29 +0200264int tb_tunnel_restart(struct tb_tunnel *tunnel)
Andreas Noever3364f0c2014-06-03 22:04:08 +0200265{
Mika Westerberg93f36ad2017-02-19 13:48:29 +0200266 int res, i;
Andreas Noever3364f0c2014-06-03 22:04:08 +0200267
268 tb_tunnel_info(tunnel, "activating\n");
269
Mika Westerbergaae9e272017-02-19 23:37:35 +0200270 /*
271 * Make sure all paths are properly disabled before enabling
272 * them again.
273 */
Mika Westerberg93f36ad2017-02-19 13:48:29 +0200274 for (i = 0; i < tunnel->npaths; i++) {
Mika Westerbergaae9e272017-02-19 23:37:35 +0200275 if (tunnel->paths[i]->activated) {
276 tb_path_deactivate(tunnel->paths[i]);
277 tunnel->paths[i]->activated = false;
278 }
279 }
280
281 for (i = 0; i < tunnel->npaths; i++) {
Mika Westerberg93f36ad2017-02-19 13:48:29 +0200282 res = tb_path_activate(tunnel->paths[i]);
283 if (res)
284 goto err;
285 }
Andreas Noever3364f0c2014-06-03 22:04:08 +0200286
Mika Westerberg93f36ad2017-02-19 13:48:29 +0200287 if (tunnel->activate) {
288 res = tunnel->activate(tunnel, true);
289 if (res)
290 goto err;
291 }
Andreas Noever3364f0c2014-06-03 22:04:08 +0200292
Andreas Noever3364f0c2014-06-03 22:04:08 +0200293 return 0;
Mika Westerberg93f36ad2017-02-19 13:48:29 +0200294
Andreas Noever3364f0c2014-06-03 22:04:08 +0200295err:
296 tb_tunnel_warn(tunnel, "activation failed\n");
Mika Westerberg93f36ad2017-02-19 13:48:29 +0200297 tb_tunnel_deactivate(tunnel);
Andreas Noever3364f0c2014-06-03 22:04:08 +0200298 return res;
299}
300
301/**
Mika Westerberg93f36ad2017-02-19 13:48:29 +0200302 * tb_tunnel_activate() - activate a tunnel
303 * @tunnel: Tunnel to activate
Andreas Noever3364f0c2014-06-03 22:04:08 +0200304 *
305 * Return: Returns 0 on success or an error code on failure.
306 */
Mika Westerberg93f36ad2017-02-19 13:48:29 +0200307int tb_tunnel_activate(struct tb_tunnel *tunnel)
Andreas Noever3364f0c2014-06-03 22:04:08 +0200308{
Mika Westerberg93f36ad2017-02-19 13:48:29 +0200309 int i;
310
311 tb_tunnel_info(tunnel, "activating\n");
312
313 for (i = 0; i < tunnel->npaths; i++) {
314 if (tunnel->paths[i]->activated) {
315 tb_tunnel_WARN(tunnel,
316 "trying to activate an already activated tunnel\n");
317 return -EINVAL;
318 }
Andreas Noever3364f0c2014-06-03 22:04:08 +0200319 }
320
Mika Westerberg93f36ad2017-02-19 13:48:29 +0200321 return tb_tunnel_restart(tunnel);
Andreas Noever3364f0c2014-06-03 22:04:08 +0200322}
323
Andreas Noever3364f0c2014-06-03 22:04:08 +0200324/**
Mika Westerberg93f36ad2017-02-19 13:48:29 +0200325 * tb_tunnel_deactivate() - deactivate a tunnel
326 * @tunnel: Tunnel to deactivate
Andreas Noever3364f0c2014-06-03 22:04:08 +0200327 */
Mika Westerberg93f36ad2017-02-19 13:48:29 +0200328void tb_tunnel_deactivate(struct tb_tunnel *tunnel)
Andreas Noever3364f0c2014-06-03 22:04:08 +0200329{
Mika Westerberg93f36ad2017-02-19 13:48:29 +0200330 int i;
Andreas Noever3364f0c2014-06-03 22:04:08 +0200331
Mika Westerberg93f36ad2017-02-19 13:48:29 +0200332 tb_tunnel_info(tunnel, "deactivating\n");
333
334 if (tunnel->activate)
335 tunnel->activate(tunnel, false);
336
337 for (i = 0; i < tunnel->npaths; i++) {
Mika Westerberg0414bec2017-02-19 23:43:26 +0200338 if (tunnel->paths[i] && tunnel->paths[i]->activated)
Mika Westerberg93f36ad2017-02-19 13:48:29 +0200339 tb_path_deactivate(tunnel->paths[i]);
340 }
341}