// SPDX-License-Identifier: GPL-2.0
/*
 * Thunderbolt driver - Tunneling support
 *
 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
 * Copyright (C) 2019, Intel Corporation
 */

#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/list.h>

#include "tunnel.h"
#include "tb.h"

/* PCIe adapters always use HopID 8 for both directions */
#define TB_PCI_HOPID			8

#define TB_PCI_PATH_DOWN		0
#define TB_PCI_PATH_UP			1

/* USB3 adapters always use HopID 8 for both directions */
#define TB_USB3_HOPID			8

#define TB_USB3_PATH_DOWN		0
#define TB_USB3_PATH_UP			1

/* DP adapters use HopID 8 for AUX and 9 for Video */
#define TB_DP_AUX_TX_HOPID		8
#define TB_DP_AUX_RX_HOPID		8
#define TB_DP_VIDEO_HOPID		9

#define TB_DP_VIDEO_PATH_OUT		0
#define TB_DP_AUX_PATH_OUT		1
#define TB_DP_AUX_PATH_IN		2

#define TB_DMA_PATH_OUT			0
#define TB_DMA_PATH_IN			1

static const char * const tb_tunnel_names[] = { "PCI", "DP", "DMA", "USB3" };

#define __TB_TUNNEL_PRINT(level, tunnel, fmt, arg...)                   \
	do {                                                            \
		struct tb_tunnel *__tunnel = (tunnel);                  \
		level(__tunnel->tb, "%llx:%x <-> %llx:%x (%s): " fmt,   \
		      tb_route(__tunnel->src_port->sw),                 \
		      __tunnel->src_port->port,                         \
		      tb_route(__tunnel->dst_port->sw),                 \
		      __tunnel->dst_port->port,                         \
		      tb_tunnel_names[__tunnel->type],                  \
		      ## arg);                                          \
	} while (0)

#define tb_tunnel_WARN(tunnel, fmt, arg...) \
	__TB_TUNNEL_PRINT(tb_WARN, tunnel, fmt, ##arg)
#define tb_tunnel_warn(tunnel, fmt, arg...) \
	__TB_TUNNEL_PRINT(tb_warn, tunnel, fmt, ##arg)
#define tb_tunnel_info(tunnel, fmt, arg...) \
	__TB_TUNNEL_PRINT(tb_info, tunnel, fmt, ##arg)
#define tb_tunnel_dbg(tunnel, fmt, arg...) \
	__TB_TUNNEL_PRINT(tb_dbg, tunnel, fmt, ##arg)

static struct tb_tunnel *tb_tunnel_alloc(struct tb *tb, size_t npaths,
					 enum tb_tunnel_type type)
{
	struct tb_tunnel *tunnel;

	tunnel = kzalloc(sizeof(*tunnel), GFP_KERNEL);
	if (!tunnel)
		return NULL;

	tunnel->paths = kcalloc(npaths, sizeof(tunnel->paths[0]), GFP_KERNEL);
	if (!tunnel->paths) {
		tb_tunnel_free(tunnel);
		return NULL;
	}

	INIT_LIST_HEAD(&tunnel->list);
	tunnel->tb = tb;
	tunnel->npaths = npaths;
	tunnel->type = type;

	return tunnel;
}

static int tb_pci_activate(struct tb_tunnel *tunnel, bool activate)
{
	int res;

	res = tb_pci_port_enable(tunnel->src_port, activate);
	if (res)
		return res;

	if (tb_port_is_pcie_up(tunnel->dst_port))
		return tb_pci_port_enable(tunnel->dst_port, activate);

	return 0;
}

static int tb_initial_credits(const struct tb_switch *sw)
{
	/* If the path is complete, sw is not NULL */
	if (sw) {
		/* More credits for faster link */
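		/*
		 * sw->link_speed is in Gb/s and link_width is the lane
		 * count, so 40 corresponds to a 20 Gb/s dual-lane link
		 * and 20 to a 20 Gb/s single-lane or 10 Gb/s dual-lane
		 * link.
		 */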
		switch (sw->link_speed * sw->link_width) {
		case 40:
			return 32;
		case 20:
			return 24;
		}
	}

	return 16;
}

static void tb_pci_init_path(struct tb_path *path)
{
	path->egress_fc_enable = TB_PATH_SOURCE | TB_PATH_INTERNAL;
	path->egress_shared_buffer = TB_PATH_NONE;
	path->ingress_fc_enable = TB_PATH_ALL;
	path->ingress_shared_buffer = TB_PATH_NONE;
	path->priority = 3;
	path->weight = 1;
	path->drop_packages = 0;
	path->nfc_credits = 0;
	path->hops[0].initial_credits = 7;
	if (path->path_length > 1)
		path->hops[1].initial_credits =
			tb_initial_credits(path->hops[1].in_port->sw);
}

/**
 * tb_tunnel_discover_pci() - Discover existing PCIe tunnels
 * @tb: Pointer to the domain structure
 * @down: PCIe downstream adapter
 *
 * If the @down adapter is active, follows the tunnel to the PCIe
 * upstream adapter and back. Returns the discovered tunnel or %NULL if
 * there was no tunnel.
 */
struct tb_tunnel *tb_tunnel_discover_pci(struct tb *tb, struct tb_port *down)
{
	struct tb_tunnel *tunnel;
	struct tb_path *path;

	if (!tb_pci_port_is_enabled(down))
		return NULL;

	tunnel = tb_tunnel_alloc(tb, 2, TB_TUNNEL_PCI);
	if (!tunnel)
		return NULL;

	tunnel->activate = tb_pci_activate;
	tunnel->src_port = down;

	/*
	 * Discover both paths even if they are not complete. We will
	 * clean them up by calling tb_tunnel_deactivate() below in that
	 * case.
	 */
	path = tb_path_discover(down, TB_PCI_HOPID, NULL, -1,
				&tunnel->dst_port, "PCIe Up");
	if (!path) {
		/* Just disable the downstream port */
		tb_pci_port_enable(down, false);
		goto err_free;
	}
	tunnel->paths[TB_PCI_PATH_UP] = path;
	tb_pci_init_path(tunnel->paths[TB_PCI_PATH_UP]);

	path = tb_path_discover(tunnel->dst_port, -1, down, TB_PCI_HOPID, NULL,
				"PCIe Down");
	if (!path)
		goto err_deactivate;
	tunnel->paths[TB_PCI_PATH_DOWN] = path;
	tb_pci_init_path(tunnel->paths[TB_PCI_PATH_DOWN]);

	/* Validate that the tunnel is complete */
	if (!tb_port_is_pcie_up(tunnel->dst_port)) {
		tb_port_warn(tunnel->dst_port,
			     "path does not end on a PCIe adapter, cleaning up\n");
		goto err_deactivate;
	}

	if (down != tunnel->src_port) {
		tb_tunnel_warn(tunnel, "path is not complete, cleaning up\n");
		goto err_deactivate;
	}

	if (!tb_pci_port_is_enabled(tunnel->dst_port)) {
		tb_tunnel_warn(tunnel,
			       "tunnel is not fully activated, cleaning up\n");
		goto err_deactivate;
	}

	tb_tunnel_dbg(tunnel, "discovered\n");
	return tunnel;

err_deactivate:
	tb_tunnel_deactivate(tunnel);
err_free:
	tb_tunnel_free(tunnel);

	return NULL;
}

/**
 * tb_tunnel_alloc_pci() - allocate a PCI tunnel
 * @tb: Pointer to the domain structure
 * @up: PCIe upstream adapter port
 * @down: PCIe downstream adapter port
 *
 * Allocate a PCI tunnel. The ports must be of type TB_TYPE_PCIE_UP and
 * TB_TYPE_PCIE_DOWN.
 *
 * Return: Returns a tb_tunnel on success or %NULL on failure.
 */
struct tb_tunnel *tb_tunnel_alloc_pci(struct tb *tb, struct tb_port *up,
				      struct tb_port *down)
{
	struct tb_tunnel *tunnel;
	struct tb_path *path;

	tunnel = tb_tunnel_alloc(tb, 2, TB_TUNNEL_PCI);
	if (!tunnel)
		return NULL;

	tunnel->activate = tb_pci_activate;
	tunnel->src_port = down;
	tunnel->dst_port = up;

	path = tb_path_alloc(tb, down, TB_PCI_HOPID, up, TB_PCI_HOPID, 0,
			     "PCIe Down");
	if (!path) {
		tb_tunnel_free(tunnel);
		return NULL;
	}
	tb_pci_init_path(path);
	tunnel->paths[TB_PCI_PATH_DOWN] = path;

	path = tb_path_alloc(tb, up, TB_PCI_HOPID, down, TB_PCI_HOPID, 0,
			     "PCIe Up");
	if (!path) {
		tb_tunnel_free(tunnel);
		return NULL;
	}
	tb_pci_init_path(path);
	tunnel->paths[TB_PCI_PATH_UP] = path;

	return tunnel;
}

static bool tb_dp_is_usb4(const struct tb_switch *sw)
{
	/* Titan Ridge DP adapters need the same treatment as USB4 */
	return tb_switch_is_usb4(sw) || tb_switch_is_titan_ridge(sw);
}

static int tb_dp_cm_handshake(struct tb_port *in, struct tb_port *out)
{
	int timeout = 10;
	u32 val;
	int ret;

	/* Both ends need to support this */
	if (!tb_dp_is_usb4(in->sw) || !tb_dp_is_usb4(out->sw))
		return 0;

	ret = tb_port_read(out, &val, TB_CFG_PORT,
			   out->cap_adap + DP_STATUS_CTRL, 1);
	if (ret)
		return ret;

	val |= DP_STATUS_CTRL_UF | DP_STATUS_CTRL_CMHS;

	ret = tb_port_write(out, &val, TB_CFG_PORT,
			    out->cap_adap + DP_STATUS_CTRL, 1);
	if (ret)
		return ret;

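	/*
	 * Wait for the DP OUT adapter to clear the CM handshake bit we
	 * just set, polling about ten times (roughly a millisecond in
	 * total) before giving up.
	 */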
	do {
		ret = tb_port_read(out, &val, TB_CFG_PORT,
				   out->cap_adap + DP_STATUS_CTRL, 1);
		if (ret)
			return ret;
		if (!(val & DP_STATUS_CTRL_CMHS))
			return 0;
		usleep_range(10, 100);
	} while (timeout--);

	return -ETIMEDOUT;
}

static inline u32 tb_dp_cap_get_rate(u32 val)
{
	u32 rate = (val & DP_COMMON_CAP_RATE_MASK) >> DP_COMMON_CAP_RATE_SHIFT;

	switch (rate) {
	case DP_COMMON_CAP_RATE_RBR:
		return 1620;
	case DP_COMMON_CAP_RATE_HBR:
		return 2700;
	case DP_COMMON_CAP_RATE_HBR2:
		return 5400;
	case DP_COMMON_CAP_RATE_HBR3:
		return 8100;
	default:
		return 0;
	}
}

static inline u32 tb_dp_cap_set_rate(u32 val, u32 rate)
{
	val &= ~DP_COMMON_CAP_RATE_MASK;
	switch (rate) {
	default:
		WARN(1, "invalid rate %u passed, defaulting to 1620 Mb/s\n", rate);
		/* Fallthrough */
	case 1620:
		val |= DP_COMMON_CAP_RATE_RBR << DP_COMMON_CAP_RATE_SHIFT;
		break;
	case 2700:
		val |= DP_COMMON_CAP_RATE_HBR << DP_COMMON_CAP_RATE_SHIFT;
		break;
	case 5400:
		val |= DP_COMMON_CAP_RATE_HBR2 << DP_COMMON_CAP_RATE_SHIFT;
		break;
	case 8100:
		val |= DP_COMMON_CAP_RATE_HBR3 << DP_COMMON_CAP_RATE_SHIFT;
		break;
	}
	return val;
}

static inline u32 tb_dp_cap_get_lanes(u32 val)
{
	u32 lanes = (val & DP_COMMON_CAP_LANES_MASK) >> DP_COMMON_CAP_LANES_SHIFT;

	switch (lanes) {
	case DP_COMMON_CAP_1_LANE:
		return 1;
	case DP_COMMON_CAP_2_LANES:
		return 2;
	case DP_COMMON_CAP_4_LANES:
		return 4;
	default:
		return 0;
	}
}

static inline u32 tb_dp_cap_set_lanes(u32 val, u32 lanes)
{
	val &= ~DP_COMMON_CAP_LANES_MASK;
	switch (lanes) {
	default:
		WARN(1, "invalid number of lanes %u passed, defaulting to 1\n",
		     lanes);
		/* Fallthrough */
	case 1:
		val |= DP_COMMON_CAP_1_LANE << DP_COMMON_CAP_LANES_SHIFT;
		break;
	case 2:
		val |= DP_COMMON_CAP_2_LANES << DP_COMMON_CAP_LANES_SHIFT;
		break;
	case 4:
		val |= DP_COMMON_CAP_4_LANES << DP_COMMON_CAP_LANES_SHIFT;
		break;
	}
	return val;
}

static unsigned int tb_dp_bandwidth(unsigned int rate, unsigned int lanes)
{
	/* Tunneling removes the DP 8b/10b encoding */
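	/* For example 5400 Mb/s (HBR2) x2 lanes: 5400 * 2 * 8 / 10 = 8640 Mb/s */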
	return rate * lanes * 8 / 10;
}

static int tb_dp_reduce_bandwidth(int max_bw, u32 in_rate, u32 in_lanes,
				  u32 out_rate, u32 out_lanes, u32 *new_rate,
				  u32 *new_lanes)
{
	static const u32 dp_bw[][2] = {
		/* Mb/s, lanes */
		{ 8100, 4 }, /* 25920 Mb/s */
		{ 5400, 4 }, /* 17280 Mb/s */
		{ 8100, 2 }, /* 12960 Mb/s */
		{ 2700, 4 }, /* 8640 Mb/s */
		{ 5400, 2 }, /* 8640 Mb/s */
		{ 8100, 1 }, /* 6480 Mb/s */
		{ 1620, 4 }, /* 5184 Mb/s */
		{ 5400, 1 }, /* 4320 Mb/s */
		{ 2700, 2 }, /* 4320 Mb/s */
		{ 1620, 2 }, /* 2592 Mb/s */
		{ 2700, 1 }, /* 2160 Mb/s */
		{ 1620, 1 }, /* 1296 Mb/s */
	};
	unsigned int i;

	/*
	 * Find a combination that can fit into max_bw and does not
	 * exceed the maximum rate and lanes supported by the DP OUT and
	 * DP IN adapters.
	 */
	for (i = 0; i < ARRAY_SIZE(dp_bw); i++) {
		if (dp_bw[i][0] > out_rate || dp_bw[i][1] > out_lanes)
			continue;

		if (dp_bw[i][0] > in_rate || dp_bw[i][1] > in_lanes)
			continue;

		if (tb_dp_bandwidth(dp_bw[i][0], dp_bw[i][1]) <= max_bw) {
			*new_rate = dp_bw[i][0];
			*new_lanes = dp_bw[i][1];
			return 0;
		}
	}

	return -ENOSR;
}

static int tb_dp_xchg_caps(struct tb_tunnel *tunnel)
{
	u32 out_dp_cap, out_rate, out_lanes, in_dp_cap, in_rate, in_lanes, bw;
	struct tb_port *out = tunnel->dst_port;
	struct tb_port *in = tunnel->src_port;
	int ret;

	/*
	 * Copy DP_LOCAL_CAP register to DP_REMOTE_CAP register for
	 * newer generation hardware.
	 */
	if (in->sw->generation < 2 || out->sw->generation < 2)
		return 0;

	/*
	 * Perform connection manager handshake between IN and OUT ports
	 * before capabilities exchange can take place.
	 */
	ret = tb_dp_cm_handshake(in, out);
	if (ret)
		return ret;

	/* Read both DP_LOCAL_CAP registers */
	ret = tb_port_read(in, &in_dp_cap, TB_CFG_PORT,
			   in->cap_adap + DP_LOCAL_CAP, 1);
	if (ret)
		return ret;

	ret = tb_port_read(out, &out_dp_cap, TB_CFG_PORT,
			   out->cap_adap + DP_LOCAL_CAP, 1);
	if (ret)
		return ret;

	/* Write IN local caps to OUT remote caps */
	ret = tb_port_write(out, &in_dp_cap, TB_CFG_PORT,
			    out->cap_adap + DP_REMOTE_CAP, 1);
	if (ret)
		return ret;

	in_rate = tb_dp_cap_get_rate(in_dp_cap);
	in_lanes = tb_dp_cap_get_lanes(in_dp_cap);
	tb_port_dbg(in, "maximum supported bandwidth %u Mb/s x%u = %u Mb/s\n",
		    in_rate, in_lanes, tb_dp_bandwidth(in_rate, in_lanes));

	/*
	 * If the tunnel bandwidth is limited (max_bw is set) then see
	 * if we need to reduce bandwidth to fit there.
	 */
	out_rate = tb_dp_cap_get_rate(out_dp_cap);
	out_lanes = tb_dp_cap_get_lanes(out_dp_cap);
	bw = tb_dp_bandwidth(out_rate, out_lanes);
	tb_port_dbg(out, "maximum supported bandwidth %u Mb/s x%u = %u Mb/s\n",
		    out_rate, out_lanes, bw);

	if (tunnel->max_bw && bw > tunnel->max_bw) {
		u32 new_rate, new_lanes, new_bw;

		ret = tb_dp_reduce_bandwidth(tunnel->max_bw, in_rate, in_lanes,
					     out_rate, out_lanes, &new_rate,
					     &new_lanes);
		if (ret) {
			tb_port_info(out, "not enough bandwidth for DP tunnel\n");
			return ret;
		}

		new_bw = tb_dp_bandwidth(new_rate, new_lanes);
		tb_port_dbg(out, "bandwidth reduced to %u Mb/s x%u = %u Mb/s\n",
			    new_rate, new_lanes, new_bw);

		/*
		 * Set new rate and number of lanes before writing it to
		 * the IN port remote caps.
		 */
		out_dp_cap = tb_dp_cap_set_rate(out_dp_cap, new_rate);
		out_dp_cap = tb_dp_cap_set_lanes(out_dp_cap, new_lanes);
	}

	return tb_port_write(in, &out_dp_cap, TB_CFG_PORT,
			     in->cap_adap + DP_REMOTE_CAP, 1);
}

static int tb_dp_activate(struct tb_tunnel *tunnel, bool active)
{
	int ret;

	if (active) {
		struct tb_path **paths;
		int last;

		paths = tunnel->paths;
		last = paths[TB_DP_VIDEO_PATH_OUT]->path_length - 1;

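		/*
		 * Program the Video and both AUX HopIDs into the DP IN
		 * (src_port) and DP OUT (dst_port) adapters at the two
		 * ends of the tunnel.
		 */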
		tb_dp_port_set_hops(tunnel->src_port,
			paths[TB_DP_VIDEO_PATH_OUT]->hops[0].in_hop_index,
			paths[TB_DP_AUX_PATH_OUT]->hops[0].in_hop_index,
			paths[TB_DP_AUX_PATH_IN]->hops[last].next_hop_index);

		tb_dp_port_set_hops(tunnel->dst_port,
			paths[TB_DP_VIDEO_PATH_OUT]->hops[last].next_hop_index,
			paths[TB_DP_AUX_PATH_IN]->hops[0].in_hop_index,
			paths[TB_DP_AUX_PATH_OUT]->hops[last].next_hop_index);
	} else {
		tb_dp_port_hpd_clear(tunnel->src_port);
		tb_dp_port_set_hops(tunnel->src_port, 0, 0, 0);
		if (tb_port_is_dpout(tunnel->dst_port))
			tb_dp_port_set_hops(tunnel->dst_port, 0, 0, 0);
	}

	ret = tb_dp_port_enable(tunnel->src_port, active);
	if (ret)
		return ret;

	if (tb_port_is_dpout(tunnel->dst_port))
		return tb_dp_port_enable(tunnel->dst_port, active);

	return 0;
}

static int tb_dp_consumed_bandwidth(struct tb_tunnel *tunnel, int *consumed_up,
				    int *consumed_down)
{
	struct tb_port *in = tunnel->src_port;
	const struct tb_switch *sw = in->sw;
	u32 val, rate = 0, lanes = 0;
	int ret;

	if (tb_dp_is_usb4(sw)) {
		int timeout = 10;

		/*
		 * Wait for DPRX done. Normally it should be already set
		 * for an active tunnel.
		 */
		do {
			ret = tb_port_read(in, &val, TB_CFG_PORT,
					   in->cap_adap + DP_COMMON_CAP, 1);
			if (ret)
				return ret;

			if (val & DP_COMMON_CAP_DPRX_DONE) {
				rate = tb_dp_cap_get_rate(val);
				lanes = tb_dp_cap_get_lanes(val);
				break;
			}
			msleep(250);
		} while (timeout--);

		if (!(val & DP_COMMON_CAP_DPRX_DONE))
			return -ETIMEDOUT;
	} else if (sw->generation >= 2) {
		/*
		 * Read from the copied remote cap so that we take into
		 * account if capabilities were reduced during exchange.
		 */
		ret = tb_port_read(in, &val, TB_CFG_PORT,
				   in->cap_adap + DP_REMOTE_CAP, 1);
		if (ret)
			return ret;

		rate = tb_dp_cap_get_rate(val);
		lanes = tb_dp_cap_get_lanes(val);
	} else {
		/* No bandwidth management for legacy devices */
		*consumed_up = 0;
		*consumed_down = 0;
		return 0;
	}

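	/*
	 * If the DP IN adapter is closer to the host router (smaller
	 * depth) the video stream flows away from the host, so only the
	 * "down" direction consumes bandwidth, and vice versa.
	 */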
	if (in->sw->config.depth < tunnel->dst_port->sw->config.depth) {
		*consumed_up = 0;
		*consumed_down = tb_dp_bandwidth(rate, lanes);
	} else {
		*consumed_up = tb_dp_bandwidth(rate, lanes);
		*consumed_down = 0;
	}

	return 0;
}

static void tb_dp_init_aux_path(struct tb_path *path)
{
	int i;

	path->egress_fc_enable = TB_PATH_SOURCE | TB_PATH_INTERNAL;
	path->egress_shared_buffer = TB_PATH_NONE;
	path->ingress_fc_enable = TB_PATH_ALL;
	path->ingress_shared_buffer = TB_PATH_NONE;
	path->priority = 2;
	path->weight = 1;

	for (i = 0; i < path->path_length; i++)
		path->hops[i].initial_credits = 1;
}

static void tb_dp_init_video_path(struct tb_path *path, bool discover)
{
	u32 nfc_credits = path->hops[0].in_port->config.nfc_credits;

	path->egress_fc_enable = TB_PATH_NONE;
	path->egress_shared_buffer = TB_PATH_NONE;
	path->ingress_fc_enable = TB_PATH_NONE;
	path->ingress_shared_buffer = TB_PATH_NONE;
	path->priority = 1;
	path->weight = 1;

	if (discover) {
		path->nfc_credits = nfc_credits & ADP_CS_4_NFC_BUFFERS_MASK;
	} else {
		u32 max_credits;

		max_credits = (nfc_credits & ADP_CS_4_TOTAL_BUFFERS_MASK) >>
			ADP_CS_4_TOTAL_BUFFERS_SHIFT;
		/* Leave some credits for AUX path */
		path->nfc_credits = min(max_credits - 2, 12U);
	}
}

/**
 * tb_tunnel_discover_dp() - Discover existing Display Port tunnels
 * @tb: Pointer to the domain structure
 * @in: DP in adapter
 *
 * If the @in adapter is active, follows the tunnel to the DP out
 * adapter and back.
 *
 * Return: DP tunnel or %NULL if no tunnel was found.
 */
struct tb_tunnel *tb_tunnel_discover_dp(struct tb *tb, struct tb_port *in)
{
	struct tb_tunnel *tunnel;
	struct tb_port *port;
	struct tb_path *path;

	if (!tb_dp_port_is_enabled(in))
		return NULL;

	tunnel = tb_tunnel_alloc(tb, 3, TB_TUNNEL_DP);
	if (!tunnel)
		return NULL;

	tunnel->init = tb_dp_xchg_caps;
	tunnel->activate = tb_dp_activate;
	tunnel->consumed_bandwidth = tb_dp_consumed_bandwidth;
	tunnel->src_port = in;

	path = tb_path_discover(in, TB_DP_VIDEO_HOPID, NULL, -1,
				&tunnel->dst_port, "Video");
	if (!path) {
		/* Just disable the DP IN port */
		tb_dp_port_enable(in, false);
		goto err_free;
	}
	tunnel->paths[TB_DP_VIDEO_PATH_OUT] = path;
	tb_dp_init_video_path(tunnel->paths[TB_DP_VIDEO_PATH_OUT], true);

	path = tb_path_discover(in, TB_DP_AUX_TX_HOPID, NULL, -1, NULL, "AUX TX");
	if (!path)
		goto err_deactivate;
	tunnel->paths[TB_DP_AUX_PATH_OUT] = path;
	tb_dp_init_aux_path(tunnel->paths[TB_DP_AUX_PATH_OUT]);

	path = tb_path_discover(tunnel->dst_port, -1, in, TB_DP_AUX_RX_HOPID,
				&port, "AUX RX");
	if (!path)
		goto err_deactivate;
	tunnel->paths[TB_DP_AUX_PATH_IN] = path;
	tb_dp_init_aux_path(tunnel->paths[TB_DP_AUX_PATH_IN]);

	/* Validate that the tunnel is complete */
	if (!tb_port_is_dpout(tunnel->dst_port)) {
		tb_port_warn(in, "path does not end on a DP adapter, cleaning up\n");
		goto err_deactivate;
	}

	if (!tb_dp_port_is_enabled(tunnel->dst_port))
		goto err_deactivate;

	if (!tb_dp_port_hpd_is_active(tunnel->dst_port))
		goto err_deactivate;

	if (port != tunnel->src_port) {
		tb_tunnel_warn(tunnel, "path is not complete, cleaning up\n");
		goto err_deactivate;
	}

	tb_tunnel_dbg(tunnel, "discovered\n");
	return tunnel;

err_deactivate:
	tb_tunnel_deactivate(tunnel);
err_free:
	tb_tunnel_free(tunnel);

	return NULL;
}

/**
 * tb_tunnel_alloc_dp() - allocate a Display Port tunnel
 * @tb: Pointer to the domain structure
 * @in: DP in adapter port
 * @out: DP out adapter port
 * @max_bw: Maximum available bandwidth for the DP tunnel (%0 if not limited)
 *
 * Allocates a tunnel between @in and @out that is capable of tunneling
 * Display Port traffic.
 *
 * Return: Returns a tb_tunnel on success or %NULL on failure.
 */
struct tb_tunnel *tb_tunnel_alloc_dp(struct tb *tb, struct tb_port *in,
				     struct tb_port *out, int max_bw)
{
	struct tb_tunnel *tunnel;
	struct tb_path **paths;
	struct tb_path *path;

	if (WARN_ON(!in->cap_adap || !out->cap_adap))
		return NULL;

	tunnel = tb_tunnel_alloc(tb, 3, TB_TUNNEL_DP);
	if (!tunnel)
		return NULL;

	tunnel->init = tb_dp_xchg_caps;
	tunnel->activate = tb_dp_activate;
	tunnel->consumed_bandwidth = tb_dp_consumed_bandwidth;
	tunnel->src_port = in;
	tunnel->dst_port = out;
	tunnel->max_bw = max_bw;

	paths = tunnel->paths;

	path = tb_path_alloc(tb, in, TB_DP_VIDEO_HOPID, out, TB_DP_VIDEO_HOPID,
			     1, "Video");
	if (!path)
		goto err_free;
	tb_dp_init_video_path(path, false);
	paths[TB_DP_VIDEO_PATH_OUT] = path;

	path = tb_path_alloc(tb, in, TB_DP_AUX_TX_HOPID, out,
			     TB_DP_AUX_TX_HOPID, 1, "AUX TX");
	if (!path)
		goto err_free;
	tb_dp_init_aux_path(path);
	paths[TB_DP_AUX_PATH_OUT] = path;

	path = tb_path_alloc(tb, out, TB_DP_AUX_RX_HOPID, in,
			     TB_DP_AUX_RX_HOPID, 1, "AUX RX");
	if (!path)
		goto err_free;
	tb_dp_init_aux_path(path);
	paths[TB_DP_AUX_PATH_IN] = path;

	return tunnel;

err_free:
	tb_tunnel_free(tunnel);
	return NULL;
}

static u32 tb_dma_credits(struct tb_port *nhi)
{
	u32 max_credits;

	max_credits = (nhi->config.nfc_credits & ADP_CS_4_TOTAL_BUFFERS_MASK) >>
		ADP_CS_4_TOTAL_BUFFERS_SHIFT;
	return min(max_credits, 13U);
}

static int tb_dma_activate(struct tb_tunnel *tunnel, bool active)
{
	struct tb_port *nhi = tunnel->src_port;
	u32 credits;

	credits = active ? tb_dma_credits(nhi) : 0;
	return tb_port_set_initial_credits(nhi, credits);
}

static void tb_dma_init_path(struct tb_path *path, unsigned int isb,
			     unsigned int efc, u32 credits)
{
	int i;

	path->egress_fc_enable = efc;
	path->ingress_fc_enable = TB_PATH_ALL;
	path->egress_shared_buffer = TB_PATH_NONE;
	path->ingress_shared_buffer = isb;
	path->priority = 5;
	path->weight = 1;
	path->clear_fc = true;

	for (i = 0; i < path->path_length; i++)
		path->hops[i].initial_credits = credits;
}

/**
 * tb_tunnel_alloc_dma() - allocate a DMA tunnel
 * @tb: Pointer to the domain structure
 * @nhi: Host controller port
 * @dst: Destination null port which the other domain is connected to
 * @transmit_ring: NHI ring number used to send packets towards the
 *		   other domain
 * @transmit_path: HopID used for transmitting packets
 * @receive_ring: NHI ring number used to receive packets from the
 *		  other domain
 * @receive_path: HopID used for receiving packets
 *
 * Return: Returns a tb_tunnel on success or %NULL on failure.
 */
struct tb_tunnel *tb_tunnel_alloc_dma(struct tb *tb, struct tb_port *nhi,
				      struct tb_port *dst, int transmit_ring,
				      int transmit_path, int receive_ring,
				      int receive_path)
{
	struct tb_tunnel *tunnel;
	struct tb_path *path;
	u32 credits;

	tunnel = tb_tunnel_alloc(tb, 2, TB_TUNNEL_DMA);
	if (!tunnel)
		return NULL;

	tunnel->activate = tb_dma_activate;
	tunnel->src_port = nhi;
	tunnel->dst_port = dst;

	credits = tb_dma_credits(nhi);

	path = tb_path_alloc(tb, dst, receive_path, nhi, receive_ring, 0, "DMA RX");
	if (!path) {
		tb_tunnel_free(tunnel);
		return NULL;
	}
	tb_dma_init_path(path, TB_PATH_NONE, TB_PATH_SOURCE | TB_PATH_INTERNAL,
			 credits);
	tunnel->paths[TB_DMA_PATH_IN] = path;

	path = tb_path_alloc(tb, nhi, transmit_ring, dst, transmit_path, 0, "DMA TX");
	if (!path) {
		tb_tunnel_free(tunnel);
		return NULL;
	}
	tb_dma_init_path(path, TB_PATH_SOURCE, TB_PATH_ALL, credits);
	tunnel->paths[TB_DMA_PATH_OUT] = path;

	return tunnel;
}

static int tb_usb3_activate(struct tb_tunnel *tunnel, bool activate)
{
	int res;

	res = tb_usb3_port_enable(tunnel->src_port, activate);
	if (res)
		return res;

	if (tb_port_is_usb3_up(tunnel->dst_port))
		return tb_usb3_port_enable(tunnel->dst_port, activate);

	return 0;
}

static void tb_usb3_init_path(struct tb_path *path)
{
	path->egress_fc_enable = TB_PATH_SOURCE | TB_PATH_INTERNAL;
	path->egress_shared_buffer = TB_PATH_NONE;
	path->ingress_fc_enable = TB_PATH_ALL;
	path->ingress_shared_buffer = TB_PATH_NONE;
	path->priority = 3;
	path->weight = 3;
	path->drop_packages = 0;
	path->nfc_credits = 0;
	path->hops[0].initial_credits = 7;
	if (path->path_length > 1)
		path->hops[1].initial_credits =
			tb_initial_credits(path->hops[1].in_port->sw);
}

/**
 * tb_tunnel_discover_usb3() - Discover existing USB3 tunnels
 * @tb: Pointer to the domain structure
 * @down: USB3 downstream adapter
 *
 * If the @down adapter is active, follows the tunnel to the USB3
 * upstream adapter and back. Returns the discovered tunnel or %NULL if
 * there was no tunnel.
 */
struct tb_tunnel *tb_tunnel_discover_usb3(struct tb *tb, struct tb_port *down)
{
	struct tb_tunnel *tunnel;
	struct tb_path *path;

	if (!tb_usb3_port_is_enabled(down))
		return NULL;

	tunnel = tb_tunnel_alloc(tb, 2, TB_TUNNEL_USB3);
	if (!tunnel)
		return NULL;

	tunnel->activate = tb_usb3_activate;
	tunnel->src_port = down;

	/*
	 * Discover both paths even if they are not complete. We will
	 * clean them up by calling tb_tunnel_deactivate() below in that
	 * case.
	 */
	path = tb_path_discover(down, TB_USB3_HOPID, NULL, -1,
				&tunnel->dst_port, "USB3 Down");
	if (!path) {
		/* Just disable the downstream port */
		tb_usb3_port_enable(down, false);
		goto err_free;
	}
	tunnel->paths[TB_USB3_PATH_DOWN] = path;
	tb_usb3_init_path(tunnel->paths[TB_USB3_PATH_DOWN]);

	path = tb_path_discover(tunnel->dst_port, -1, down, TB_USB3_HOPID, NULL,
				"USB3 Up");
	if (!path)
		goto err_deactivate;
	tunnel->paths[TB_USB3_PATH_UP] = path;
	tb_usb3_init_path(tunnel->paths[TB_USB3_PATH_UP]);

	/* Validate that the tunnel is complete */
	if (!tb_port_is_usb3_up(tunnel->dst_port)) {
		tb_port_warn(tunnel->dst_port,
			     "path does not end on a USB3 adapter, cleaning up\n");
		goto err_deactivate;
	}

	if (down != tunnel->src_port) {
		tb_tunnel_warn(tunnel, "path is not complete, cleaning up\n");
		goto err_deactivate;
	}

	if (!tb_usb3_port_is_enabled(tunnel->dst_port)) {
		tb_tunnel_warn(tunnel,
			       "tunnel is not fully activated, cleaning up\n");
		goto err_deactivate;
	}

	tb_tunnel_dbg(tunnel, "discovered\n");
	return tunnel;

err_deactivate:
	tb_tunnel_deactivate(tunnel);
err_free:
	tb_tunnel_free(tunnel);

	return NULL;
}

/**
 * tb_tunnel_alloc_usb3() - allocate a USB3 tunnel
 * @tb: Pointer to the domain structure
 * @up: USB3 upstream adapter port
 * @down: USB3 downstream adapter port
 *
 * Allocate a USB3 tunnel. The ports must be of type @TB_TYPE_USB3_UP and
 * @TB_TYPE_USB3_DOWN.
 *
 * Return: Returns a tb_tunnel on success or %NULL on failure.
 */
struct tb_tunnel *tb_tunnel_alloc_usb3(struct tb *tb, struct tb_port *up,
				       struct tb_port *down)
{
	struct tb_tunnel *tunnel;
	struct tb_path *path;

	tunnel = tb_tunnel_alloc(tb, 2, TB_TUNNEL_USB3);
	if (!tunnel)
		return NULL;

	tunnel->activate = tb_usb3_activate;
	tunnel->src_port = down;
	tunnel->dst_port = up;

	path = tb_path_alloc(tb, down, TB_USB3_HOPID, up, TB_USB3_HOPID, 0,
			     "USB3 Down");
	if (!path) {
		tb_tunnel_free(tunnel);
		return NULL;
	}
	tb_usb3_init_path(path);
	tunnel->paths[TB_USB3_PATH_DOWN] = path;

	path = tb_path_alloc(tb, up, TB_USB3_HOPID, down, TB_USB3_HOPID, 0,
			     "USB3 Up");
	if (!path) {
		tb_tunnel_free(tunnel);
		return NULL;
	}
	tb_usb3_init_path(path);
	tunnel->paths[TB_USB3_PATH_UP] = path;

	return tunnel;
}

/**
 * tb_tunnel_free() - free a tunnel
 * @tunnel: Tunnel to be freed
 *
 * Frees a tunnel. The tunnel does not need to be deactivated.
 */
void tb_tunnel_free(struct tb_tunnel *tunnel)
{
	int i;

	if (!tunnel)
		return;

	for (i = 0; i < tunnel->npaths; i++) {
		if (tunnel->paths[i])
			tb_path_free(tunnel->paths[i]);
	}

	kfree(tunnel->paths);
	kfree(tunnel);
}

/**
 * tb_tunnel_is_invalid() - check whether an activated path is still valid
 * @tunnel: Tunnel to check
 */
bool tb_tunnel_is_invalid(struct tb_tunnel *tunnel)
{
	int i;

	for (i = 0; i < tunnel->npaths; i++) {
		WARN_ON(!tunnel->paths[i]->activated);
		if (tb_path_is_invalid(tunnel->paths[i]))
			return true;
	}

	return false;
}

/**
 * tb_tunnel_restart() - activate a tunnel after a hardware reset
 * @tunnel: Tunnel to restart
 *
 * Return: 0 on success and negative errno in case of failure
 */
int tb_tunnel_restart(struct tb_tunnel *tunnel)
{
	int res, i;

	tb_tunnel_dbg(tunnel, "activating\n");

	/*
	 * Make sure all paths are properly disabled before enabling
	 * them again.
	 */
	for (i = 0; i < tunnel->npaths; i++) {
		if (tunnel->paths[i]->activated) {
			tb_path_deactivate(tunnel->paths[i]);
			tunnel->paths[i]->activated = false;
		}
	}

	if (tunnel->init) {
		res = tunnel->init(tunnel);
		if (res)
			return res;
	}

	for (i = 0; i < tunnel->npaths; i++) {
		res = tb_path_activate(tunnel->paths[i]);
		if (res)
			goto err;
	}

	if (tunnel->activate) {
		res = tunnel->activate(tunnel, true);
		if (res)
			goto err;
	}

	return 0;

err:
	tb_tunnel_warn(tunnel, "activation failed\n");
	tb_tunnel_deactivate(tunnel);
	return res;
}

/**
 * tb_tunnel_activate() - activate a tunnel
 * @tunnel: Tunnel to activate
 *
 * Return: Returns 0 on success or an error code on failure.
 */
int tb_tunnel_activate(struct tb_tunnel *tunnel)
{
	int i;

	for (i = 0; i < tunnel->npaths; i++) {
		if (tunnel->paths[i]->activated) {
			tb_tunnel_WARN(tunnel,
				       "trying to activate an already activated tunnel\n");
			return -EINVAL;
		}
	}

	return tb_tunnel_restart(tunnel);
}

/**
 * tb_tunnel_deactivate() - deactivate a tunnel
 * @tunnel: Tunnel to deactivate
 */
void tb_tunnel_deactivate(struct tb_tunnel *tunnel)
{
	int i;

	tb_tunnel_dbg(tunnel, "deactivating\n");

	if (tunnel->activate)
		tunnel->activate(tunnel, false);

	for (i = 0; i < tunnel->npaths; i++) {
		if (tunnel->paths[i] && tunnel->paths[i]->activated)
			tb_path_deactivate(tunnel->paths[i]);
	}
}

/**
 * tb_tunnel_switch_on_path() - Does the tunnel go through the switch
 * @tunnel: Tunnel to check
 * @sw: Switch to check
 *
 * Returns true if @tunnel goes through @sw (direction does not matter),
 * false otherwise.
 */
bool tb_tunnel_switch_on_path(const struct tb_tunnel *tunnel,
			      const struct tb_switch *sw)
{
	int i;

	for (i = 0; i < tunnel->npaths; i++) {
		if (!tunnel->paths[i])
			continue;
		if (tb_path_switch_on_path(tunnel->paths[i], sw))
			return true;
	}

	return false;
}

static bool tb_tunnel_is_active(const struct tb_tunnel *tunnel)
{
	int i;

	for (i = 0; i < tunnel->npaths; i++) {
		if (!tunnel->paths[i])
			return false;
		if (!tunnel->paths[i]->activated)
			return false;
	}

	return true;
}

/**
 * tb_tunnel_consumed_bandwidth() - Return bandwidth consumed by the tunnel
 * @tunnel: Tunnel to check
 * @consumed_up: Consumed bandwidth in Mb/s from @dst_port to @src_port.
 *		 Can be %NULL.
 * @consumed_down: Consumed bandwidth in Mb/s from @src_port to @dst_port.
 *		   Can be %NULL.
 *
 * Stores the amount of isochronous bandwidth @tunnel consumes in
 * @consumed_up and @consumed_down. In case of success returns %0,
 * negative errno otherwise.
 */
int tb_tunnel_consumed_bandwidth(struct tb_tunnel *tunnel, int *consumed_up,
				 int *consumed_down)
{
	int up_bw = 0, down_bw = 0;

	if (!tb_tunnel_is_active(tunnel))
		goto out;

	if (tunnel->consumed_bandwidth) {
		int ret;

		ret = tunnel->consumed_bandwidth(tunnel, &up_bw, &down_bw);
		if (ret)
			return ret;

		tb_tunnel_dbg(tunnel, "consumed bandwidth %d/%d Mb/s\n", up_bw,
			      down_bw);
	}

out:
	if (consumed_up)
		*consumed_up = up_bw;
	if (consumed_down)
		*consumed_down = down_bw;

	return 0;
}