// SPDX-License-Identifier: GPL-2.0
/*
 * Thunderbolt driver - Tunneling support
 *
 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
 * Copyright (C) 2019, Intel Corporation
 */

#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/list.h>

#include "tunnel.h"
#include "tb.h"

/* PCIe adapters always use HopID 8 for both directions */
#define TB_PCI_HOPID 8

#define TB_PCI_PATH_DOWN 0
#define TB_PCI_PATH_UP 1

/* USB3 adapters always use HopID 8 for both directions */
#define TB_USB3_HOPID 8

#define TB_USB3_PATH_DOWN 0
#define TB_USB3_PATH_UP 1

/* DP adapters use HopID 8 for AUX and 9 for Video */
#define TB_DP_AUX_TX_HOPID 8
#define TB_DP_AUX_RX_HOPID 8
#define TB_DP_VIDEO_HOPID 9

#define TB_DP_VIDEO_PATH_OUT 0
#define TB_DP_AUX_PATH_OUT 1
#define TB_DP_AUX_PATH_IN 2

static const char * const tb_tunnel_names[] = { "PCI", "DP", "DMA", "USB3" };

#define __TB_TUNNEL_PRINT(level, tunnel, fmt, arg...)                   \
        do {                                                            \
                struct tb_tunnel *__tunnel = (tunnel);                  \
                level(__tunnel->tb, "%llx:%x <-> %llx:%x (%s): " fmt,   \
                      tb_route(__tunnel->src_port->sw),                 \
                      __tunnel->src_port->port,                         \
                      tb_route(__tunnel->dst_port->sw),                 \
                      __tunnel->dst_port->port,                         \
                      tb_tunnel_names[__tunnel->type],                  \
                      ## arg);                                          \
        } while (0)

#define tb_tunnel_WARN(tunnel, fmt, arg...) \
        __TB_TUNNEL_PRINT(tb_WARN, tunnel, fmt, ##arg)
#define tb_tunnel_warn(tunnel, fmt, arg...) \
        __TB_TUNNEL_PRINT(tb_warn, tunnel, fmt, ##arg)
#define tb_tunnel_info(tunnel, fmt, arg...) \
        __TB_TUNNEL_PRINT(tb_info, tunnel, fmt, ##arg)
#define tb_tunnel_dbg(tunnel, fmt, arg...) \
        __TB_TUNNEL_PRINT(tb_dbg, tunnel, fmt, ##arg)
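
/*
 * Per the format string above, tb_tunnel_dbg(tunnel, "activating\n")
 * produces a message such as "0:4 <-> 1:9 (DP): activating", i.e.
 * route:port of both tunnel ends followed by the tunnel type (the
 * route and port values here are illustrative; they depend on the
 * topology).
 */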

static struct tb_tunnel *tb_tunnel_alloc(struct tb *tb, size_t npaths,
                                         enum tb_tunnel_type type)
{
        struct tb_tunnel *tunnel;

        tunnel = kzalloc(sizeof(*tunnel), GFP_KERNEL);
        if (!tunnel)
                return NULL;

        tunnel->paths = kcalloc(npaths, sizeof(tunnel->paths[0]), GFP_KERNEL);
        if (!tunnel->paths) {
                tb_tunnel_free(tunnel);
                return NULL;
        }

        INIT_LIST_HEAD(&tunnel->list);
        tunnel->tb = tb;
        tunnel->npaths = npaths;
        tunnel->type = type;

        return tunnel;
}

static int tb_pci_activate(struct tb_tunnel *tunnel, bool activate)
{
        int res;

        res = tb_pci_port_enable(tunnel->src_port, activate);
        if (res)
                return res;

        if (tb_port_is_pcie_up(tunnel->dst_port))
                return tb_pci_port_enable(tunnel->dst_port, activate);

        return 0;
}

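/*
 * A worked example of the credit scaling below: a bonded Thunderbolt 3
 * link (link_speed 20, link_width 2) gives 20 * 2 = 40 and thus 32
 * initial credits; a single 20 Gb/s lane or a bonded 10 Gb/s link gives
 * 20 and thus 24 credits; anything else falls back to 16.
 */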
static int tb_initial_credits(const struct tb_switch *sw)
{
        /* If the path is complete, sw is not NULL */
        if (sw) {
                /* More credits for a faster link */
                switch (sw->link_speed * sw->link_width) {
                case 40:
                        return 32;
                case 20:
                        return 24;
                }
        }

        return 16;
}

static void tb_pci_init_path(struct tb_path *path)
{
        path->egress_fc_enable = TB_PATH_SOURCE | TB_PATH_INTERNAL;
        path->egress_shared_buffer = TB_PATH_NONE;
        path->ingress_fc_enable = TB_PATH_ALL;
        path->ingress_shared_buffer = TB_PATH_NONE;
        path->priority = 3;
        path->weight = 1;
        path->drop_packages = 0;
        path->nfc_credits = 0;
        path->hops[0].initial_credits = 7;
        if (path->path_length > 1)
                path->hops[1].initial_credits =
                        tb_initial_credits(path->hops[1].in_port->sw);
}

/**
 * tb_tunnel_discover_pci() - Discover existing PCIe tunnels
 * @tb: Pointer to the domain structure
 * @down: PCIe downstream adapter
 *
 * If @down adapter is active, follows the tunnel to the PCIe upstream
 * adapter and back. Returns the discovered tunnel or %NULL if there was
 * no tunnel.
 */
struct tb_tunnel *tb_tunnel_discover_pci(struct tb *tb, struct tb_port *down)
{
        struct tb_tunnel *tunnel;
        struct tb_path *path;

        if (!tb_pci_port_is_enabled(down))
                return NULL;

        tunnel = tb_tunnel_alloc(tb, 2, TB_TUNNEL_PCI);
        if (!tunnel)
                return NULL;

        tunnel->activate = tb_pci_activate;
        tunnel->src_port = down;

        /*
         * Discover both paths even if they are not complete. We will
         * clean them up by calling tb_tunnel_deactivate() below in that
         * case.
         */
        path = tb_path_discover(down, TB_PCI_HOPID, NULL, -1,
                                &tunnel->dst_port, "PCIe Up");
        if (!path) {
                /* Just disable the downstream port */
                tb_pci_port_enable(down, false);
                goto err_free;
        }
        tunnel->paths[TB_PCI_PATH_UP] = path;
        tb_pci_init_path(tunnel->paths[TB_PCI_PATH_UP]);

        path = tb_path_discover(tunnel->dst_port, -1, down, TB_PCI_HOPID, NULL,
                                "PCIe Down");
        if (!path)
                goto err_deactivate;
        tunnel->paths[TB_PCI_PATH_DOWN] = path;
        tb_pci_init_path(tunnel->paths[TB_PCI_PATH_DOWN]);

        /* Validate that the tunnel is complete */
        if (!tb_port_is_pcie_up(tunnel->dst_port)) {
                tb_port_warn(tunnel->dst_port,
                             "path does not end on a PCIe adapter, cleaning up\n");
                goto err_deactivate;
        }

        if (down != tunnel->src_port) {
                tb_tunnel_warn(tunnel, "path is not complete, cleaning up\n");
                goto err_deactivate;
        }

        if (!tb_pci_port_is_enabled(tunnel->dst_port)) {
                tb_tunnel_warn(tunnel,
                               "tunnel is not fully activated, cleaning up\n");
                goto err_deactivate;
        }

        tb_tunnel_dbg(tunnel, "discovered\n");
        return tunnel;

err_deactivate:
        tb_tunnel_deactivate(tunnel);
err_free:
        tb_tunnel_free(tunnel);

        return NULL;
}

/**
 * tb_tunnel_alloc_pci() - allocate a PCIe tunnel
 * @tb: Pointer to the domain structure
 * @up: PCIe upstream adapter port
 * @down: PCIe downstream adapter port
 *
 * Allocate a PCIe tunnel. The ports must be of type TB_TYPE_PCIE_UP and
 * TB_TYPE_PCIE_DOWN.
 *
 * Return: Returns a tb_tunnel on success or NULL on failure.
 */
struct tb_tunnel *tb_tunnel_alloc_pci(struct tb *tb, struct tb_port *up,
                                      struct tb_port *down)
{
        struct tb_tunnel *tunnel;
        struct tb_path *path;

        tunnel = tb_tunnel_alloc(tb, 2, TB_TUNNEL_PCI);
        if (!tunnel)
                return NULL;

        tunnel->activate = tb_pci_activate;
        tunnel->src_port = down;
        tunnel->dst_port = up;

        path = tb_path_alloc(tb, down, TB_PCI_HOPID, up, TB_PCI_HOPID, 0,
                             "PCIe Down");
        if (!path) {
                tb_tunnel_free(tunnel);
                return NULL;
        }
        tb_pci_init_path(path);
        tunnel->paths[TB_PCI_PATH_DOWN] = path;

        path = tb_path_alloc(tb, up, TB_PCI_HOPID, down, TB_PCI_HOPID, 0,
                             "PCIe Up");
        if (!path) {
                tb_tunnel_free(tunnel);
                return NULL;
        }
        tb_pci_init_path(path);
        tunnel->paths[TB_PCI_PATH_UP] = path;

        return tunnel;
}

static bool tb_dp_is_usb4(const struct tb_switch *sw)
{
        /* Titan Ridge DP adapters need the same treatment as USB4 */
        return tb_switch_is_usb4(sw) || tb_switch_is_titan_ridge(sw);
}

static int tb_dp_cm_handshake(struct tb_port *in, struct tb_port *out)
{
        int timeout = 10;
        u32 val;
        int ret;

        /* Both ends need to support this */
        if (!tb_dp_is_usb4(in->sw) || !tb_dp_is_usb4(out->sw))
                return 0;

        ret = tb_port_read(out, &val, TB_CFG_PORT,
                           out->cap_adap + DP_STATUS_CTRL, 1);
        if (ret)
                return ret;

        val |= DP_STATUS_CTRL_UF | DP_STATUS_CTRL_CMHS;

        ret = tb_port_write(out, &val, TB_CFG_PORT,
                            out->cap_adap + DP_STATUS_CTRL, 1);
        if (ret)
                return ret;

        do {
                ret = tb_port_read(out, &val, TB_CFG_PORT,
                                   out->cap_adap + DP_STATUS_CTRL, 1);
                if (ret)
                        return ret;
                if (!(val & DP_STATUS_CTRL_CMHS))
                        return 0;
                usleep_range(10, 100);
        } while (timeout--);

        return -ETIMEDOUT;
}

static inline u32 tb_dp_cap_get_rate(u32 val)
{
        u32 rate = (val & DP_COMMON_CAP_RATE_MASK) >> DP_COMMON_CAP_RATE_SHIFT;

        switch (rate) {
        case DP_COMMON_CAP_RATE_RBR:
                return 1620;
        case DP_COMMON_CAP_RATE_HBR:
                return 2700;
        case DP_COMMON_CAP_RATE_HBR2:
                return 5400;
        case DP_COMMON_CAP_RATE_HBR3:
                return 8100;
        default:
                return 0;
        }
}

static inline u32 tb_dp_cap_set_rate(u32 val, u32 rate)
{
        val &= ~DP_COMMON_CAP_RATE_MASK;
        switch (rate) {
        default:
                WARN(1, "invalid rate %u passed, defaulting to 1620 Mb/s\n", rate);
                fallthrough;
        case 1620:
                val |= DP_COMMON_CAP_RATE_RBR << DP_COMMON_CAP_RATE_SHIFT;
                break;
        case 2700:
                val |= DP_COMMON_CAP_RATE_HBR << DP_COMMON_CAP_RATE_SHIFT;
                break;
        case 5400:
                val |= DP_COMMON_CAP_RATE_HBR2 << DP_COMMON_CAP_RATE_SHIFT;
                break;
        case 8100:
                val |= DP_COMMON_CAP_RATE_HBR3 << DP_COMMON_CAP_RATE_SHIFT;
                break;
        }
        return val;
}

static inline u32 tb_dp_cap_get_lanes(u32 val)
{
        u32 lanes = (val & DP_COMMON_CAP_LANES_MASK) >> DP_COMMON_CAP_LANES_SHIFT;

        switch (lanes) {
        case DP_COMMON_CAP_1_LANE:
                return 1;
        case DP_COMMON_CAP_2_LANES:
                return 2;
        case DP_COMMON_CAP_4_LANES:
                return 4;
        default:
                return 0;
        }
}

static inline u32 tb_dp_cap_set_lanes(u32 val, u32 lanes)
{
        val &= ~DP_COMMON_CAP_LANES_MASK;
        switch (lanes) {
        default:
                WARN(1, "invalid number of lanes %u passed, defaulting to 1\n",
                     lanes);
                fallthrough;
        case 1:
                val |= DP_COMMON_CAP_1_LANE << DP_COMMON_CAP_LANES_SHIFT;
                break;
        case 2:
                val |= DP_COMMON_CAP_2_LANES << DP_COMMON_CAP_LANES_SHIFT;
                break;
        case 4:
                val |= DP_COMMON_CAP_4_LANES << DP_COMMON_CAP_LANES_SHIFT;
                break;
        }
        return val;
}

static unsigned int tb_dp_bandwidth(unsigned int rate, unsigned int lanes)
{
        /* Tunneling removes the DP 8b/10b encoding */
        return rate * lanes * 8 / 10;
}

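/*
 * For example, HBR2 over four lanes carries 5400 * 4 * 8 / 10 =
 * 17280 Mb/s of actual DP payload, matching the second entry of the
 * table below. The table is sorted by decreasing bandwidth so that the
 * first combination that fits yields the highest usable rate.
 */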
static int tb_dp_reduce_bandwidth(int max_bw, u32 in_rate, u32 in_lanes,
                                  u32 out_rate, u32 out_lanes, u32 *new_rate,
                                  u32 *new_lanes)
{
        static const u32 dp_bw[][2] = {
                /* Mb/s, lanes */
                { 8100, 4 }, /* 25920 Mb/s */
                { 5400, 4 }, /* 17280 Mb/s */
                { 8100, 2 }, /* 12960 Mb/s */
                { 2700, 4 }, /* 8640 Mb/s */
                { 5400, 2 }, /* 8640 Mb/s */
                { 8100, 1 }, /* 6480 Mb/s */
                { 1620, 4 }, /* 5184 Mb/s */
                { 5400, 1 }, /* 4320 Mb/s */
                { 2700, 2 }, /* 4320 Mb/s */
                { 1620, 2 }, /* 2592 Mb/s */
                { 2700, 1 }, /* 2160 Mb/s */
                { 1620, 1 }, /* 1296 Mb/s */
        };
        unsigned int i;

        /*
         * Find a combination that can fit into max_bw and does not
         * exceed the maximum rate and lanes supported by the DP OUT and
         * DP IN adapters.
         */
        for (i = 0; i < ARRAY_SIZE(dp_bw); i++) {
                if (dp_bw[i][0] > out_rate || dp_bw[i][1] > out_lanes)
                        continue;

                if (dp_bw[i][0] > in_rate || dp_bw[i][1] > in_lanes)
                        continue;

                if (tb_dp_bandwidth(dp_bw[i][0], dp_bw[i][1]) <= max_bw) {
                        *new_rate = dp_bw[i][0];
                        *new_lanes = dp_bw[i][1];
                        return 0;
                }
        }

        return -ENOSR;
}

static int tb_dp_xchg_caps(struct tb_tunnel *tunnel)
{
        u32 out_dp_cap, out_rate, out_lanes, in_dp_cap, in_rate, in_lanes, bw;
        struct tb_port *out = tunnel->dst_port;
        struct tb_port *in = tunnel->src_port;
        int ret, max_bw;

        /*
         * Copy DP_LOCAL_CAP register to DP_REMOTE_CAP register for
         * newer generation hardware.
         */
        if (in->sw->generation < 2 || out->sw->generation < 2)
                return 0;

        /*
         * Perform connection manager handshake between IN and OUT ports
         * before capabilities exchange can take place.
         */
        ret = tb_dp_cm_handshake(in, out);
        if (ret)
                return ret;

        /* Read both DP_LOCAL_CAP registers */
        ret = tb_port_read(in, &in_dp_cap, TB_CFG_PORT,
                           in->cap_adap + DP_LOCAL_CAP, 1);
        if (ret)
                return ret;

        ret = tb_port_read(out, &out_dp_cap, TB_CFG_PORT,
                           out->cap_adap + DP_LOCAL_CAP, 1);
        if (ret)
                return ret;

        /* Write IN local caps to OUT remote caps */
        ret = tb_port_write(out, &in_dp_cap, TB_CFG_PORT,
                            out->cap_adap + DP_REMOTE_CAP, 1);
        if (ret)
                return ret;

        in_rate = tb_dp_cap_get_rate(in_dp_cap);
        in_lanes = tb_dp_cap_get_lanes(in_dp_cap);
        tb_port_dbg(in, "maximum supported bandwidth %u Mb/s x%u = %u Mb/s\n",
                    in_rate, in_lanes, tb_dp_bandwidth(in_rate, in_lanes));

        /*
         * If the tunnel bandwidth is limited (max_bw is set), see if we
         * need to reduce the bandwidth to make it fit.
         */
        out_rate = tb_dp_cap_get_rate(out_dp_cap);
        out_lanes = tb_dp_cap_get_lanes(out_dp_cap);
        bw = tb_dp_bandwidth(out_rate, out_lanes);
        tb_port_dbg(out, "maximum supported bandwidth %u Mb/s x%u = %u Mb/s\n",
                    out_rate, out_lanes, bw);

        if (in->sw->config.depth < out->sw->config.depth)
                max_bw = tunnel->max_down;
        else
                max_bw = tunnel->max_up;

        if (max_bw && bw > max_bw) {
                u32 new_rate, new_lanes, new_bw;

                ret = tb_dp_reduce_bandwidth(max_bw, in_rate, in_lanes,
                                             out_rate, out_lanes, &new_rate,
                                             &new_lanes);
                if (ret) {
                        tb_port_info(out, "not enough bandwidth for DP tunnel\n");
                        return ret;
                }

                new_bw = tb_dp_bandwidth(new_rate, new_lanes);
                tb_port_dbg(out, "bandwidth reduced to %u Mb/s x%u = %u Mb/s\n",
                            new_rate, new_lanes, new_bw);

                /*
                 * Set new rate and number of lanes before writing it to
                 * the IN port remote caps.
                 */
                out_dp_cap = tb_dp_cap_set_rate(out_dp_cap, new_rate);
                out_dp_cap = tb_dp_cap_set_lanes(out_dp_cap, new_lanes);
        }

        return tb_port_write(in, &out_dp_cap, TB_CFG_PORT,
                             in->cap_adap + DP_REMOTE_CAP, 1);
}

static int tb_dp_activate(struct tb_tunnel *tunnel, bool active)
{
        int ret;

        if (active) {
                struct tb_path **paths;
                int last;

                paths = tunnel->paths;
                last = paths[TB_DP_VIDEO_PATH_OUT]->path_length - 1;

                tb_dp_port_set_hops(tunnel->src_port,
                        paths[TB_DP_VIDEO_PATH_OUT]->hops[0].in_hop_index,
                        paths[TB_DP_AUX_PATH_OUT]->hops[0].in_hop_index,
                        paths[TB_DP_AUX_PATH_IN]->hops[last].next_hop_index);

                tb_dp_port_set_hops(tunnel->dst_port,
                        paths[TB_DP_VIDEO_PATH_OUT]->hops[last].next_hop_index,
                        paths[TB_DP_AUX_PATH_IN]->hops[0].in_hop_index,
                        paths[TB_DP_AUX_PATH_OUT]->hops[last].next_hop_index);
        } else {
                tb_dp_port_hpd_clear(tunnel->src_port);
                tb_dp_port_set_hops(tunnel->src_port, 0, 0, 0);
                if (tb_port_is_dpout(tunnel->dst_port))
                        tb_dp_port_set_hops(tunnel->dst_port, 0, 0, 0);
        }

        ret = tb_dp_port_enable(tunnel->src_port, active);
        if (ret)
                return ret;

        if (tb_port_is_dpout(tunnel->dst_port))
                return tb_dp_port_enable(tunnel->dst_port, active);

        return 0;
}

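/*
 * Direction note for the accounting below: if the DP IN adapter sits
 * closer to the host than the DP OUT adapter (smaller depth), the video
 * stream flows away from the host, so the whole consumed bandwidth is
 * booked as downstream; otherwise it is booked as upstream.
 */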
static int tb_dp_consumed_bandwidth(struct tb_tunnel *tunnel, int *consumed_up,
                                    int *consumed_down)
{
        struct tb_port *in = tunnel->src_port;
        const struct tb_switch *sw = in->sw;
        u32 val, rate = 0, lanes = 0;
        int ret;

        if (tb_dp_is_usb4(sw)) {
                int timeout = 20;

                /*
                 * Wait for DPRX done. Normally it should already be set
                 * for an active tunnel.
                 */
                do {
                        ret = tb_port_read(in, &val, TB_CFG_PORT,
                                           in->cap_adap + DP_COMMON_CAP, 1);
                        if (ret)
                                return ret;

                        if (val & DP_COMMON_CAP_DPRX_DONE) {
                                rate = tb_dp_cap_get_rate(val);
                                lanes = tb_dp_cap_get_lanes(val);
                                break;
                        }
                        msleep(250);
                } while (timeout--);

                /*
                 * Test the flag itself rather than the counter: after
                 * the loop expires timeout is -1, and a success on the
                 * last iteration leaves it at 0.
                 */
                if (!(val & DP_COMMON_CAP_DPRX_DONE))
                        return -ETIMEDOUT;
        } else if (sw->generation >= 2) {
                /*
                 * Read from the copied remote cap so that we take into
                 * account if capabilities were reduced during exchange.
                 */
                ret = tb_port_read(in, &val, TB_CFG_PORT,
                                   in->cap_adap + DP_REMOTE_CAP, 1);
                if (ret)
                        return ret;

                rate = tb_dp_cap_get_rate(val);
                lanes = tb_dp_cap_get_lanes(val);
        } else {
                /* No bandwidth management for legacy devices */
                *consumed_up = 0;
                *consumed_down = 0;
                return 0;
        }

        if (in->sw->config.depth < tunnel->dst_port->sw->config.depth) {
                *consumed_up = 0;
                *consumed_down = tb_dp_bandwidth(rate, lanes);
        } else {
                *consumed_up = tb_dp_bandwidth(rate, lanes);
                *consumed_down = 0;
        }

        return 0;
}

static void tb_dp_init_aux_path(struct tb_path *path)
{
        int i;

        path->egress_fc_enable = TB_PATH_SOURCE | TB_PATH_INTERNAL;
        path->egress_shared_buffer = TB_PATH_NONE;
        path->ingress_fc_enable = TB_PATH_ALL;
        path->ingress_shared_buffer = TB_PATH_NONE;
        path->priority = 2;
        path->weight = 1;

        for (i = 0; i < path->path_length; i++)
                path->hops[i].initial_credits = 1;
}

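/*
 * Video paths run without flow control, so the credit budget matters.
 * A sketch of the non-discovery math below, assuming the input port
 * reports 20 total buffers: min(20 - 2, 12) = 12 non-flow-controlled
 * credits, with the remainder left over for the AUX paths.
 */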
static void tb_dp_init_video_path(struct tb_path *path, bool discover)
{
        u32 nfc_credits = path->hops[0].in_port->config.nfc_credits;

        path->egress_fc_enable = TB_PATH_NONE;
        path->egress_shared_buffer = TB_PATH_NONE;
        path->ingress_fc_enable = TB_PATH_NONE;
        path->ingress_shared_buffer = TB_PATH_NONE;
        path->priority = 1;
        path->weight = 1;

        if (discover) {
                path->nfc_credits = nfc_credits & ADP_CS_4_NFC_BUFFERS_MASK;
        } else {
                u32 max_credits;

                max_credits = (nfc_credits & ADP_CS_4_TOTAL_BUFFERS_MASK) >>
                              ADP_CS_4_TOTAL_BUFFERS_SHIFT;
                /* Leave some credits for AUX path */
                path->nfc_credits = min(max_credits - 2, 12U);
        }
}

/**
 * tb_tunnel_discover_dp() - Discover existing Display Port tunnels
 * @tb: Pointer to the domain structure
 * @in: DP in adapter
 *
 * If @in adapter is active, follows the tunnel to the DP out adapter
 * and back.
 *
 * Return: DP tunnel or %NULL if no tunnel found.
 */
struct tb_tunnel *tb_tunnel_discover_dp(struct tb *tb, struct tb_port *in)
{
        struct tb_tunnel *tunnel;
        struct tb_port *port;
        struct tb_path *path;

        if (!tb_dp_port_is_enabled(in))
                return NULL;

        tunnel = tb_tunnel_alloc(tb, 3, TB_TUNNEL_DP);
        if (!tunnel)
                return NULL;

        tunnel->init = tb_dp_xchg_caps;
        tunnel->activate = tb_dp_activate;
        tunnel->consumed_bandwidth = tb_dp_consumed_bandwidth;
        tunnel->src_port = in;

        path = tb_path_discover(in, TB_DP_VIDEO_HOPID, NULL, -1,
                                &tunnel->dst_port, "Video");
        if (!path) {
                /* Just disable the DP IN port */
                tb_dp_port_enable(in, false);
                goto err_free;
        }
        tunnel->paths[TB_DP_VIDEO_PATH_OUT] = path;
        tb_dp_init_video_path(tunnel->paths[TB_DP_VIDEO_PATH_OUT], true);

        path = tb_path_discover(in, TB_DP_AUX_TX_HOPID, NULL, -1, NULL, "AUX TX");
        if (!path)
                goto err_deactivate;
        tunnel->paths[TB_DP_AUX_PATH_OUT] = path;
        tb_dp_init_aux_path(tunnel->paths[TB_DP_AUX_PATH_OUT]);

        path = tb_path_discover(tunnel->dst_port, -1, in, TB_DP_AUX_RX_HOPID,
                                &port, "AUX RX");
        if (!path)
                goto err_deactivate;
        tunnel->paths[TB_DP_AUX_PATH_IN] = path;
        tb_dp_init_aux_path(tunnel->paths[TB_DP_AUX_PATH_IN]);

        /* Validate that the tunnel is complete */
        if (!tb_port_is_dpout(tunnel->dst_port)) {
                tb_port_warn(in, "path does not end on a DP adapter, cleaning up\n");
                goto err_deactivate;
        }

        if (!tb_dp_port_is_enabled(tunnel->dst_port))
                goto err_deactivate;

        if (!tb_dp_port_hpd_is_active(tunnel->dst_port))
                goto err_deactivate;

        if (port != tunnel->src_port) {
                tb_tunnel_warn(tunnel, "path is not complete, cleaning up\n");
                goto err_deactivate;
        }

        tb_tunnel_dbg(tunnel, "discovered\n");
        return tunnel;

err_deactivate:
        tb_tunnel_deactivate(tunnel);
err_free:
        tb_tunnel_free(tunnel);

        return NULL;
}

/**
 * tb_tunnel_alloc_dp() - allocate a Display Port tunnel
 * @tb: Pointer to the domain structure
 * @in: DP in adapter port
 * @out: DP out adapter port
 * @max_up: Maximum available upstream bandwidth for the DP tunnel (%0
 *          if not limited)
 * @max_down: Maximum available downstream bandwidth for the DP tunnel
 *            (%0 if not limited)
 *
 * Allocates a tunnel between @in and @out that is capable of tunneling
 * Display Port traffic.
 *
 * Return: Returns a tb_tunnel on success or NULL on failure.
 */
struct tb_tunnel *tb_tunnel_alloc_dp(struct tb *tb, struct tb_port *in,
                                     struct tb_port *out, int max_up,
                                     int max_down)
{
        struct tb_tunnel *tunnel;
        struct tb_path **paths;
        struct tb_path *path;

        if (WARN_ON(!in->cap_adap || !out->cap_adap))
                return NULL;

        tunnel = tb_tunnel_alloc(tb, 3, TB_TUNNEL_DP);
        if (!tunnel)
                return NULL;

        tunnel->init = tb_dp_xchg_caps;
        tunnel->activate = tb_dp_activate;
        tunnel->consumed_bandwidth = tb_dp_consumed_bandwidth;
        tunnel->src_port = in;
        tunnel->dst_port = out;
        tunnel->max_up = max_up;
        tunnel->max_down = max_down;

        paths = tunnel->paths;

        path = tb_path_alloc(tb, in, TB_DP_VIDEO_HOPID, out, TB_DP_VIDEO_HOPID,
                             1, "Video");
        if (!path)
                goto err_free;
        tb_dp_init_video_path(path, false);
        paths[TB_DP_VIDEO_PATH_OUT] = path;

        path = tb_path_alloc(tb, in, TB_DP_AUX_TX_HOPID, out,
                             TB_DP_AUX_TX_HOPID, 1, "AUX TX");
        if (!path)
                goto err_free;
        tb_dp_init_aux_path(path);
        paths[TB_DP_AUX_PATH_OUT] = path;

        path = tb_path_alloc(tb, out, TB_DP_AUX_RX_HOPID, in,
                             TB_DP_AUX_RX_HOPID, 1, "AUX RX");
        if (!path)
                goto err_free;
        tb_dp_init_aux_path(path);
        paths[TB_DP_AUX_PATH_IN] = path;

        return tunnel;

err_free:
        tb_tunnel_free(tunnel);
        return NULL;
}

static u32 tb_dma_credits(struct tb_port *nhi)
{
        u32 max_credits;

        max_credits = (nhi->config.nfc_credits & ADP_CS_4_TOTAL_BUFFERS_MASK) >>
                      ADP_CS_4_TOTAL_BUFFERS_SHIFT;
        return min(max_credits, 13U);
}

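/*
 * Each hop of a DMA path gets the credit count computed above: e.g. an
 * NHI adapter advertising 16 total buffers is capped to min(16, 13) =
 * 13 initial credits per hop, while smaller controllers simply use
 * whatever they advertise.
 */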
static void tb_dma_init_path(struct tb_path *path, unsigned int efc, u32 credits)
{
        int i;

        path->egress_fc_enable = efc;
        path->ingress_fc_enable = TB_PATH_ALL;
        path->egress_shared_buffer = TB_PATH_NONE;
        path->ingress_shared_buffer = TB_PATH_NONE;
        path->priority = 5;
        path->weight = 1;
        path->clear_fc = true;

        for (i = 0; i < path->path_length; i++)
                path->hops[i].initial_credits = credits;
}

/**
 * tb_tunnel_alloc_dma() - allocate a DMA tunnel
 * @tb: Pointer to the domain structure
 * @nhi: Host controller port
 * @dst: Destination null port which the other domain is connected to
 * @transmit_ring: NHI ring number used to send packets towards the
 *                 other domain. Set to %0 if TX path is not needed.
 * @transmit_path: HopID used for transmitting packets
 * @receive_ring: NHI ring number used to receive packets from the
 *                other domain. Set to %0 if RX path is not needed.
 * @receive_path: HopID used for receiving packets
 *
 * Return: Returns a tb_tunnel on success or NULL on failure.
 */
struct tb_tunnel *tb_tunnel_alloc_dma(struct tb *tb, struct tb_port *nhi,
                                      struct tb_port *dst, int transmit_ring,
                                      int transmit_path, int receive_ring,
                                      int receive_path)
{
        struct tb_tunnel *tunnel;
        size_t npaths = 0, i = 0;
        struct tb_path *path;
        u32 credits;

        if (receive_ring)
                npaths++;
        if (transmit_ring)
                npaths++;

        if (WARN_ON(!npaths))
                return NULL;

        tunnel = tb_tunnel_alloc(tb, npaths, TB_TUNNEL_DMA);
        if (!tunnel)
                return NULL;

        tunnel->src_port = nhi;
        tunnel->dst_port = dst;

        credits = tb_dma_credits(nhi);

        if (receive_ring) {
                path = tb_path_alloc(tb, dst, receive_path, nhi, receive_ring, 0,
                                     "DMA RX");
                if (!path) {
                        tb_tunnel_free(tunnel);
                        return NULL;
                }
                tb_dma_init_path(path, TB_PATH_SOURCE | TB_PATH_INTERNAL, credits);
                tunnel->paths[i++] = path;
        }

        if (transmit_ring) {
                path = tb_path_alloc(tb, nhi, transmit_ring, dst, transmit_path, 0,
                                     "DMA TX");
                if (!path) {
                        tb_tunnel_free(tunnel);
                        return NULL;
                }
                tb_dma_init_path(path, TB_PATH_ALL, credits);
                tunnel->paths[i++] = path;
        }

        return tunnel;
}
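
/*
 * A minimal usage sketch (the ring and HopID values here are
 * illustrative, not mandated by this file):
 *
 *      tunnel = tb_tunnel_alloc_dma(tb, nhi_port, dst_port, 1, 8, 1, 8);
 *      if (tunnel && tb_tunnel_activate(tunnel)) {
 *              tb_tunnel_deactivate(tunnel);
 *              tb_tunnel_free(tunnel);
 *      }
 */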

static int tb_usb3_max_link_rate(struct tb_port *up, struct tb_port *down)
{
        int ret, up_max_rate, down_max_rate;

        ret = usb4_usb3_port_max_link_rate(up);
        if (ret < 0)
                return ret;
        up_max_rate = ret;

        ret = usb4_usb3_port_max_link_rate(down);
        if (ret < 0)
                return ret;
        down_max_rate = ret;

        return min(up_max_rate, down_max_rate);
}

static int tb_usb3_init(struct tb_tunnel *tunnel)
{
        tb_tunnel_dbg(tunnel, "allocating initial bandwidth %d/%d Mb/s\n",
                      tunnel->allocated_up, tunnel->allocated_down);

        return usb4_usb3_port_allocate_bandwidth(tunnel->src_port,
                                                 &tunnel->allocated_up,
                                                 &tunnel->allocated_down);
}

static int tb_usb3_activate(struct tb_tunnel *tunnel, bool activate)
{
        int res;

        res = tb_usb3_port_enable(tunnel->src_port, activate);
        if (res)
                return res;

        if (tb_port_is_usb3_up(tunnel->dst_port))
                return tb_usb3_port_enable(tunnel->dst_port, activate);

        return 0;
}

Mika Westerberg0bd680c2020-03-24 14:44:13 +0200920static int tb_usb3_consumed_bandwidth(struct tb_tunnel *tunnel,
921 int *consumed_up, int *consumed_down)
922{
Mika Westerbergc6da62a2020-02-18 16:14:42 +0200923 int pcie_enabled = tb_acpi_may_tunnel_pcie();
924
Mika Westerberg0bd680c2020-03-24 14:44:13 +0200925 /*
Mika Westerbergc6da62a2020-02-18 16:14:42 +0200926 * PCIe tunneling, if enabled, affects the USB3 bandwidth so
927 * take that it into account here.
Mika Westerberg0bd680c2020-03-24 14:44:13 +0200928 */
Mika Westerbergc6da62a2020-02-18 16:14:42 +0200929 *consumed_up = tunnel->allocated_up * (3 + pcie_enabled) / 3;
930 *consumed_down = tunnel->allocated_down * (3 + pcie_enabled) / 3;
Mika Westerberg0bd680c2020-03-24 14:44:13 +0200931 return 0;
932}
933
static int tb_usb3_release_unused_bandwidth(struct tb_tunnel *tunnel)
{
        int ret;

        ret = usb4_usb3_port_release_bandwidth(tunnel->src_port,
                                               &tunnel->allocated_up,
                                               &tunnel->allocated_down);
        if (ret)
                return ret;

        tb_tunnel_dbg(tunnel, "decreased bandwidth allocation to %d/%d Mb/s\n",
                      tunnel->allocated_up, tunnel->allocated_down);
        return 0;
}

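/*
 * Reclaiming below honours the 90% rule: e.g. a 10 Gb/s (10000 Mb/s)
 * USB3 link allows at most 10000 * 90 / 100 = 9000 Mb/s to be
 * allocated for isochronous transfers.
 */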
static void tb_usb3_reclaim_available_bandwidth(struct tb_tunnel *tunnel,
                                                int *available_up,
                                                int *available_down)
{
        int ret, max_rate, allocate_up, allocate_down;

        ret = usb4_usb3_port_actual_link_rate(tunnel->src_port);
        if (ret < 0) {
                tb_tunnel_warn(tunnel, "failed to read actual link rate\n");
                return;
        } else if (!ret) {
                /* Use the maximum link rate if the link valid bit is not set */
                ret = usb4_usb3_port_max_link_rate(tunnel->src_port);
                if (ret < 0) {
                        tb_tunnel_warn(tunnel, "failed to read maximum link rate\n");
                        return;
                }
        }

        /*
         * 90% of the max rate can be allocated for isochronous
         * transfers.
         */
        max_rate = ret * 90 / 100;

        /* No need to reclaim if already at maximum */
        if (tunnel->allocated_up >= max_rate &&
            tunnel->allocated_down >= max_rate)
                return;

        /* Don't go lower than what is already allocated */
        allocate_up = min(max_rate, *available_up);
        if (allocate_up < tunnel->allocated_up)
                allocate_up = tunnel->allocated_up;

        allocate_down = min(max_rate, *available_down);
        if (allocate_down < tunnel->allocated_down)
                allocate_down = tunnel->allocated_down;

        /* If nothing changed, there is no need to do more */
        if (allocate_up == tunnel->allocated_up &&
            allocate_down == tunnel->allocated_down)
                return;

        ret = usb4_usb3_port_allocate_bandwidth(tunnel->src_port, &allocate_up,
                                                &allocate_down);
        if (ret) {
                tb_tunnel_info(tunnel, "failed to allocate bandwidth\n");
                return;
        }

        tunnel->allocated_up = allocate_up;
        *available_up -= tunnel->allocated_up;

        tunnel->allocated_down = allocate_down;
        *available_down -= tunnel->allocated_down;

        tb_tunnel_dbg(tunnel, "increased bandwidth allocation to %d/%d Mb/s\n",
                      tunnel->allocated_up, tunnel->allocated_down);
}

static void tb_usb3_init_path(struct tb_path *path)
{
        path->egress_fc_enable = TB_PATH_SOURCE | TB_PATH_INTERNAL;
        path->egress_shared_buffer = TB_PATH_NONE;
        path->ingress_fc_enable = TB_PATH_ALL;
        path->ingress_shared_buffer = TB_PATH_NONE;
        path->priority = 3;
        path->weight = 3;
        path->drop_packages = 0;
        path->nfc_credits = 0;
        path->hops[0].initial_credits = 7;
        if (path->path_length > 1)
                path->hops[1].initial_credits =
                        tb_initial_credits(path->hops[1].in_port->sw);
}

/**
 * tb_tunnel_discover_usb3() - Discover existing USB3 tunnels
 * @tb: Pointer to the domain structure
 * @down: USB3 downstream adapter
 *
 * If @down adapter is active, follows the tunnel to the USB3 upstream
 * adapter and back. Returns the discovered tunnel or %NULL if there was
 * no tunnel.
 */
struct tb_tunnel *tb_tunnel_discover_usb3(struct tb *tb, struct tb_port *down)
{
        struct tb_tunnel *tunnel;
        struct tb_path *path;

        if (!tb_usb3_port_is_enabled(down))
                return NULL;

        tunnel = tb_tunnel_alloc(tb, 2, TB_TUNNEL_USB3);
        if (!tunnel)
                return NULL;

        tunnel->activate = tb_usb3_activate;
        tunnel->src_port = down;

        /*
         * Discover both paths even if they are not complete. We will
         * clean them up by calling tb_tunnel_deactivate() below in that
         * case.
         */
        path = tb_path_discover(down, TB_USB3_HOPID, NULL, -1,
                                &tunnel->dst_port, "USB3 Down");
        if (!path) {
                /* Just disable the downstream port */
                tb_usb3_port_enable(down, false);
                goto err_free;
        }
        tunnel->paths[TB_USB3_PATH_DOWN] = path;
        tb_usb3_init_path(tunnel->paths[TB_USB3_PATH_DOWN]);

        path = tb_path_discover(tunnel->dst_port, -1, down, TB_USB3_HOPID, NULL,
                                "USB3 Up");
        if (!path)
                goto err_deactivate;
        tunnel->paths[TB_USB3_PATH_UP] = path;
        tb_usb3_init_path(tunnel->paths[TB_USB3_PATH_UP]);

        /* Validate that the tunnel is complete */
        if (!tb_port_is_usb3_up(tunnel->dst_port)) {
                tb_port_warn(tunnel->dst_port,
                             "path does not end on a USB3 adapter, cleaning up\n");
                goto err_deactivate;
        }

        if (down != tunnel->src_port) {
                tb_tunnel_warn(tunnel, "path is not complete, cleaning up\n");
                goto err_deactivate;
        }

        if (!tb_usb3_port_is_enabled(tunnel->dst_port)) {
                tb_tunnel_warn(tunnel,
                               "tunnel is not fully activated, cleaning up\n");
                goto err_deactivate;
        }

        if (!tb_route(down->sw)) {
                int ret;

                /*
                 * Read the initial bandwidth allocation for the first
                 * hop tunnel.
                 */
                ret = usb4_usb3_port_allocated_bandwidth(down,
                        &tunnel->allocated_up, &tunnel->allocated_down);
                if (ret)
                        goto err_deactivate;

                tb_tunnel_dbg(tunnel, "currently allocated bandwidth %d/%d Mb/s\n",
                              tunnel->allocated_up, tunnel->allocated_down);

                tunnel->init = tb_usb3_init;
                tunnel->consumed_bandwidth = tb_usb3_consumed_bandwidth;
                tunnel->release_unused_bandwidth =
                        tb_usb3_release_unused_bandwidth;
                tunnel->reclaim_available_bandwidth =
                        tb_usb3_reclaim_available_bandwidth;
        }

        tb_tunnel_dbg(tunnel, "discovered\n");
        return tunnel;

err_deactivate:
        tb_tunnel_deactivate(tunnel);
err_free:
        tb_tunnel_free(tunnel);

        return NULL;
}

/**
 * tb_tunnel_alloc_usb3() - allocate a USB3 tunnel
 * @tb: Pointer to the domain structure
 * @up: USB3 upstream adapter port
 * @down: USB3 downstream adapter port
 * @max_up: Maximum available upstream bandwidth for the USB3 tunnel (%0
 *          if not limited).
 * @max_down: Maximum available downstream bandwidth for the USB3 tunnel
 *            (%0 if not limited).
 *
 * Allocate a USB3 tunnel. The ports must be of type @TB_TYPE_USB3_UP and
 * @TB_TYPE_USB3_DOWN.
 *
 * Return: Returns a tb_tunnel on success or %NULL on failure.
 */
struct tb_tunnel *tb_tunnel_alloc_usb3(struct tb *tb, struct tb_port *up,
                                       struct tb_port *down, int max_up,
                                       int max_down)
{
        struct tb_tunnel *tunnel;
        struct tb_path *path;
        int max_rate = 0;

        /*
         * Check that we have enough bandwidth available for the new
         * USB3 tunnel.
         */
        if (max_up > 0 || max_down > 0) {
                max_rate = tb_usb3_max_link_rate(down, up);
                if (max_rate < 0)
                        return NULL;

                /* Only 90% can be allocated for USB3 isochronous transfers */
                max_rate = max_rate * 90 / 100;
                tb_port_dbg(up, "required bandwidth for USB3 tunnel %d Mb/s\n",
                            max_rate);

                if (max_rate > max_up || max_rate > max_down) {
                        tb_port_warn(up, "not enough bandwidth for USB3 tunnel\n");
                        return NULL;
                }
        }

        tunnel = tb_tunnel_alloc(tb, 2, TB_TUNNEL_USB3);
        if (!tunnel)
                return NULL;

        tunnel->activate = tb_usb3_activate;
        tunnel->src_port = down;
        tunnel->dst_port = up;
        tunnel->max_up = max_up;
        tunnel->max_down = max_down;

        path = tb_path_alloc(tb, down, TB_USB3_HOPID, up, TB_USB3_HOPID, 0,
                             "USB3 Down");
        if (!path) {
                tb_tunnel_free(tunnel);
                return NULL;
        }
        tb_usb3_init_path(path);
        tunnel->paths[TB_USB3_PATH_DOWN] = path;

        path = tb_path_alloc(tb, up, TB_USB3_HOPID, down, TB_USB3_HOPID, 0,
                             "USB3 Up");
        if (!path) {
                tb_tunnel_free(tunnel);
                return NULL;
        }
        tb_usb3_init_path(path);
        tunnel->paths[TB_USB3_PATH_UP] = path;

        if (!tb_route(down->sw)) {
                tunnel->allocated_up = max_rate;
                tunnel->allocated_down = max_rate;

                tunnel->init = tb_usb3_init;
                tunnel->consumed_bandwidth = tb_usb3_consumed_bandwidth;
                tunnel->release_unused_bandwidth =
                        tb_usb3_release_unused_bandwidth;
                tunnel->reclaim_available_bandwidth =
                        tb_usb3_reclaim_available_bandwidth;
        }

        return tunnel;
}

/**
 * tb_tunnel_free() - free a tunnel
 * @tunnel: Tunnel to be freed
 *
 * Frees a tunnel. The tunnel does not need to be deactivated.
 */
void tb_tunnel_free(struct tb_tunnel *tunnel)
{
        int i;

        if (!tunnel)
                return;

        for (i = 0; i < tunnel->npaths; i++) {
                if (tunnel->paths[i])
                        tb_path_free(tunnel->paths[i]);
        }

        kfree(tunnel->paths);
        kfree(tunnel);
}

/**
 * tb_tunnel_is_invalid() - check whether an activated tunnel is still valid
 * @tunnel: Tunnel to check
 */
bool tb_tunnel_is_invalid(struct tb_tunnel *tunnel)
{
        int i;

        for (i = 0; i < tunnel->npaths; i++) {
                WARN_ON(!tunnel->paths[i]->activated);
                if (tb_path_is_invalid(tunnel->paths[i]))
                        return true;
        }

        return false;
}

/**
 * tb_tunnel_restart() - activate a tunnel after a hardware reset
 * @tunnel: Tunnel to restart
 *
 * Return: 0 on success and negative errno in case of failure
 */
int tb_tunnel_restart(struct tb_tunnel *tunnel)
{
        int res, i;

        tb_tunnel_dbg(tunnel, "activating\n");

        /*
         * Make sure all paths are properly disabled before enabling
         * them again.
         */
        for (i = 0; i < tunnel->npaths; i++) {
                if (tunnel->paths[i]->activated) {
                        tb_path_deactivate(tunnel->paths[i]);
                        tunnel->paths[i]->activated = false;
                }
        }

        if (tunnel->init) {
                res = tunnel->init(tunnel);
                if (res)
                        return res;
        }

        for (i = 0; i < tunnel->npaths; i++) {
                res = tb_path_activate(tunnel->paths[i]);
                if (res)
                        goto err;
        }

        if (tunnel->activate) {
                res = tunnel->activate(tunnel, true);
                if (res)
                        goto err;
        }

        return 0;

err:
        tb_tunnel_warn(tunnel, "activation failed\n");
        tb_tunnel_deactivate(tunnel);
        return res;
}

/**
 * tb_tunnel_activate() - activate a tunnel
 * @tunnel: Tunnel to activate
 *
 * Return: Returns 0 on success or an error code on failure.
 */
int tb_tunnel_activate(struct tb_tunnel *tunnel)
{
        int i;

        for (i = 0; i < tunnel->npaths; i++) {
                if (tunnel->paths[i]->activated) {
                        tb_tunnel_WARN(tunnel,
                                       "trying to activate an already activated tunnel\n");
                        return -EINVAL;
                }
        }

        return tb_tunnel_restart(tunnel);
}

/**
 * tb_tunnel_deactivate() - deactivate a tunnel
 * @tunnel: Tunnel to deactivate
 */
void tb_tunnel_deactivate(struct tb_tunnel *tunnel)
{
        int i;

        tb_tunnel_dbg(tunnel, "deactivating\n");

        if (tunnel->activate)
                tunnel->activate(tunnel, false);

        for (i = 0; i < tunnel->npaths; i++) {
                if (tunnel->paths[i] && tunnel->paths[i]->activated)
                        tb_path_deactivate(tunnel->paths[i]);
        }
}

/**
 * tb_tunnel_port_on_path() - Does the tunnel go through port
 * @tunnel: Tunnel to check
 * @port: Port to check
 *
 * Returns true if @tunnel goes through @port (direction does not matter),
 * false otherwise.
 */
bool tb_tunnel_port_on_path(const struct tb_tunnel *tunnel,
                            const struct tb_port *port)
{
        int i;

        for (i = 0; i < tunnel->npaths; i++) {
                if (!tunnel->paths[i])
                        continue;

                if (tb_path_port_on_path(tunnel->paths[i], port))
                        return true;
        }

        return false;
}

static bool tb_tunnel_is_active(const struct tb_tunnel *tunnel)
{
        int i;

        for (i = 0; i < tunnel->npaths; i++) {
                if (!tunnel->paths[i])
                        return false;
                if (!tunnel->paths[i]->activated)
                        return false;
        }

        return true;
}

/**
 * tb_tunnel_consumed_bandwidth() - Return bandwidth consumed by the tunnel
 * @tunnel: Tunnel to check
 * @consumed_up: Consumed bandwidth in Mb/s from @dst_port to @src_port.
 *               Can be %NULL.
 * @consumed_down: Consumed bandwidth in Mb/s from @src_port to @dst_port.
 *                 Can be %NULL.
 *
 * Stores the amount of isochronous bandwidth @tunnel consumes in
 * @consumed_up and @consumed_down. In case of success returns %0,
 * negative errno otherwise.
 */
int tb_tunnel_consumed_bandwidth(struct tb_tunnel *tunnel, int *consumed_up,
                                 int *consumed_down)
{
        int up_bw = 0, down_bw = 0;

        if (!tb_tunnel_is_active(tunnel))
                goto out;

        if (tunnel->consumed_bandwidth) {
                int ret;

                ret = tunnel->consumed_bandwidth(tunnel, &up_bw, &down_bw);
                if (ret)
                        return ret;

                tb_tunnel_dbg(tunnel, "consumed bandwidth %d/%d Mb/s\n", up_bw,
                              down_bw);
        }

out:
        if (consumed_up)
                *consumed_up = up_bw;
        if (consumed_down)
                *consumed_down = down_bw;

        return 0;
}

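/*
 * A typical caller pattern (sketch; the surrounding variables are
 * illustrative):
 *
 *      int up, down;
 *
 *      if (!tb_tunnel_consumed_bandwidth(tunnel, &up, &down)) {
 *              available_up -= up;
 *              available_down -= down;
 *      }
 */
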
/**
 * tb_tunnel_release_unused_bandwidth() - Release unused bandwidth
 * @tunnel: Tunnel whose unused bandwidth to release
 *
 * If the tunnel supports dynamic bandwidth management (USB3 tunnels at
 * the moment), this function makes it release all the unused bandwidth.
 *
 * Returns %0 in case of success and negative errno otherwise.
 */
int tb_tunnel_release_unused_bandwidth(struct tb_tunnel *tunnel)
{
        if (!tb_tunnel_is_active(tunnel))
                return 0;

        if (tunnel->release_unused_bandwidth) {
                int ret;

                ret = tunnel->release_unused_bandwidth(tunnel);
                if (ret)
                        return ret;
        }

        return 0;
}

/**
 * tb_tunnel_reclaim_available_bandwidth() - Reclaim available bandwidth
 * @tunnel: Tunnel reclaiming available bandwidth
 * @available_up: Available upstream bandwidth (in Mb/s)
 * @available_down: Available downstream bandwidth (in Mb/s)
 *
 * Reclaims bandwidth from @available_up and @available_down and updates
 * the variables accordingly (e.g. decreases both according to what was
 * reclaimed by the tunnel). If nothing was reclaimed the values are
 * kept as is.
 */
void tb_tunnel_reclaim_available_bandwidth(struct tb_tunnel *tunnel,
                                           int *available_up,
                                           int *available_down)
{
        if (!tb_tunnel_is_active(tunnel))
                return;

        if (tunnel->reclaim_available_bandwidth)
                tunnel->reclaim_available_bandwidth(tunnel, available_up,
                                                    available_down);
}