// SPDX-License-Identifier: GPL-2.0
/*
 * Thunderbolt driver - Tunneling support
 *
 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
 * Copyright (C) 2019, Intel Corporation
 */

#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/list.h>

#include "tunnel.h"
#include "tb.h"

/* PCIe adapters always use HopID 8 for both directions */
#define TB_PCI_HOPID			8

#define TB_PCI_PATH_DOWN		0
#define TB_PCI_PATH_UP			1

/* USB3 adapters always use HopID 8 for both directions */
#define TB_USB3_HOPID			8

#define TB_USB3_PATH_DOWN		0
#define TB_USB3_PATH_UP			1

/* DP adapters use HopID 8 for AUX and 9 for Video */
#define TB_DP_AUX_TX_HOPID		8
#define TB_DP_AUX_RX_HOPID		8
#define TB_DP_VIDEO_HOPID		9

#define TB_DP_VIDEO_PATH_OUT		0
#define TB_DP_AUX_PATH_OUT		1
#define TB_DP_AUX_PATH_IN		2

/* Minimum number of credits needed for PCIe path */
#define TB_MIN_PCIE_CREDITS		6U
/*
 * Number of credits we try to allocate for each DMA path if not limited
 * by the host router baMaxHI.
 */
#define TB_DMA_CREDITS			14U
/* Minimum number of credits for DMA path */
#define TB_MIN_DMA_CREDITS		1U

static const char * const tb_tunnel_names[] = { "PCI", "DP", "DMA", "USB3" };

#define __TB_TUNNEL_PRINT(level, tunnel, fmt, arg...) \
	do { \
		struct tb_tunnel *__tunnel = (tunnel); \
		level(__tunnel->tb, "%llx:%x <-> %llx:%x (%s): " fmt, \
		      tb_route(__tunnel->src_port->sw), \
		      __tunnel->src_port->port, \
		      tb_route(__tunnel->dst_port->sw), \
		      __tunnel->dst_port->port, \
		      tb_tunnel_names[__tunnel->type], \
		      ## arg); \
	} while (0)

#define tb_tunnel_WARN(tunnel, fmt, arg...) \
	__TB_TUNNEL_PRINT(tb_WARN, tunnel, fmt, ##arg)
#define tb_tunnel_warn(tunnel, fmt, arg...) \
	__TB_TUNNEL_PRINT(tb_warn, tunnel, fmt, ##arg)
#define tb_tunnel_info(tunnel, fmt, arg...) \
	__TB_TUNNEL_PRINT(tb_info, tunnel, fmt, ##arg)
#define tb_tunnel_dbg(tunnel, fmt, arg...) \
	__TB_TUNNEL_PRINT(tb_dbg, tunnel, fmt, ##arg)

static inline unsigned int tb_usable_credits(const struct tb_port *port)
{
	return port->total_credits - port->ctl_credits;
}

/**
 * tb_available_credits() - Available credits for PCIe and DMA
 * @port: Lane adapter to check
 * @max_dp_streams: If non-%NULL stores maximum number of simultaneous DP
 *		    streams possible through this lane adapter
 */
static unsigned int tb_available_credits(const struct tb_port *port,
					 size_t *max_dp_streams)
{
	const struct tb_switch *sw = port->sw;
	int credits, usb3, pcie, spare;
	size_t ndp;

	usb3 = tb_acpi_may_tunnel_usb3() ? sw->max_usb3_credits : 0;
	pcie = tb_acpi_may_tunnel_pcie() ? sw->max_pcie_credits : 0;

	if (tb_acpi_is_xdomain_allowed()) {
		spare = min_not_zero(sw->max_dma_credits, TB_DMA_CREDITS);
		/* Add some credits for potential second DMA tunnel */
		spare += TB_MIN_DMA_CREDITS;
	} else {
		spare = 0;
	}

	credits = tb_usable_credits(port);
	if (tb_acpi_may_tunnel_dp()) {
		/*
		 * Maximum number of DP streams possible through the
		 * lane adapter.
		 */
		ndp = (credits - (usb3 + pcie + spare)) /
		      (sw->min_dp_aux_credits + sw->min_dp_main_credits);
	} else {
		ndp = 0;
	}
	credits -= ndp * (sw->min_dp_aux_credits + sw->min_dp_main_credits);
	credits -= usb3;

	if (max_dp_streams)
		*max_dp_streams = ndp;

	return credits > 0 ? credits : 0;
}
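
/*
 * Illustrative example (editor's sketch, numbers are hypothetical and not
 * taken from any real router): with 60 usable credits on the lane adapter,
 * 36 credits reserved for USB3, 6 for PCIe and 15 + 1 spare for DMA, and
 * min_dp_aux_credits + min_dp_main_credits == 2, the adapter could carry
 * ndp = (60 - (36 + 6 + 16)) / 2 = 1 DP stream, leaving
 * 60 - 1 * 2 - 36 = 22 credits available for PCIe and DMA.
 */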

static struct tb_tunnel *tb_tunnel_alloc(struct tb *tb, size_t npaths,
					 enum tb_tunnel_type type)
{
	struct tb_tunnel *tunnel;

	tunnel = kzalloc(sizeof(*tunnel), GFP_KERNEL);
	if (!tunnel)
		return NULL;

	tunnel->paths = kcalloc(npaths, sizeof(tunnel->paths[0]), GFP_KERNEL);
	if (!tunnel->paths) {
		tb_tunnel_free(tunnel);
		return NULL;
	}

	INIT_LIST_HEAD(&tunnel->list);
	tunnel->tb = tb;
	tunnel->npaths = npaths;
	tunnel->type = type;

	return tunnel;
}

static int tb_pci_activate(struct tb_tunnel *tunnel, bool activate)
{
	int res;

	res = tb_pci_port_enable(tunnel->src_port, activate);
	if (res)
		return res;

	if (tb_port_is_pcie_up(tunnel->dst_port))
		return tb_pci_port_enable(tunnel->dst_port, activate);

	return 0;
}

static int tb_pci_init_credits(struct tb_path_hop *hop)
{
	struct tb_port *port = hop->in_port;
	struct tb_switch *sw = port->sw;
	unsigned int credits;

	if (tb_port_use_credit_allocation(port)) {
		unsigned int available;

		available = tb_available_credits(port, NULL);
		credits = min(sw->max_pcie_credits, available);

		if (credits < TB_MIN_PCIE_CREDITS)
			return -ENOSPC;

		credits = max(TB_MIN_PCIE_CREDITS, credits);
	} else {
		if (tb_port_is_null(port))
			credits = port->bonded ? 32 : 16;
		else
			credits = 7;
	}

	hop->initial_credits = credits;
	return 0;
}

static int tb_pci_init_path(struct tb_path *path)
{
	struct tb_path_hop *hop;

	path->egress_fc_enable = TB_PATH_SOURCE | TB_PATH_INTERNAL;
	path->egress_shared_buffer = TB_PATH_NONE;
	path->ingress_fc_enable = TB_PATH_ALL;
	path->ingress_shared_buffer = TB_PATH_NONE;
	path->priority = 3;
	path->weight = 1;
	path->drop_packages = 0;

	tb_path_for_each_hop(path, hop) {
		int ret;

		ret = tb_pci_init_credits(hop);
		if (ret)
			return ret;
	}

	return 0;
}

/**
 * tb_tunnel_discover_pci() - Discover existing PCIe tunnels
 * @tb: Pointer to the domain structure
 * @down: PCIe downstream adapter
 * @alloc_hopid: Allocate HopIDs from visited ports
 *
 * If @down adapter is active, follows the tunnel to the PCIe upstream
 * adapter and back. Returns the discovered tunnel or %NULL if there was
 * no tunnel.
 */
struct tb_tunnel *tb_tunnel_discover_pci(struct tb *tb, struct tb_port *down,
					 bool alloc_hopid)
{
	struct tb_tunnel *tunnel;
	struct tb_path *path;

	if (!tb_pci_port_is_enabled(down))
		return NULL;

	tunnel = tb_tunnel_alloc(tb, 2, TB_TUNNEL_PCI);
	if (!tunnel)
		return NULL;

	tunnel->activate = tb_pci_activate;
	tunnel->src_port = down;

	/*
	 * Discover both paths even if they are not complete. We will
	 * clean them up by calling tb_tunnel_deactivate() below in that
	 * case.
	 */
	path = tb_path_discover(down, TB_PCI_HOPID, NULL, -1,
				&tunnel->dst_port, "PCIe Up", alloc_hopid);
	if (!path) {
		/* Just disable the downstream port */
		tb_pci_port_enable(down, false);
		goto err_free;
	}
	tunnel->paths[TB_PCI_PATH_UP] = path;
	if (tb_pci_init_path(tunnel->paths[TB_PCI_PATH_UP]))
		goto err_free;

	path = tb_path_discover(tunnel->dst_port, -1, down, TB_PCI_HOPID, NULL,
				"PCIe Down", alloc_hopid);
	if (!path)
		goto err_deactivate;
	tunnel->paths[TB_PCI_PATH_DOWN] = path;
	if (tb_pci_init_path(tunnel->paths[TB_PCI_PATH_DOWN]))
		goto err_deactivate;

	/* Validate that the tunnel is complete */
	if (!tb_port_is_pcie_up(tunnel->dst_port)) {
		tb_port_warn(tunnel->dst_port,
			     "path does not end on a PCIe adapter, cleaning up\n");
		goto err_deactivate;
	}

	if (down != tunnel->src_port) {
		tb_tunnel_warn(tunnel, "path is not complete, cleaning up\n");
		goto err_deactivate;
	}

	if (!tb_pci_port_is_enabled(tunnel->dst_port)) {
		tb_tunnel_warn(tunnel,
			       "tunnel is not fully activated, cleaning up\n");
		goto err_deactivate;
	}

	tb_tunnel_dbg(tunnel, "discovered\n");
	return tunnel;

err_deactivate:
	tb_tunnel_deactivate(tunnel);
err_free:
	tb_tunnel_free(tunnel);

	return NULL;
}

/**
 * tb_tunnel_alloc_pci() - allocate a PCI tunnel
 * @tb: Pointer to the domain structure
 * @up: PCIe upstream adapter port
 * @down: PCIe downstream adapter port
 *
 * Allocate a PCI tunnel. The ports must be of type TB_TYPE_PCIE_UP and
 * TB_TYPE_PCIE_DOWN.
 *
 * Return: Returns a tb_tunnel on success or NULL on failure.
 */
struct tb_tunnel *tb_tunnel_alloc_pci(struct tb *tb, struct tb_port *up,
				      struct tb_port *down)
{
	struct tb_tunnel *tunnel;
	struct tb_path *path;

	tunnel = tb_tunnel_alloc(tb, 2, TB_TUNNEL_PCI);
	if (!tunnel)
		return NULL;

	tunnel->activate = tb_pci_activate;
	tunnel->src_port = down;
	tunnel->dst_port = up;

	path = tb_path_alloc(tb, down, TB_PCI_HOPID, up, TB_PCI_HOPID, 0,
			     "PCIe Down");
	if (!path)
		goto err_free;
	tunnel->paths[TB_PCI_PATH_DOWN] = path;
	if (tb_pci_init_path(path))
		goto err_free;

	path = tb_path_alloc(tb, up, TB_PCI_HOPID, down, TB_PCI_HOPID, 0,
			     "PCIe Up");
	if (!path)
		goto err_free;
	tunnel->paths[TB_PCI_PATH_UP] = path;
	if (tb_pci_init_path(path))
		goto err_free;

	return tunnel;

err_free:
	tb_tunnel_free(tunnel);
	return NULL;
}

static bool tb_dp_is_usb4(const struct tb_switch *sw)
{
	/* Titan Ridge DP adapters need the same treatment as USB4 */
	return tb_switch_is_usb4(sw) || tb_switch_is_titan_ridge(sw);
}

static int tb_dp_cm_handshake(struct tb_port *in, struct tb_port *out)
{
	int timeout = 10;
	u32 val;
	int ret;

	/* Both ends need to support this */
	if (!tb_dp_is_usb4(in->sw) || !tb_dp_is_usb4(out->sw))
		return 0;

	ret = tb_port_read(out, &val, TB_CFG_PORT,
			   out->cap_adap + DP_STATUS_CTRL, 1);
	if (ret)
		return ret;

	val |= DP_STATUS_CTRL_UF | DP_STATUS_CTRL_CMHS;

	ret = tb_port_write(out, &val, TB_CFG_PORT,
			    out->cap_adap + DP_STATUS_CTRL, 1);
	if (ret)
		return ret;

	do {
		ret = tb_port_read(out, &val, TB_CFG_PORT,
				   out->cap_adap + DP_STATUS_CTRL, 1);
		if (ret)
			return ret;
		if (!(val & DP_STATUS_CTRL_CMHS))
			return 0;
		usleep_range(10, 100);
	} while (timeout--);

	return -ETIMEDOUT;
}

static inline u32 tb_dp_cap_get_rate(u32 val)
{
	u32 rate = (val & DP_COMMON_CAP_RATE_MASK) >> DP_COMMON_CAP_RATE_SHIFT;

	switch (rate) {
	case DP_COMMON_CAP_RATE_RBR:
		return 1620;
	case DP_COMMON_CAP_RATE_HBR:
		return 2700;
	case DP_COMMON_CAP_RATE_HBR2:
		return 5400;
	case DP_COMMON_CAP_RATE_HBR3:
		return 8100;
	default:
		return 0;
	}
}

static inline u32 tb_dp_cap_set_rate(u32 val, u32 rate)
{
	val &= ~DP_COMMON_CAP_RATE_MASK;
	switch (rate) {
	default:
		WARN(1, "invalid rate %u passed, defaulting to 1620 MB/s\n", rate);
		fallthrough;
	case 1620:
		val |= DP_COMMON_CAP_RATE_RBR << DP_COMMON_CAP_RATE_SHIFT;
		break;
	case 2700:
		val |= DP_COMMON_CAP_RATE_HBR << DP_COMMON_CAP_RATE_SHIFT;
		break;
	case 5400:
		val |= DP_COMMON_CAP_RATE_HBR2 << DP_COMMON_CAP_RATE_SHIFT;
		break;
	case 8100:
		val |= DP_COMMON_CAP_RATE_HBR3 << DP_COMMON_CAP_RATE_SHIFT;
		break;
	}
	return val;
}

static inline u32 tb_dp_cap_get_lanes(u32 val)
{
	u32 lanes = (val & DP_COMMON_CAP_LANES_MASK) >> DP_COMMON_CAP_LANES_SHIFT;

	switch (lanes) {
	case DP_COMMON_CAP_1_LANE:
		return 1;
	case DP_COMMON_CAP_2_LANES:
		return 2;
	case DP_COMMON_CAP_4_LANES:
		return 4;
	default:
		return 0;
	}
}

static inline u32 tb_dp_cap_set_lanes(u32 val, u32 lanes)
{
	val &= ~DP_COMMON_CAP_LANES_MASK;
	switch (lanes) {
	default:
		WARN(1, "invalid number of lanes %u passed, defaulting to 1\n",
		     lanes);
		fallthrough;
	case 1:
		val |= DP_COMMON_CAP_1_LANE << DP_COMMON_CAP_LANES_SHIFT;
		break;
	case 2:
		val |= DP_COMMON_CAP_2_LANES << DP_COMMON_CAP_LANES_SHIFT;
		break;
	case 4:
		val |= DP_COMMON_CAP_4_LANES << DP_COMMON_CAP_LANES_SHIFT;
		break;
	}
	return val;
}

static unsigned int tb_dp_bandwidth(unsigned int rate, unsigned int lanes)
{
	/* Tunneling removes the DP 8b/10b encoding */
	return rate * lanes * 8 / 10;
}
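
/*
 * For example, HBR2 x 2 lanes gives 5400 * 2 * 8 / 10 = 8640 Mb/s of
 * usable bandwidth, which matches the corresponding entry in the dp_bw[]
 * table below.
 */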

static int tb_dp_reduce_bandwidth(int max_bw, u32 in_rate, u32 in_lanes,
				  u32 out_rate, u32 out_lanes, u32 *new_rate,
				  u32 *new_lanes)
{
	static const u32 dp_bw[][2] = {
		/* Mb/s, lanes */
		{ 8100, 4 }, /* 25920 Mb/s */
		{ 5400, 4 }, /* 17280 Mb/s */
		{ 8100, 2 }, /* 12960 Mb/s */
		{ 2700, 4 }, /* 8640 Mb/s */
		{ 5400, 2 }, /* 8640 Mb/s */
		{ 8100, 1 }, /* 6480 Mb/s */
		{ 1620, 4 }, /* 5184 Mb/s */
		{ 5400, 1 }, /* 4320 Mb/s */
		{ 2700, 2 }, /* 4320 Mb/s */
		{ 1620, 2 }, /* 2592 Mb/s */
		{ 2700, 1 }, /* 2160 Mb/s */
		{ 1620, 1 }, /* 1296 Mb/s */
	};
	unsigned int i;

	/*
	 * Find a combination that can fit into max_bw and does not
	 * exceed the maximum rate and lanes supported by the DP OUT and
	 * DP IN adapters.
	 */
	for (i = 0; i < ARRAY_SIZE(dp_bw); i++) {
		if (dp_bw[i][0] > out_rate || dp_bw[i][1] > out_lanes)
			continue;

		if (dp_bw[i][0] > in_rate || dp_bw[i][1] > in_lanes)
			continue;

		if (tb_dp_bandwidth(dp_bw[i][0], dp_bw[i][1]) <= max_bw) {
			*new_rate = dp_bw[i][0];
			*new_lanes = dp_bw[i][1];
			return 0;
		}
	}

	return -ENOSR;
}
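
/*
 * Example walk through the table above (editor's illustration): with
 * max_bw = 9000 Mb/s and both adapters capable of HBR3 x 4, the first
 * three entries exceed max_bw, so { 2700, 4 } is selected and the tunnel
 * is reduced to 8640 Mb/s.
 */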

static int tb_dp_xchg_caps(struct tb_tunnel *tunnel)
{
	u32 out_dp_cap, out_rate, out_lanes, in_dp_cap, in_rate, in_lanes, bw;
	struct tb_port *out = tunnel->dst_port;
	struct tb_port *in = tunnel->src_port;
	int ret, max_bw;

	/*
	 * Copy DP_LOCAL_CAP register to DP_REMOTE_CAP register for
	 * newer generation hardware.
	 */
	if (in->sw->generation < 2 || out->sw->generation < 2)
		return 0;

	/*
	 * Perform connection manager handshake between IN and OUT ports
	 * before capabilities exchange can take place.
	 */
	ret = tb_dp_cm_handshake(in, out);
	if (ret)
		return ret;

	/* Read both DP_LOCAL_CAP registers */
	ret = tb_port_read(in, &in_dp_cap, TB_CFG_PORT,
			   in->cap_adap + DP_LOCAL_CAP, 1);
	if (ret)
		return ret;

	ret = tb_port_read(out, &out_dp_cap, TB_CFG_PORT,
			   out->cap_adap + DP_LOCAL_CAP, 1);
	if (ret)
		return ret;

	/* Write IN local caps to OUT remote caps */
	ret = tb_port_write(out, &in_dp_cap, TB_CFG_PORT,
			    out->cap_adap + DP_REMOTE_CAP, 1);
	if (ret)
		return ret;

	in_rate = tb_dp_cap_get_rate(in_dp_cap);
	in_lanes = tb_dp_cap_get_lanes(in_dp_cap);
	tb_port_dbg(in, "maximum supported bandwidth %u Mb/s x%u = %u Mb/s\n",
		    in_rate, in_lanes, tb_dp_bandwidth(in_rate, in_lanes));

	/*
	 * If the tunnel bandwidth is limited (max_bw is set) then see
	 * if we need to reduce bandwidth to fit there.
	 */
	out_rate = tb_dp_cap_get_rate(out_dp_cap);
	out_lanes = tb_dp_cap_get_lanes(out_dp_cap);
	bw = tb_dp_bandwidth(out_rate, out_lanes);
	tb_port_dbg(out, "maximum supported bandwidth %u Mb/s x%u = %u Mb/s\n",
		    out_rate, out_lanes, bw);

	if (in->sw->config.depth < out->sw->config.depth)
		max_bw = tunnel->max_down;
	else
		max_bw = tunnel->max_up;

	if (max_bw && bw > max_bw) {
		u32 new_rate, new_lanes, new_bw;

		ret = tb_dp_reduce_bandwidth(max_bw, in_rate, in_lanes,
					     out_rate, out_lanes, &new_rate,
					     &new_lanes);
		if (ret) {
			tb_port_info(out, "not enough bandwidth for DP tunnel\n");
			return ret;
		}

		new_bw = tb_dp_bandwidth(new_rate, new_lanes);
		tb_port_dbg(out, "bandwidth reduced to %u Mb/s x%u = %u Mb/s\n",
			    new_rate, new_lanes, new_bw);

		/*
		 * Set new rate and number of lanes before writing it to
		 * the IN port remote caps.
		 */
		out_dp_cap = tb_dp_cap_set_rate(out_dp_cap, new_rate);
		out_dp_cap = tb_dp_cap_set_lanes(out_dp_cap, new_lanes);
	}

	return tb_port_write(in, &out_dp_cap, TB_CFG_PORT,
			     in->cap_adap + DP_REMOTE_CAP, 1);
}

static int tb_dp_activate(struct tb_tunnel *tunnel, bool active)
{
	int ret;

	if (active) {
		struct tb_path **paths;
		int last;

		paths = tunnel->paths;
		last = paths[TB_DP_VIDEO_PATH_OUT]->path_length - 1;

		tb_dp_port_set_hops(tunnel->src_port,
			paths[TB_DP_VIDEO_PATH_OUT]->hops[0].in_hop_index,
			paths[TB_DP_AUX_PATH_OUT]->hops[0].in_hop_index,
			paths[TB_DP_AUX_PATH_IN]->hops[last].next_hop_index);

		tb_dp_port_set_hops(tunnel->dst_port,
			paths[TB_DP_VIDEO_PATH_OUT]->hops[last].next_hop_index,
			paths[TB_DP_AUX_PATH_IN]->hops[0].in_hop_index,
			paths[TB_DP_AUX_PATH_OUT]->hops[last].next_hop_index);
	} else {
		tb_dp_port_hpd_clear(tunnel->src_port);
		tb_dp_port_set_hops(tunnel->src_port, 0, 0, 0);
		if (tb_port_is_dpout(tunnel->dst_port))
			tb_dp_port_set_hops(tunnel->dst_port, 0, 0, 0);
	}

	ret = tb_dp_port_enable(tunnel->src_port, active);
	if (ret)
		return ret;

	if (tb_port_is_dpout(tunnel->dst_port))
		return tb_dp_port_enable(tunnel->dst_port, active);

	return 0;
}

static int tb_dp_consumed_bandwidth(struct tb_tunnel *tunnel, int *consumed_up,
				    int *consumed_down)
{
	struct tb_port *in = tunnel->src_port;
	const struct tb_switch *sw = in->sw;
	u32 val, rate = 0, lanes = 0;
	int ret;

	if (tb_dp_is_usb4(sw)) {
		int timeout = 20;

		/*
		 * Wait for DPRX done. Normally it should be already set
		 * for active tunnel.
		 */
		do {
			ret = tb_port_read(in, &val, TB_CFG_PORT,
					   in->cap_adap + DP_COMMON_CAP, 1);
			if (ret)
				return ret;

			if (val & DP_COMMON_CAP_DPRX_DONE) {
				rate = tb_dp_cap_get_rate(val);
				lanes = tb_dp_cap_get_lanes(val);
				break;
			}
			msleep(250);
		} while (timeout--);

		if (!timeout)
			return -ETIMEDOUT;
	} else if (sw->generation >= 2) {
		/*
		 * Read from the copied remote cap so that we take into
		 * account if capabilities were reduced during exchange.
		 */
		ret = tb_port_read(in, &val, TB_CFG_PORT,
				   in->cap_adap + DP_REMOTE_CAP, 1);
		if (ret)
			return ret;

		rate = tb_dp_cap_get_rate(val);
		lanes = tb_dp_cap_get_lanes(val);
	} else {
		/* No bandwidth management for legacy devices */
		*consumed_up = 0;
		*consumed_down = 0;
		return 0;
	}

	if (in->sw->config.depth < tunnel->dst_port->sw->config.depth) {
		*consumed_up = 0;
		*consumed_down = tb_dp_bandwidth(rate, lanes);
	} else {
		*consumed_up = tb_dp_bandwidth(rate, lanes);
		*consumed_down = 0;
	}

	return 0;
}

static void tb_dp_init_aux_credits(struct tb_path_hop *hop)
{
	struct tb_port *port = hop->in_port;
	struct tb_switch *sw = port->sw;

	if (tb_port_use_credit_allocation(port))
		hop->initial_credits = sw->min_dp_aux_credits;
	else
		hop->initial_credits = 1;
}

static void tb_dp_init_aux_path(struct tb_path *path)
{
	struct tb_path_hop *hop;

	path->egress_fc_enable = TB_PATH_SOURCE | TB_PATH_INTERNAL;
	path->egress_shared_buffer = TB_PATH_NONE;
	path->ingress_fc_enable = TB_PATH_ALL;
	path->ingress_shared_buffer = TB_PATH_NONE;
	path->priority = 2;
	path->weight = 1;

	tb_path_for_each_hop(path, hop)
		tb_dp_init_aux_credits(hop);
}

static int tb_dp_init_video_credits(struct tb_path_hop *hop)
{
	struct tb_port *port = hop->in_port;
	struct tb_switch *sw = port->sw;

	if (tb_port_use_credit_allocation(port)) {
		unsigned int nfc_credits;
		size_t max_dp_streams;

		tb_available_credits(port, &max_dp_streams);
		/*
		 * Read the number of currently allocated NFC credits
		 * from the lane adapter. Since we only use them for DP
		 * tunneling we can use that to figure out how many DP
		 * tunnels already go through the lane adapter.
		 */
		nfc_credits = port->config.nfc_credits &
				ADP_CS_4_NFC_BUFFERS_MASK;
		if (nfc_credits / sw->min_dp_main_credits > max_dp_streams)
			return -ENOSPC;

		hop->nfc_credits = sw->min_dp_main_credits;
	} else {
		hop->nfc_credits = min(port->total_credits - 2, 12U);
	}

	return 0;
}

static int tb_dp_init_video_path(struct tb_path *path)
{
	struct tb_path_hop *hop;

	path->egress_fc_enable = TB_PATH_NONE;
	path->egress_shared_buffer = TB_PATH_NONE;
	path->ingress_fc_enable = TB_PATH_NONE;
	path->ingress_shared_buffer = TB_PATH_NONE;
	path->priority = 1;
	path->weight = 1;

	tb_path_for_each_hop(path, hop) {
		int ret;

		ret = tb_dp_init_video_credits(hop);
		if (ret)
			return ret;
	}

	return 0;
}

/**
 * tb_tunnel_discover_dp() - Discover existing Display Port tunnels
 * @tb: Pointer to the domain structure
 * @in: DP in adapter
 * @alloc_hopid: Allocate HopIDs from visited ports
 *
 * If @in adapter is active, follows the tunnel to the DP out adapter
 * and back. Returns the discovered tunnel or %NULL if there was no
 * tunnel.
 *
 * Return: DP tunnel or %NULL if no tunnel found.
 */
struct tb_tunnel *tb_tunnel_discover_dp(struct tb *tb, struct tb_port *in,
					bool alloc_hopid)
{
	struct tb_tunnel *tunnel;
	struct tb_port *port;
	struct tb_path *path;

	if (!tb_dp_port_is_enabled(in))
		return NULL;

	tunnel = tb_tunnel_alloc(tb, 3, TB_TUNNEL_DP);
	if (!tunnel)
		return NULL;

	tunnel->init = tb_dp_xchg_caps;
	tunnel->activate = tb_dp_activate;
	tunnel->consumed_bandwidth = tb_dp_consumed_bandwidth;
	tunnel->src_port = in;

	path = tb_path_discover(in, TB_DP_VIDEO_HOPID, NULL, -1,
				&tunnel->dst_port, "Video", alloc_hopid);
	if (!path) {
		/* Just disable the DP IN port */
		tb_dp_port_enable(in, false);
		goto err_free;
	}
	tunnel->paths[TB_DP_VIDEO_PATH_OUT] = path;
	if (tb_dp_init_video_path(tunnel->paths[TB_DP_VIDEO_PATH_OUT]))
		goto err_free;

	path = tb_path_discover(in, TB_DP_AUX_TX_HOPID, NULL, -1, NULL, "AUX TX",
				alloc_hopid);
	if (!path)
		goto err_deactivate;
	tunnel->paths[TB_DP_AUX_PATH_OUT] = path;
	tb_dp_init_aux_path(tunnel->paths[TB_DP_AUX_PATH_OUT]);

	path = tb_path_discover(tunnel->dst_port, -1, in, TB_DP_AUX_RX_HOPID,
				&port, "AUX RX", alloc_hopid);
	if (!path)
		goto err_deactivate;
	tunnel->paths[TB_DP_AUX_PATH_IN] = path;
	tb_dp_init_aux_path(tunnel->paths[TB_DP_AUX_PATH_IN]);

	/* Validate that the tunnel is complete */
	if (!tb_port_is_dpout(tunnel->dst_port)) {
		tb_port_warn(in, "path does not end on a DP adapter, cleaning up\n");
		goto err_deactivate;
	}

	if (!tb_dp_port_is_enabled(tunnel->dst_port))
		goto err_deactivate;

	if (!tb_dp_port_hpd_is_active(tunnel->dst_port))
		goto err_deactivate;

	if (port != tunnel->src_port) {
		tb_tunnel_warn(tunnel, "path is not complete, cleaning up\n");
		goto err_deactivate;
	}

	tb_tunnel_dbg(tunnel, "discovered\n");
	return tunnel;

err_deactivate:
	tb_tunnel_deactivate(tunnel);
err_free:
	tb_tunnel_free(tunnel);

	return NULL;
}

/**
 * tb_tunnel_alloc_dp() - allocate a Display Port tunnel
 * @tb: Pointer to the domain structure
 * @in: DP in adapter port
 * @out: DP out adapter port
 * @max_up: Maximum available upstream bandwidth for the DP tunnel (%0
 *	    if not limited)
 * @max_down: Maximum available downstream bandwidth for the DP tunnel
 *	      (%0 if not limited)
 *
 * Allocates a tunnel between @in and @out that is capable of tunneling
 * Display Port traffic.
 *
 * Return: Returns a tb_tunnel on success or NULL on failure.
 */
struct tb_tunnel *tb_tunnel_alloc_dp(struct tb *tb, struct tb_port *in,
				     struct tb_port *out, int max_up,
				     int max_down)
{
	struct tb_tunnel *tunnel;
	struct tb_path **paths;
	struct tb_path *path;

	if (WARN_ON(!in->cap_adap || !out->cap_adap))
		return NULL;

	tunnel = tb_tunnel_alloc(tb, 3, TB_TUNNEL_DP);
	if (!tunnel)
		return NULL;

	tunnel->init = tb_dp_xchg_caps;
	tunnel->activate = tb_dp_activate;
	tunnel->consumed_bandwidth = tb_dp_consumed_bandwidth;
	tunnel->src_port = in;
	tunnel->dst_port = out;
	tunnel->max_up = max_up;
	tunnel->max_down = max_down;

	paths = tunnel->paths;

	path = tb_path_alloc(tb, in, TB_DP_VIDEO_HOPID, out, TB_DP_VIDEO_HOPID,
			     1, "Video");
	if (!path)
		goto err_free;
	tb_dp_init_video_path(path);
	paths[TB_DP_VIDEO_PATH_OUT] = path;

	path = tb_path_alloc(tb, in, TB_DP_AUX_TX_HOPID, out,
			     TB_DP_AUX_TX_HOPID, 1, "AUX TX");
	if (!path)
		goto err_free;
	tb_dp_init_aux_path(path);
	paths[TB_DP_AUX_PATH_OUT] = path;

	path = tb_path_alloc(tb, out, TB_DP_AUX_RX_HOPID, in,
			     TB_DP_AUX_RX_HOPID, 1, "AUX RX");
	if (!path)
		goto err_free;
	tb_dp_init_aux_path(path);
	paths[TB_DP_AUX_PATH_IN] = path;

	return tunnel;

err_free:
	tb_tunnel_free(tunnel);
	return NULL;
}

static unsigned int tb_dma_available_credits(const struct tb_port *port)
{
	const struct tb_switch *sw = port->sw;
	int credits;

	credits = tb_available_credits(port, NULL);
	if (tb_acpi_may_tunnel_pcie())
		credits -= sw->max_pcie_credits;
	credits -= port->dma_credits;

	return credits > 0 ? credits : 0;
}

static int tb_dma_reserve_credits(struct tb_path_hop *hop, unsigned int credits)
{
	struct tb_port *port = hop->in_port;

	if (tb_port_use_credit_allocation(port)) {
		unsigned int available = tb_dma_available_credits(port);

		/*
		 * Need to have at least TB_MIN_DMA_CREDITS, otherwise
		 * DMA path cannot be established.
		 */
		if (available < TB_MIN_DMA_CREDITS)
			return -ENOSPC;

		while (credits > available)
			credits--;

		tb_port_dbg(port, "reserving %u credits for DMA path\n",
			    credits);

		port->dma_credits += credits;
	} else {
		if (tb_port_is_null(port))
			credits = port->bonded ? 14 : 6;
		else
			credits = min(port->total_credits, credits);
	}

	hop->initial_credits = credits;
	return 0;
}

/* Path from lane adapter to NHI */
static int tb_dma_init_rx_path(struct tb_path *path, unsigned int credits)
{
	struct tb_path_hop *hop;
	unsigned int i, tmp;

	path->egress_fc_enable = TB_PATH_SOURCE | TB_PATH_INTERNAL;
	path->ingress_fc_enable = TB_PATH_ALL;
	path->egress_shared_buffer = TB_PATH_NONE;
	path->ingress_shared_buffer = TB_PATH_NONE;
	path->priority = 5;
	path->weight = 1;
	path->clear_fc = true;

	/*
	 * First lane adapter is the one connected to the remote host.
	 * We don't tunnel other traffic over this link so can use all
	 * the credits (except the ones reserved for control traffic).
	 */
	hop = &path->hops[0];
	tmp = min(tb_usable_credits(hop->in_port), credits);
	hop->initial_credits = tmp;
	hop->in_port->dma_credits += tmp;

	for (i = 1; i < path->path_length; i++) {
		int ret;

		ret = tb_dma_reserve_credits(&path->hops[i], credits);
		if (ret)
			return ret;
	}

	return 0;
}

/* Path from NHI to lane adapter */
static int tb_dma_init_tx_path(struct tb_path *path, unsigned int credits)
{
	struct tb_path_hop *hop;

	path->egress_fc_enable = TB_PATH_ALL;
	path->ingress_fc_enable = TB_PATH_ALL;
	path->egress_shared_buffer = TB_PATH_NONE;
	path->ingress_shared_buffer = TB_PATH_NONE;
	path->priority = 5;
	path->weight = 1;
	path->clear_fc = true;

	tb_path_for_each_hop(path, hop) {
		int ret;

		ret = tb_dma_reserve_credits(hop, credits);
		if (ret)
			return ret;
	}

	return 0;
}

static void tb_dma_release_credits(struct tb_path_hop *hop)
{
	struct tb_port *port = hop->in_port;

	if (tb_port_use_credit_allocation(port)) {
		port->dma_credits -= hop->initial_credits;

		tb_port_dbg(port, "released %u DMA path credits\n",
			    hop->initial_credits);
	}
}

static void tb_dma_deinit_path(struct tb_path *path)
{
	struct tb_path_hop *hop;

	tb_path_for_each_hop(path, hop)
		tb_dma_release_credits(hop);
}

static void tb_dma_deinit(struct tb_tunnel *tunnel)
{
	int i;

	for (i = 0; i < tunnel->npaths; i++) {
		if (!tunnel->paths[i])
			continue;
		tb_dma_deinit_path(tunnel->paths[i]);
	}
}

/**
 * tb_tunnel_alloc_dma() - allocate a DMA tunnel
 * @tb: Pointer to the domain structure
 * @nhi: Host controller port
 * @dst: Destination null port which the other domain is connected to
 * @transmit_path: HopID used for transmitting packets
 * @transmit_ring: NHI ring number used to send packets towards the
 *		   other domain. Set to %-1 if TX path is not needed.
 * @receive_path: HopID used for receiving packets
 * @receive_ring: NHI ring number used to receive packets from the
 *		  other domain. Set to %-1 if RX path is not needed.
 *
 * Return: Returns a tb_tunnel on success or NULL on failure.
 */
struct tb_tunnel *tb_tunnel_alloc_dma(struct tb *tb, struct tb_port *nhi,
				      struct tb_port *dst, int transmit_path,
				      int transmit_ring, int receive_path,
				      int receive_ring)
{
	struct tb_tunnel *tunnel;
	size_t npaths = 0, i = 0;
	struct tb_path *path;
	int credits;

	if (receive_ring > 0)
		npaths++;
	if (transmit_ring > 0)
		npaths++;

	if (WARN_ON(!npaths))
		return NULL;

	tunnel = tb_tunnel_alloc(tb, npaths, TB_TUNNEL_DMA);
	if (!tunnel)
		return NULL;

	tunnel->src_port = nhi;
	tunnel->dst_port = dst;
	tunnel->deinit = tb_dma_deinit;

	credits = min_not_zero(TB_DMA_CREDITS, nhi->sw->max_dma_credits);

	if (receive_ring > 0) {
		path = tb_path_alloc(tb, dst, receive_path, nhi, receive_ring, 0,
				     "DMA RX");
		if (!path)
			goto err_free;
		tunnel->paths[i++] = path;
		if (tb_dma_init_rx_path(path, credits)) {
			tb_tunnel_dbg(tunnel, "not enough buffers for RX path\n");
			goto err_free;
		}
	}

	if (transmit_ring > 0) {
		path = tb_path_alloc(tb, nhi, transmit_ring, dst, transmit_path, 0,
				     "DMA TX");
		if (!path)
			goto err_free;
		tunnel->paths[i++] = path;
		if (tb_dma_init_tx_path(path, credits)) {
			tb_tunnel_dbg(tunnel, "not enough buffers for TX path\n");
			goto err_free;
		}
	}

	return tunnel;

err_free:
	tb_tunnel_free(tunnel);
	return NULL;
}
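
/*
 * Usage sketch (hypothetical caller, parameters chosen for illustration
 * only):
 *
 *	tunnel = tb_tunnel_alloc_dma(tb, nhi, dst, 8, 1, 8, 2);
 *
 * builds both a TX path (NHI ring 1 -> HopID 8 on @dst) and an RX path
 * (HopID 8 on @dst -> NHI ring 2). Passing %-1 for transmit_ring or
 * receive_ring skips the corresponding path.
 */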

/**
 * tb_tunnel_match_dma() - Match DMA tunnel
 * @tunnel: Tunnel to match
 * @transmit_path: HopID used for transmitting packets. Pass %-1 to ignore.
 * @transmit_ring: NHI ring number used to send packets towards the
 *		   other domain. Pass %-1 to ignore.
 * @receive_path: HopID used for receiving packets. Pass %-1 to ignore.
 * @receive_ring: NHI ring number used to receive packets from the
 *		  other domain. Pass %-1 to ignore.
 *
 * This function can be used to match a specific DMA tunnel, if there are
 * multiple DMA tunnels going through the same XDomain connection.
 * Returns true if there is a match and false otherwise.
 */
bool tb_tunnel_match_dma(const struct tb_tunnel *tunnel, int transmit_path,
			 int transmit_ring, int receive_path, int receive_ring)
{
	const struct tb_path *tx_path = NULL, *rx_path = NULL;
	int i;

	if (!receive_ring || !transmit_ring)
		return false;

	for (i = 0; i < tunnel->npaths; i++) {
		const struct tb_path *path = tunnel->paths[i];

		if (!path)
			continue;

		if (tb_port_is_nhi(path->hops[0].in_port))
			tx_path = path;
		else if (tb_port_is_nhi(path->hops[path->path_length - 1].out_port))
			rx_path = path;
	}

	if (transmit_ring > 0 || transmit_path > 0) {
		if (!tx_path)
			return false;
		if (transmit_ring > 0 &&
		    (tx_path->hops[0].in_hop_index != transmit_ring))
			return false;
		if (transmit_path > 0 &&
		    (tx_path->hops[tx_path->path_length - 1].next_hop_index != transmit_path))
			return false;
	}

	if (receive_ring > 0 || receive_path > 0) {
		if (!rx_path)
			return false;
		if (receive_path > 0 &&
		    (rx_path->hops[0].in_hop_index != receive_path))
			return false;
		if (receive_ring > 0 &&
		    (rx_path->hops[rx_path->path_length - 1].next_hop_index != receive_ring))
			return false;
	}

	return true;
}
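
/*
 * For instance (editor's illustration), tb_tunnel_match_dma(tunnel, -1, 1,
 * -1, -1) matches any tunnel whose TX path starts from NHI ring 1, while
 * ignoring the HopIDs and the RX side completely.
 */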

static int tb_usb3_max_link_rate(struct tb_port *up, struct tb_port *down)
{
	int ret, up_max_rate, down_max_rate;

	ret = usb4_usb3_port_max_link_rate(up);
	if (ret < 0)
		return ret;
	up_max_rate = ret;

	ret = usb4_usb3_port_max_link_rate(down);
	if (ret < 0)
		return ret;
	down_max_rate = ret;

	return min(up_max_rate, down_max_rate);
}

static int tb_usb3_init(struct tb_tunnel *tunnel)
{
	tb_tunnel_dbg(tunnel, "allocating initial bandwidth %d/%d Mb/s\n",
		      tunnel->allocated_up, tunnel->allocated_down);

	return usb4_usb3_port_allocate_bandwidth(tunnel->src_port,
						 &tunnel->allocated_up,
						 &tunnel->allocated_down);
}

static int tb_usb3_activate(struct tb_tunnel *tunnel, bool activate)
{
	int res;

	res = tb_usb3_port_enable(tunnel->src_port, activate);
	if (res)
		return res;

	if (tb_port_is_usb3_up(tunnel->dst_port))
		return tb_usb3_port_enable(tunnel->dst_port, activate);

	return 0;
}

static int tb_usb3_consumed_bandwidth(struct tb_tunnel *tunnel,
				      int *consumed_up, int *consumed_down)
{
	int pcie_enabled = tb_acpi_may_tunnel_pcie();

	/*
	 * PCIe tunneling, if enabled, affects the USB3 bandwidth so
1230 * take that it into account here.
	 */
	*consumed_up = tunnel->allocated_up * (3 + pcie_enabled) / 3;
	*consumed_down = tunnel->allocated_down * (3 + pcie_enabled) / 3;
	return 0;
}
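
/*
 * Editor's note: with PCIe tunneling enabled the allocation is scaled up
 * by 4/3, so for example an allocated 900 Mb/s is reported as
 * 900 * 4 / 3 = 1200 Mb/s of consumed bandwidth.
 */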

static int tb_usb3_release_unused_bandwidth(struct tb_tunnel *tunnel)
{
	int ret;

	ret = usb4_usb3_port_release_bandwidth(tunnel->src_port,
					       &tunnel->allocated_up,
					       &tunnel->allocated_down);
	if (ret)
		return ret;

	tb_tunnel_dbg(tunnel, "decreased bandwidth allocation to %d/%d Mb/s\n",
		      tunnel->allocated_up, tunnel->allocated_down);
	return 0;
}

static void tb_usb3_reclaim_available_bandwidth(struct tb_tunnel *tunnel,
						int *available_up,
						int *available_down)
{
	int ret, max_rate, allocate_up, allocate_down;

	ret = usb4_usb3_port_actual_link_rate(tunnel->src_port);
	if (ret < 0) {
		tb_tunnel_warn(tunnel, "failed to read actual link rate\n");
		return;
	} else if (!ret) {
		/* Use maximum link rate if the link valid is not set */
		ret = usb4_usb3_port_max_link_rate(tunnel->src_port);
		if (ret < 0) {
			tb_tunnel_warn(tunnel, "failed to read maximum link rate\n");
			return;
		}
	}

	/*
	 * 90% of the max rate can be allocated for isochronous
	 * transfers.
	 */
	max_rate = ret * 90 / 100;

	/* No need to reclaim if already at maximum */
	if (tunnel->allocated_up >= max_rate &&
	    tunnel->allocated_down >= max_rate)
		return;

	/* Don't go lower than what is already allocated */
	allocate_up = min(max_rate, *available_up);
	if (allocate_up < tunnel->allocated_up)
		allocate_up = tunnel->allocated_up;

	allocate_down = min(max_rate, *available_down);
	if (allocate_down < tunnel->allocated_down)
		allocate_down = tunnel->allocated_down;

	/* If no changes no need to do more */
	if (allocate_up == tunnel->allocated_up &&
	    allocate_down == tunnel->allocated_down)
		return;

	ret = usb4_usb3_port_allocate_bandwidth(tunnel->src_port, &allocate_up,
						&allocate_down);
	if (ret) {
		tb_tunnel_info(tunnel, "failed to allocate bandwidth\n");
		return;
	}

	tunnel->allocated_up = allocate_up;
	*available_up -= tunnel->allocated_up;

	tunnel->allocated_down = allocate_down;
	*available_down -= tunnel->allocated_down;

	tb_tunnel_dbg(tunnel, "increased bandwidth allocation to %d/%d Mb/s\n",
		      tunnel->allocated_up, tunnel->allocated_down);
}

static void tb_usb3_init_credits(struct tb_path_hop *hop)
{
	struct tb_port *port = hop->in_port;
	struct tb_switch *sw = port->sw;
	unsigned int credits;

	if (tb_port_use_credit_allocation(port)) {
		credits = sw->max_usb3_credits;
	} else {
		if (tb_port_is_null(port))
			credits = port->bonded ? 32 : 16;
		else
			credits = 7;
	}

	hop->initial_credits = credits;
}

static void tb_usb3_init_path(struct tb_path *path)
{
	struct tb_path_hop *hop;

	path->egress_fc_enable = TB_PATH_SOURCE | TB_PATH_INTERNAL;
	path->egress_shared_buffer = TB_PATH_NONE;
	path->ingress_fc_enable = TB_PATH_ALL;
	path->ingress_shared_buffer = TB_PATH_NONE;
	path->priority = 3;
	path->weight = 3;
	path->drop_packages = 0;

	tb_path_for_each_hop(path, hop)
		tb_usb3_init_credits(hop);
}

/**
 * tb_tunnel_discover_usb3() - Discover existing USB3 tunnels
 * @tb: Pointer to the domain structure
 * @down: USB3 downstream adapter
 * @alloc_hopid: Allocate HopIDs from visited ports
 *
 * If @down adapter is active, follows the tunnel to the USB3 upstream
 * adapter and back. Returns the discovered tunnel or %NULL if there was
 * no tunnel.
 */
struct tb_tunnel *tb_tunnel_discover_usb3(struct tb *tb, struct tb_port *down,
					  bool alloc_hopid)
{
	struct tb_tunnel *tunnel;
	struct tb_path *path;

	if (!tb_usb3_port_is_enabled(down))
		return NULL;

	tunnel = tb_tunnel_alloc(tb, 2, TB_TUNNEL_USB3);
	if (!tunnel)
		return NULL;

	tunnel->activate = tb_usb3_activate;
	tunnel->src_port = down;

	/*
	 * Discover both paths even if they are not complete. We will
	 * clean them up by calling tb_tunnel_deactivate() below in that
	 * case.
	 */
	path = tb_path_discover(down, TB_USB3_HOPID, NULL, -1,
				&tunnel->dst_port, "USB3 Down", alloc_hopid);
	if (!path) {
		/* Just disable the downstream port */
		tb_usb3_port_enable(down, false);
		goto err_free;
	}
	tunnel->paths[TB_USB3_PATH_DOWN] = path;
	tb_usb3_init_path(tunnel->paths[TB_USB3_PATH_DOWN]);

	path = tb_path_discover(tunnel->dst_port, -1, down, TB_USB3_HOPID, NULL,
				"USB3 Up", alloc_hopid);
	if (!path)
		goto err_deactivate;
	tunnel->paths[TB_USB3_PATH_UP] = path;
	tb_usb3_init_path(tunnel->paths[TB_USB3_PATH_UP]);

	/* Validate that the tunnel is complete */
	if (!tb_port_is_usb3_up(tunnel->dst_port)) {
		tb_port_warn(tunnel->dst_port,
1398 "path does not end on an USB3 adapter, cleaning up\n");
		goto err_deactivate;
	}

	if (down != tunnel->src_port) {
		tb_tunnel_warn(tunnel, "path is not complete, cleaning up\n");
		goto err_deactivate;
	}

	if (!tb_usb3_port_is_enabled(tunnel->dst_port)) {
		tb_tunnel_warn(tunnel,
			       "tunnel is not fully activated, cleaning up\n");
		goto err_deactivate;
	}

	if (!tb_route(down->sw)) {
		int ret;

		/*
		 * Read the initial bandwidth allocation for the first
		 * hop tunnel.
		 */
		ret = usb4_usb3_port_allocated_bandwidth(down,
			&tunnel->allocated_up, &tunnel->allocated_down);
		if (ret)
			goto err_deactivate;

		tb_tunnel_dbg(tunnel, "currently allocated bandwidth %d/%d Mb/s\n",
			      tunnel->allocated_up, tunnel->allocated_down);

		tunnel->init = tb_usb3_init;
		tunnel->consumed_bandwidth = tb_usb3_consumed_bandwidth;
		tunnel->release_unused_bandwidth =
			tb_usb3_release_unused_bandwidth;
		tunnel->reclaim_available_bandwidth =
			tb_usb3_reclaim_available_bandwidth;
	}

	tb_tunnel_dbg(tunnel, "discovered\n");
	return tunnel;

err_deactivate:
	tb_tunnel_deactivate(tunnel);
err_free:
	tb_tunnel_free(tunnel);

	return NULL;
}

/**
 * tb_tunnel_alloc_usb3() - allocate a USB3 tunnel
 * @tb: Pointer to the domain structure
 * @up: USB3 upstream adapter port
 * @down: USB3 downstream adapter port
 * @max_up: Maximum available upstream bandwidth for the USB3 tunnel (%0
 *	    if not limited).
 * @max_down: Maximum available downstream bandwidth for the USB3 tunnel
 *	      (%0 if not limited).
 *
1457 * Allocate an USB3 tunnel. The ports must be of type @TB_TYPE_USB3_UP and
1458 * @TB_TYPE_USB3_DOWN.
1459 *
1460 * Return: Returns a tb_tunnel on success or %NULL on failure.
1461 */
1462struct tb_tunnel *tb_tunnel_alloc_usb3(struct tb *tb, struct tb_port *up,
Mika Westerberg0bd680c2020-03-24 14:44:13 +02001463 struct tb_port *down, int max_up,
1464 int max_down)
Rajmohan Manie6f81852019-12-17 15:33:44 +03001465{
1466 struct tb_tunnel *tunnel;
1467 struct tb_path *path;
Mika Westerberg0bd680c2020-03-24 14:44:13 +02001468 int max_rate = 0;
1469
1470 /*
1471 * Check that we have enough bandwidth available for the new
1472 * USB3 tunnel.
1473 */
1474 if (max_up > 0 || max_down > 0) {
1475 max_rate = tb_usb3_max_link_rate(down, up);
1476 if (max_rate < 0)
1477 return NULL;
1478
1479 /* Only 90% can be allocated for USB3 isochronous transfers */
1480 max_rate = max_rate * 90 / 100;
1481 tb_port_dbg(up, "required bandwidth for USB3 tunnel %d Mb/s\n",
1482 max_rate);
1483
1484 if (max_rate > max_up || max_rate > max_down) {
1485 tb_port_warn(up, "not enough bandwidth for USB3 tunnel\n");
1486 return NULL;
1487 }
1488 }
Rajmohan Manie6f81852019-12-17 15:33:44 +03001489
1490 tunnel = tb_tunnel_alloc(tb, 2, TB_TUNNEL_USB3);
1491 if (!tunnel)
1492 return NULL;
1493
1494 tunnel->activate = tb_usb3_activate;
1495 tunnel->src_port = down;
1496 tunnel->dst_port = up;
Mika Westerberg0bd680c2020-03-24 14:44:13 +02001497 tunnel->max_up = max_up;
1498 tunnel->max_down = max_down;
Rajmohan Manie6f81852019-12-17 15:33:44 +03001499
1500 path = tb_path_alloc(tb, down, TB_USB3_HOPID, up, TB_USB3_HOPID, 0,
1501 "USB3 Down");
1502 if (!path) {
1503 tb_tunnel_free(tunnel);
1504 return NULL;
1505 }
1506 tb_usb3_init_path(path);
1507 tunnel->paths[TB_USB3_PATH_DOWN] = path;
1508
1509 path = tb_path_alloc(tb, up, TB_USB3_HOPID, down, TB_USB3_HOPID, 0,
1510 "USB3 Up");
1511 if (!path) {
1512 tb_tunnel_free(tunnel);
1513 return NULL;
1514 }
1515 tb_usb3_init_path(path);
1516 tunnel->paths[TB_USB3_PATH_UP] = path;
1517
Mika Westerberg0bd680c2020-03-24 14:44:13 +02001518 if (!tb_route(down->sw)) {
1519 tunnel->allocated_up = max_rate;
1520 tunnel->allocated_down = max_rate;
1521
1522 tunnel->init = tb_usb3_init;
1523 tunnel->consumed_bandwidth = tb_usb3_consumed_bandwidth;
1524 tunnel->release_unused_bandwidth =
1525 tb_usb3_release_unused_bandwidth;
1526 tunnel->reclaim_available_bandwidth =
1527 tb_usb3_reclaim_available_bandwidth;
1528 }
1529
Rajmohan Manie6f81852019-12-17 15:33:44 +03001530 return tunnel;
1531}
1532
Andreas Noever3364f0c2014-06-03 22:04:08 +02001533/**
Mika Westerberg93f36ad2017-02-19 13:48:29 +02001534 * tb_tunnel_free() - free a tunnel
1535 * @tunnel: Tunnel to be freed
Andreas Noever3364f0c2014-06-03 22:04:08 +02001536 *
Mika Westerbergab9f31c2019-03-06 18:21:08 +02001537 * Frees a tunnel. The tunnel does not need to be deactivated.
Andreas Noever3364f0c2014-06-03 22:04:08 +02001538 */
Mika Westerberg93f36ad2017-02-19 13:48:29 +02001539void tb_tunnel_free(struct tb_tunnel *tunnel)
Andreas Noever3364f0c2014-06-03 22:04:08 +02001540{
Mika Westerberg93f36ad2017-02-19 13:48:29 +02001541 int i;
1542
1543 if (!tunnel)
Andreas Noever3364f0c2014-06-03 22:04:08 +02001544 return;
Mika Westerberg93f36ad2017-02-19 13:48:29 +02001545
Mika Westerberg6ed541c2021-03-22 18:09:35 +02001546 if (tunnel->deinit)
1547 tunnel->deinit(tunnel);
1548
Mika Westerberg93f36ad2017-02-19 13:48:29 +02001549 for (i = 0; i < tunnel->npaths; i++) {
Mika Westerberg93f36ad2017-02-19 13:48:29 +02001550 if (tunnel->paths[i])
1551 tb_path_free(tunnel->paths[i]);
1552 }
1553
1554 kfree(tunnel->paths);
Andreas Noever3364f0c2014-06-03 22:04:08 +02001555 kfree(tunnel);
1556}
1557
1558/**
Mika Westerberg93f36ad2017-02-19 13:48:29 +02001559 * tb_tunnel_is_invalid - check whether an activated path is still valid
1560 * @tunnel: Tunnel to check
Andreas Noever3364f0c2014-06-03 22:04:08 +02001561 */
Mika Westerberg93f36ad2017-02-19 13:48:29 +02001562bool tb_tunnel_is_invalid(struct tb_tunnel *tunnel)
Andreas Noever3364f0c2014-06-03 22:04:08 +02001563{
Mika Westerberg93f36ad2017-02-19 13:48:29 +02001564 int i;
Andreas Noever3364f0c2014-06-03 22:04:08 +02001565
Mika Westerberg93f36ad2017-02-19 13:48:29 +02001566 for (i = 0; i < tunnel->npaths; i++) {
1567 WARN_ON(!tunnel->paths[i]->activated);
1568 if (tb_path_is_invalid(tunnel->paths[i]))
1569 return true;
1570 }
1571
1572 return false;
Andreas Noever3364f0c2014-06-03 22:04:08 +02001573}
1574
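/*
 * Sketch (not from the driver) of how invalid tunnels are typically cleaned
 * up after a hot-unplug: walk the active tunnels, and tear down the ones
 * whose paths are no longer valid. The tunnel list parameter and function
 * name are hypothetical; in the driver the list belongs to the connection
 * manager. Only meaningful for tunnels whose paths have been activated.
 */
static void __maybe_unused example_free_invalid_tunnels(struct list_head *tunnel_list)
{
	struct tb_tunnel *tunnel;
	struct tb_tunnel *n;

	list_for_each_entry_safe(tunnel, n, tunnel_list, list) {
		if (tb_tunnel_is_invalid(tunnel)) {
			tb_tunnel_deactivate(tunnel);
			list_del(&tunnel->list);
			tb_tunnel_free(tunnel);
		}
	}
}
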
1575/**
Mika Westerberg93f36ad2017-02-19 13:48:29 +02001576 * tb_tunnel_restart() - activate a tunnel after a hardware reset
1577 * @tunnel: Tunnel to restart
Andreas Noever3364f0c2014-06-03 22:04:08 +02001578 *
Mika Westerberg93f36ad2017-02-19 13:48:29 +02001579 * Return: 0 on success and negative errno in case of failure
Andreas Noever3364f0c2014-06-03 22:04:08 +02001580 */
Mika Westerberg93f36ad2017-02-19 13:48:29 +02001581int tb_tunnel_restart(struct tb_tunnel *tunnel)
Andreas Noever3364f0c2014-06-03 22:04:08 +02001582{
Mika Westerberg93f36ad2017-02-19 13:48:29 +02001583 int res, i;
Andreas Noever3364f0c2014-06-03 22:04:08 +02001584
Mika Westerberg62efe692018-09-17 16:32:13 +03001585 tb_tunnel_dbg(tunnel, "activating\n");
Andreas Noever3364f0c2014-06-03 22:04:08 +02001586
Mika Westerbergaae9e272017-02-19 23:37:35 +02001587 /*
1588 * Make sure all paths are properly disabled before enabling
1589 * them again.
1590 */
Mika Westerberg93f36ad2017-02-19 13:48:29 +02001591 for (i = 0; i < tunnel->npaths; i++) {
Mika Westerbergaae9e272017-02-19 23:37:35 +02001592 if (tunnel->paths[i]->activated) {
1593 tb_path_deactivate(tunnel->paths[i]);
1594 tunnel->paths[i]->activated = false;
1595 }
1596 }
1597
Mika Westerberg4f807e42018-09-17 16:30:49 +03001598 if (tunnel->init) {
1599 res = tunnel->init(tunnel);
1600 if (res)
1601 return res;
1602 }
1603
Mika Westerbergaae9e272017-02-19 23:37:35 +02001604 for (i = 0; i < tunnel->npaths; i++) {
Mika Westerberg93f36ad2017-02-19 13:48:29 +02001605 res = tb_path_activate(tunnel->paths[i]);
1606 if (res)
1607 goto err;
1608 }
Andreas Noever3364f0c2014-06-03 22:04:08 +02001609
Mika Westerberg93f36ad2017-02-19 13:48:29 +02001610 if (tunnel->activate) {
1611 res = tunnel->activate(tunnel, true);
1612 if (res)
1613 goto err;
1614 }
Andreas Noever3364f0c2014-06-03 22:04:08 +02001615
Andreas Noever3364f0c2014-06-03 22:04:08 +02001616 return 0;
Mika Westerberg93f36ad2017-02-19 13:48:29 +02001617
Andreas Noever3364f0c2014-06-03 22:04:08 +02001618err:
1619 tb_tunnel_warn(tunnel, "activation failed\n");
Mika Westerberg93f36ad2017-02-19 13:48:29 +02001620 tb_tunnel_deactivate(tunnel);
Andreas Noever3364f0c2014-06-03 22:04:08 +02001621 return res;
1622}
1623
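/*
 * Sketch of how tb_tunnel_restart() is meant to be used after the hardware
 * has been reset, e.g. when resuming from system sleep: simply walk the
 * existing tunnels and re-activate each one. The list parameter and function
 * name are illustrative only.
 */
static void __maybe_unused example_restart_tunnels(struct list_head *tunnel_list)
{
	struct tb_tunnel *tunnel;

	list_for_each_entry(tunnel, tunnel_list, list)
		tb_tunnel_restart(tunnel);
}
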
1624/**
Mika Westerberg93f36ad2017-02-19 13:48:29 +02001625 * tb_tunnel_activate() - activate a tunnel
1626 * @tunnel: Tunnel to activate
Andreas Noever3364f0c2014-06-03 22:04:08 +02001627 *
 1628 * Return: 0 on success or an error code on failure.
1629 */
Mika Westerberg93f36ad2017-02-19 13:48:29 +02001630int tb_tunnel_activate(struct tb_tunnel *tunnel)
Andreas Noever3364f0c2014-06-03 22:04:08 +02001631{
Mika Westerberg93f36ad2017-02-19 13:48:29 +02001632 int i;
1633
Mika Westerberg93f36ad2017-02-19 13:48:29 +02001634 for (i = 0; i < tunnel->npaths; i++) {
1635 if (tunnel->paths[i]->activated) {
1636 tb_tunnel_WARN(tunnel,
1637 "trying to activate an already activated tunnel\n");
1638 return -EINVAL;
1639 }
Andreas Noever3364f0c2014-06-03 22:04:08 +02001640 }
1641
Mika Westerberg93f36ad2017-02-19 13:48:29 +02001642 return tb_tunnel_restart(tunnel);
Andreas Noever3364f0c2014-06-03 22:04:08 +02001643}
1644
Andreas Noever3364f0c2014-06-03 22:04:08 +02001645/**
Mika Westerberg93f36ad2017-02-19 13:48:29 +02001646 * tb_tunnel_deactivate() - deactivate a tunnel
1647 * @tunnel: Tunnel to deactivate
Andreas Noever3364f0c2014-06-03 22:04:08 +02001648 */
Mika Westerberg93f36ad2017-02-19 13:48:29 +02001649void tb_tunnel_deactivate(struct tb_tunnel *tunnel)
Andreas Noever3364f0c2014-06-03 22:04:08 +02001650{
Mika Westerberg93f36ad2017-02-19 13:48:29 +02001651 int i;
Andreas Noever3364f0c2014-06-03 22:04:08 +02001652
Mika Westerberg62efe692018-09-17 16:32:13 +03001653 tb_tunnel_dbg(tunnel, "deactivating\n");
Mika Westerberg93f36ad2017-02-19 13:48:29 +02001654
1655 if (tunnel->activate)
1656 tunnel->activate(tunnel, false);
1657
1658 for (i = 0; i < tunnel->npaths; i++) {
Mika Westerberg0414bec2017-02-19 23:43:26 +02001659 if (tunnel->paths[i] && tunnel->paths[i]->activated)
Mika Westerberg93f36ad2017-02-19 13:48:29 +02001660 tb_path_deactivate(tunnel->paths[i]);
1661 }
1662}
Mika Westerberga11b88a2019-03-26 16:03:48 +03001663
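/*
 * Sketch of a typical ordered teardown when the hardware is still present:
 * deactivate the paths first, then drop the tunnel from the (hypothetical)
 * bookkeeping list and free it. If the ports are already gone, calling
 * tb_tunnel_free() alone is enough.
 */
static void __maybe_unused example_teardown_tunnel(struct tb_tunnel *tunnel)
{
	tb_tunnel_deactivate(tunnel);
	list_del(&tunnel->list);
	tb_tunnel_free(tunnel);
}
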
1664/**
Mika Westerberg0bd680c2020-03-24 14:44:13 +02001665 * tb_tunnel_port_on_path() - Does the tunnel go through port
Mika Westerberga11b88a2019-03-26 16:03:48 +03001666 * @tunnel: Tunnel to check
Mika Westerberg0bd680c2020-03-24 14:44:13 +02001667 * @port: Port to check
Mika Westerberga11b88a2019-03-26 16:03:48 +03001668 *
Mika Westerberg0bd680c2020-03-24 14:44:13 +02001669 * Returns true if @tunnel goes through @port (direction does not matter),
Mika Westerberga11b88a2019-03-26 16:03:48 +03001670 * false otherwise.
1671 */
Mika Westerberg0bd680c2020-03-24 14:44:13 +02001672bool tb_tunnel_port_on_path(const struct tb_tunnel *tunnel,
1673 const struct tb_port *port)
Mika Westerberga11b88a2019-03-26 16:03:48 +03001674{
1675 int i;
1676
1677 for (i = 0; i < tunnel->npaths; i++) {
1678 if (!tunnel->paths[i])
1679 continue;
Mika Westerberg0bd680c2020-03-24 14:44:13 +02001680
1681 if (tb_path_port_on_path(tunnel->paths[i], port))
Mika Westerberga11b88a2019-03-26 16:03:48 +03001682 return true;
1683 }
1684
1685 return false;
1686}
1687
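/*
 * Sketch: locating the first tunnel, if any, that runs through a given lane
 * adapter using tb_tunnel_port_on_path(). The tunnel list argument and the
 * function name are hypothetical; the driver keeps its tunnels on a
 * connection manager specific list.
 */
static __maybe_unused struct tb_tunnel *
example_find_tunnel_on_port(struct list_head *tunnel_list,
			    const struct tb_port *port)
{
	struct tb_tunnel *tunnel;

	list_for_each_entry(tunnel, tunnel_list, list) {
		if (tb_tunnel_port_on_path(tunnel, port))
			return tunnel;
	}

	return NULL;
}
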
1688static bool tb_tunnel_is_active(const struct tb_tunnel *tunnel)
1689{
1690 int i;
1691
1692 for (i = 0; i < tunnel->npaths; i++) {
1693 if (!tunnel->paths[i])
1694 return false;
1695 if (!tunnel->paths[i]->activated)
1696 return false;
1697 }
1698
1699 return true;
1700}
1701
1702/**
1703 * tb_tunnel_consumed_bandwidth() - Return bandwidth consumed by the tunnel
1704 * @tunnel: Tunnel to check
Mika Westerberg7c0ee8f2020-03-28 12:52:31 +02001705 * @consumed_up: Consumed bandwidth in Mb/s from @dst_port to @src_port.
1706 * Can be %NULL.
1707 * @consumed_down: Consumed bandwidth in Mb/s from @src_port to @dst_port.
1708 * Can be %NULL.
Mika Westerberga11b88a2019-03-26 16:03:48 +03001709 *
Mika Westerberg7c0ee8f2020-03-28 12:52:31 +02001710 * Stores the amount of isochronous bandwidth @tunnel consumes in
1711 * @consumed_up and @consumed_down. In case of success returns %0,
1712 * negative errno otherwise.
Mika Westerberga11b88a2019-03-26 16:03:48 +03001713 */
Mika Westerberg7c0ee8f2020-03-28 12:52:31 +02001714int tb_tunnel_consumed_bandwidth(struct tb_tunnel *tunnel, int *consumed_up,
1715 int *consumed_down)
Mika Westerberga11b88a2019-03-26 16:03:48 +03001716{
Mika Westerberg7c0ee8f2020-03-28 12:52:31 +02001717 int up_bw = 0, down_bw = 0;
1718
Mika Westerberga11b88a2019-03-26 16:03:48 +03001719 if (!tb_tunnel_is_active(tunnel))
Mika Westerberg7c0ee8f2020-03-28 12:52:31 +02001720 goto out;
Mika Westerberga11b88a2019-03-26 16:03:48 +03001721
1722 if (tunnel->consumed_bandwidth) {
Mika Westerberg7c0ee8f2020-03-28 12:52:31 +02001723 int ret;
Mika Westerberga11b88a2019-03-26 16:03:48 +03001724
Mika Westerberg7c0ee8f2020-03-28 12:52:31 +02001725 ret = tunnel->consumed_bandwidth(tunnel, &up_bw, &down_bw);
1726 if (ret)
1727 return ret;
1728
1729 tb_tunnel_dbg(tunnel, "consumed bandwidth %d/%d Mb/s\n", up_bw,
1730 down_bw);
Mika Westerberga11b88a2019-03-26 16:03:48 +03001731 }
1732
Mika Westerberg7c0ee8f2020-03-28 12:52:31 +02001733out:
1734 if (consumed_up)
1735 *consumed_up = up_bw;
1736 if (consumed_down)
1737 *consumed_down = down_bw;
1738
Mika Westerberga11b88a2019-03-26 16:03:48 +03001739 return 0;
1740}
Mika Westerberg0bd680c2020-03-24 14:44:13 +02001741
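/*
 * Sketch: summing the isochronous bandwidth consumed by all tunnels on a
 * hypothetical list, as a caller might do before deciding whether a new
 * tunnel fits. The function name and list parameter are illustrative only.
 */
static int __maybe_unused
example_total_consumed_bandwidth(struct list_head *tunnel_list, int *total_up,
				 int *total_down)
{
	struct tb_tunnel *tunnel;

	*total_up = 0;
	*total_down = 0;

	list_for_each_entry(tunnel, tunnel_list, list) {
		int up, down, ret;

		ret = tb_tunnel_consumed_bandwidth(tunnel, &up, &down);
		if (ret)
			return ret;

		*total_up += up;
		*total_down += down;
	}

	return 0;
}
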
1742/**
1743 * tb_tunnel_release_unused_bandwidth() - Release unused bandwidth
1744 * @tunnel: Tunnel whose unused bandwidth to release
1745 *
 1746 * If the tunnel supports dynamic bandwidth management (USB3 tunnels at the
 1747 * moment), this function makes it release all the unused bandwidth.
1748 *
1749 * Returns %0 in case of success and negative errno otherwise.
1750 */
1751int tb_tunnel_release_unused_bandwidth(struct tb_tunnel *tunnel)
1752{
1753 if (!tb_tunnel_is_active(tunnel))
1754 return 0;
1755
1756 if (tunnel->release_unused_bandwidth) {
1757 int ret;
1758
1759 ret = tunnel->release_unused_bandwidth(tunnel);
1760 if (ret)
1761 return ret;
1762 }
1763
1764 return 0;
1765}
1766
1767/**
1768 * tb_tunnel_reclaim_available_bandwidth() - Reclaim available bandwidth
1769 * @tunnel: Tunnel reclaiming available bandwidth
1770 * @available_up: Available upstream bandwidth (in Mb/s)
1771 * @available_down: Available downstream bandwidth (in Mb/s)
1772 *
1773 * Reclaims bandwidth from @available_up and @available_down and updates
 1774 * the variables accordingly (e.g. decreases both according to what was
 1775 * reclaimed by the tunnel). If nothing was reclaimed, the values are
 1776 * kept as is.
1777 */
1778void tb_tunnel_reclaim_available_bandwidth(struct tb_tunnel *tunnel,
1779 int *available_up,
1780 int *available_down)
1781{
1782 if (!tb_tunnel_is_active(tunnel))
1783 return;
1784
1785 if (tunnel->reclaim_available_bandwidth)
1786 tunnel->reclaim_available_bandwidth(tunnel, available_up,
1787 available_down);
1788}
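/*
 * Sketch of how the release/reclaim pair above is meant to be used when
 * bandwidth is needed for a new consumer (e.g. a DP tunnel): squeeze the
 * USB3 tunnel down to its minimum, let the new consumer take what it needs
 * from @available_up/@available_down, and finally hand the leftovers back
 * to the USB3 tunnel. The function name and the usb3_tunnel parameter are
 * illustrative only.
 */
static void __maybe_unused
example_make_room_for_new_tunnel(struct tb_tunnel *usb3_tunnel,
				 int *available_up, int *available_down)
{
	if (tb_tunnel_release_unused_bandwidth(usb3_tunnel))
		return;

	/*
	 * ... here the caller would subtract the bandwidth consumed by the
	 * new tunnel from *available_up / *available_down ...
	 */

	tb_tunnel_reclaim_available_bandwidth(usb3_tunnel, available_up,
					      available_down);
}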