// SPDX-License-Identifier: GPL-2.0
/*
 * Thunderbolt driver - bus logic (NHI independent)
 *
 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
 * Copyright (C) 2019, Intel Corporation
 */

#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/pm_runtime.h>
#include <linux/platform_data/x86/apple.h>

#include "tb.h"
#include "tb_regs.h"
#include "tunnel.h"

#define TB_TIMEOUT	100 /* ms */

/**
 * struct tb_cm - Simple Thunderbolt connection manager
 * @tunnel_list: List of active tunnels
 * @dp_resources: List of available DP resources for DP tunneling
 * @hotplug_active: tb_handle_hotplug will stop progressing plug
 *		    events and exit if this is not set (it needs to
 *		    acquire the lock one more time). Used to drain wq
 *		    after cfg has been paused.
 * @remove_work: Work used to remove any unplugged routers after
 *		 runtime resume
 */
struct tb_cm {
	struct list_head tunnel_list;
	struct list_head dp_resources;
	bool hotplug_active;
	struct delayed_work remove_work;
};

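/*
 * The connection manager private data (struct tb_cm) is allocated
 * directly after struct tb in the same allocation (see tb_priv()), so
 * the domain structure can be recovered from the private data simply
 * by stepping back over sizeof(struct tb).
 */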
static inline struct tb *tcm_to_tb(struct tb_cm *tcm)
{
	return ((void *)tcm - sizeof(struct tb));
}

struct tb_hotplug_event {
	struct work_struct work;
	struct tb *tb;
	u64 route;
	u8 port;
	bool unplug;
};

static void tb_handle_hotplug(struct work_struct *work);

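/*
 * tb_queue_hotplug() - defer a hotplug event to the domain workqueue
 *
 * Allocates a tb_hotplug_event describing the affected port and queues
 * it on tb->wq for tb_handle_hotplug(). If the allocation fails the
 * event is silently dropped.
 */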
static void tb_queue_hotplug(struct tb *tb, u64 route, u8 port, bool unplug)
{
	struct tb_hotplug_event *ev;

	ev = kmalloc(sizeof(*ev), GFP_KERNEL);
	if (!ev)
		return;

	ev->tb = tb;
	ev->route = route;
	ev->port = port;
	ev->unplug = unplug;
	INIT_WORK(&ev->work, tb_handle_hotplug);
	queue_work(tb->wq, &ev->work);
}

/* enumeration & hot plug handling */

static void tb_add_dp_resources(struct tb_switch *sw)
{
	struct tb_cm *tcm = tb_priv(sw->tb);
	struct tb_port *port;

	tb_switch_for_each_port(sw, port) {
		if (!tb_port_is_dpin(port))
			continue;

		if (!tb_switch_query_dp_resource(sw, port))
			continue;

		list_add_tail(&port->list, &tcm->dp_resources);
		tb_port_dbg(port, "DP IN resource available\n");
	}
}

static void tb_remove_dp_resources(struct tb_switch *sw)
{
	struct tb_cm *tcm = tb_priv(sw->tb);
	struct tb_port *port, *tmp;

	/* Clear children resources first */
	tb_switch_for_each_port(sw, port) {
		if (tb_port_has_remote(port))
			tb_remove_dp_resources(port->remote->sw);
	}

	list_for_each_entry_safe(port, tmp, &tcm->dp_resources, list) {
		if (port->sw == sw) {
			tb_port_dbg(port, "DP OUT resource unavailable\n");
			list_del_init(&port->list);
		}
	}
}

static void tb_switch_discover_tunnels(struct tb_switch *sw,
				       struct list_head *list,
				       bool alloc_hopids)
{
	struct tb *tb = sw->tb;
	struct tb_port *port;

	tb_switch_for_each_port(sw, port) {
		struct tb_tunnel *tunnel = NULL;

		switch (port->config.type) {
		case TB_TYPE_DP_HDMI_IN:
			tunnel = tb_tunnel_discover_dp(tb, port, alloc_hopids);
			break;

		case TB_TYPE_PCIE_DOWN:
			tunnel = tb_tunnel_discover_pci(tb, port, alloc_hopids);
			break;

		case TB_TYPE_USB3_DOWN:
			tunnel = tb_tunnel_discover_usb3(tb, port, alloc_hopids);
			break;

		default:
			break;
		}

		if (tunnel)
			list_add_tail(&tunnel->list, list);
	}

	tb_switch_for_each_port(sw, port) {
		if (tb_port_has_remote(port)) {
			tb_switch_discover_tunnels(port->remote->sw, list,
						   alloc_hopids);
		}
	}
}

static void tb_discover_tunnels(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel;

	tb_switch_discover_tunnels(tb->root_switch, &tcm->tunnel_list, true);

	list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
		if (tb_tunnel_is_pci(tunnel)) {
			struct tb_switch *parent = tunnel->dst_port->sw;

			while (parent != tunnel->src_port->sw) {
				parent->boot = true;
				parent = tb_switch_parent(parent);
			}
		} else if (tb_tunnel_is_dp(tunnel)) {
			/* Keep the domain from powering down */
			pm_runtime_get_sync(&tunnel->src_port->sw->dev);
			pm_runtime_get_sync(&tunnel->dst_port->sw->dev);
		}
	}
}

static int tb_port_configure_xdomain(struct tb_port *port)
{
	/*
	 * XDomain paths currently only support single lane so we must
	 * disable the other lane according to USB4 spec.
	 */
	tb_port_disable(port->dual_link_port);

	if (tb_switch_is_usb4(port->sw))
		return usb4_port_configure_xdomain(port);
	return tb_lc_configure_xdomain(port);
}

static void tb_port_unconfigure_xdomain(struct tb_port *port)
{
	if (tb_switch_is_usb4(port->sw))
		usb4_port_unconfigure_xdomain(port);
	else
		tb_lc_unconfigure_xdomain(port);

	tb_port_enable(port->dual_link_port);
}

static void tb_scan_xdomain(struct tb_port *port)
{
	struct tb_switch *sw = port->sw;
	struct tb *tb = sw->tb;
	struct tb_xdomain *xd;
	u64 route;

	if (!tb_is_xdomain_enabled())
		return;

	route = tb_downstream_route(port);
	xd = tb_xdomain_find_by_route(tb, route);
	if (xd) {
		tb_xdomain_put(xd);
		return;
	}

	xd = tb_xdomain_alloc(tb, &sw->dev, route, tb->root_switch->uuid,
			      NULL);
	if (xd) {
		tb_port_at(route, sw)->xdomain = xd;
		tb_port_configure_xdomain(port);
		tb_xdomain_add(xd);
	}
}

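/*
 * tb_enable_tmu() - make sure the router TMU is running as requested
 *
 * If the TMU is already enabled in HiFi mode with the requested
 * (uni/bi-directional) setting nothing is done. Otherwise the TMU is
 * disabled, the local time is posted to the router and the TMU is
 * re-enabled with the current configuration.
 */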
static int tb_enable_tmu(struct tb_switch *sw)
{
	int ret;

	/* If it is already enabled in correct mode, don't touch it */
	if (tb_switch_tmu_hifi_is_enabled(sw, sw->tmu.unidirectional_request))
		return 0;

	ret = tb_switch_tmu_disable(sw);
	if (ret)
		return ret;

	ret = tb_switch_tmu_post_time(sw);
	if (ret)
		return ret;

	return tb_switch_tmu_enable(sw);
}

/**
 * tb_find_unused_port() - return the first inactive port on @sw
 * @sw: Switch to find the port on
 * @type: Port type to look for
 */
static struct tb_port *tb_find_unused_port(struct tb_switch *sw,
					   enum tb_port_type type)
{
	struct tb_port *port;

	tb_switch_for_each_port(sw, port) {
		if (tb_is_upstream_port(port))
			continue;
		if (port->config.type != type)
			continue;
		if (!port->cap_adap)
			continue;
		if (tb_port_is_enabled(port))
			continue;
		return port;
	}
	return NULL;
}

static struct tb_port *tb_find_usb3_down(struct tb_switch *sw,
					 const struct tb_port *port)
{
	struct tb_port *down;

	down = usb4_switch_map_usb3_down(sw, port);
	if (down && !tb_usb3_port_is_enabled(down))
		return down;
	return NULL;
}

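/*
 * tb_find_tunnel() - find an active tunnel of @type
 *
 * Returns the first tunnel on the list whose type matches and whose
 * source or destination adapter equals @src_port or @dst_port. A NULL
 * port is skipped in the comparison, so passing only one end is enough.
 */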
static struct tb_tunnel *tb_find_tunnel(struct tb *tb, enum tb_tunnel_type type,
					struct tb_port *src_port,
					struct tb_port *dst_port)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel;

	list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
		if (tunnel->type == type &&
		    ((src_port && src_port == tunnel->src_port) ||
		     (dst_port && dst_port == tunnel->dst_port))) {
			return tunnel;
		}
	}

	return NULL;
}

static struct tb_tunnel *tb_find_first_usb3_tunnel(struct tb *tb,
						   struct tb_port *src_port,
						   struct tb_port *dst_port)
{
	struct tb_port *port, *usb3_down;
	struct tb_switch *sw;

	/* Pick the router that is deepest in the topology */
	if (dst_port->sw->config.depth > src_port->sw->config.depth)
		sw = dst_port->sw;
	else
		sw = src_port->sw;

	/* Can't be the host router */
	if (sw == tb->root_switch)
		return NULL;

	/* Find the downstream USB4 port that leads to this router */
	port = tb_port_at(tb_route(sw), tb->root_switch);
	/* Find the corresponding host router USB3 downstream port */
	usb3_down = usb4_switch_map_usb3_down(tb->root_switch, port);
	if (!usb3_down)
		return NULL;

	return tb_find_tunnel(tb, TB_TUNNEL_USB3, usb3_down, NULL);
}

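/*
 * tb_available_bandwidth() - bandwidth still free between two adapters
 *
 * Walks every link on the path from @src_port to @dst_port. Each link
 * starts from its raw bandwidth minus a 10% guard band, then the
 * bandwidth consumed by DP tunnels crossing the link and by the USB3
 * tunnel of the branch is subtracted. The smallest remaining value over
 * all links is returned in @available_up / @available_down, clamped to
 * zero.
 */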
static int tb_available_bandwidth(struct tb *tb, struct tb_port *src_port,
				  struct tb_port *dst_port, int *available_up, int *available_down)
{
	int usb3_consumed_up, usb3_consumed_down, ret;
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel;
	struct tb_port *port;

	tb_port_dbg(dst_port, "calculating available bandwidth\n");

	tunnel = tb_find_first_usb3_tunnel(tb, src_port, dst_port);
	if (tunnel) {
		ret = tb_tunnel_consumed_bandwidth(tunnel, &usb3_consumed_up,
						   &usb3_consumed_down);
		if (ret)
			return ret;
	} else {
		usb3_consumed_up = 0;
		usb3_consumed_down = 0;
	}

	*available_up = *available_down = 40000;

	/* Find the minimum available bandwidth over all links */
	tb_for_each_port_on_path(src_port, dst_port, port) {
		int link_speed, link_width, up_bw, down_bw;

		if (!tb_port_is_null(port))
			continue;

		if (tb_is_upstream_port(port)) {
			link_speed = port->sw->link_speed;
		} else {
			link_speed = tb_port_get_link_speed(port);
			if (link_speed < 0)
				return link_speed;
		}

		link_width = port->bonded ? 2 : 1;

		up_bw = link_speed * link_width * 1000; /* Mb/s */
		/* Leave 10% guard band */
		up_bw -= up_bw / 10;
		down_bw = up_bw;

		tb_port_dbg(port, "link total bandwidth %d Mb/s\n", up_bw);

		/*
		 * Find all DP tunnels that cross the port and reduce
		 * their consumed bandwidth from the available.
		 */
		list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
			int dp_consumed_up, dp_consumed_down;

			if (!tb_tunnel_is_dp(tunnel))
				continue;

			if (!tb_tunnel_port_on_path(tunnel, port))
				continue;

			ret = tb_tunnel_consumed_bandwidth(tunnel,
							   &dp_consumed_up,
							   &dp_consumed_down);
			if (ret)
				return ret;

			up_bw -= dp_consumed_up;
			down_bw -= dp_consumed_down;
		}

		/*
		 * If USB3 is tunneled from the host router down to the
		 * branch leading to port we need to take USB3 consumed
		 * bandwidth into account regardless of whether it actually
		 * crosses the port.
		 */
		up_bw -= usb3_consumed_up;
		down_bw -= usb3_consumed_down;

		if (up_bw < *available_up)
			*available_up = up_bw;
		if (down_bw < *available_down)
			*available_down = down_bw;
	}

	if (*available_up < 0)
		*available_up = 0;
	if (*available_down < 0)
		*available_down = 0;

	return 0;
}

static int tb_release_unused_usb3_bandwidth(struct tb *tb,
					    struct tb_port *src_port,
					    struct tb_port *dst_port)
{
	struct tb_tunnel *tunnel;

	tunnel = tb_find_first_usb3_tunnel(tb, src_port, dst_port);
	return tunnel ? tb_tunnel_release_unused_bandwidth(tunnel) : 0;
}

static void tb_reclaim_usb3_bandwidth(struct tb *tb, struct tb_port *src_port,
				      struct tb_port *dst_port)
{
	int ret, available_up, available_down;
	struct tb_tunnel *tunnel;

	tunnel = tb_find_first_usb3_tunnel(tb, src_port, dst_port);
	if (!tunnel)
		return;

	tb_dbg(tb, "reclaiming unused bandwidth for USB3\n");

	/*
	 * Calculate available bandwidth for the first hop USB3 tunnel.
	 * That determines the whole USB3 bandwidth for this branch.
	 */
	ret = tb_available_bandwidth(tb, tunnel->src_port, tunnel->dst_port,
				     &available_up, &available_down);
	if (ret) {
		tb_warn(tb, "failed to calculate available bandwidth\n");
		return;
	}

	tb_dbg(tb, "available bandwidth for USB3 %d/%d Mb/s\n",
	       available_up, available_down);

	tb_tunnel_reclaim_available_bandwidth(tunnel, &available_up, &available_down);
}

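/*
 * tb_tunnel_usb3() - set up a USB3 tunnel from the parent router to @sw
 *
 * Finds an unused USB3 downstream adapter right above @sw, gives the
 * new tunnel whatever bandwidth is currently unused on the branch and
 * activates it. Returns 0 also when no tunnel is needed (no USB3
 * adapters, non-USB4 link or tunneling disabled by the platform).
 */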
static int tb_tunnel_usb3(struct tb *tb, struct tb_switch *sw)
{
	struct tb_switch *parent = tb_switch_parent(sw);
	int ret, available_up, available_down;
	struct tb_port *up, *down, *port;
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel;

	if (!tb_acpi_may_tunnel_usb3()) {
		tb_dbg(tb, "USB3 tunneling disabled, not creating tunnel\n");
		return 0;
	}

	up = tb_switch_find_port(sw, TB_TYPE_USB3_UP);
	if (!up)
		return 0;

	if (!sw->link_usb4)
		return 0;

	/*
	 * Look up available down port. Since we are chaining it should
	 * be found right above this switch.
	 */
	port = tb_port_at(tb_route(sw), parent);
	down = tb_find_usb3_down(parent, port);
	if (!down)
		return 0;

	if (tb_route(parent)) {
		struct tb_port *parent_up;
		/*
		 * Check first that the parent switch has its upstream USB3
		 * port enabled. Otherwise the chain is not complete and
		 * there is no point setting up a new tunnel.
		 */
		parent_up = tb_switch_find_port(parent, TB_TYPE_USB3_UP);
		if (!parent_up || !tb_port_is_enabled(parent_up))
			return 0;

		/* Make all unused bandwidth available for the new tunnel */
		ret = tb_release_unused_usb3_bandwidth(tb, down, up);
		if (ret)
			return ret;
	}

	ret = tb_available_bandwidth(tb, down, up, &available_up,
				     &available_down);
	if (ret)
		goto err_reclaim;

	tb_port_dbg(up, "available bandwidth for new USB3 tunnel %d/%d Mb/s\n",
		    available_up, available_down);

	tunnel = tb_tunnel_alloc_usb3(tb, up, down, available_up,
				      available_down);
	if (!tunnel) {
		ret = -ENOMEM;
		goto err_reclaim;
	}

	if (tb_tunnel_activate(tunnel)) {
		tb_port_info(up,
			     "USB3 tunnel activation failed, aborting\n");
		ret = -EIO;
		goto err_free;
	}

	list_add_tail(&tunnel->list, &tcm->tunnel_list);
	if (tb_route(parent))
		tb_reclaim_usb3_bandwidth(tb, down, up);

	return 0;

err_free:
	tb_tunnel_free(tunnel);
err_reclaim:
	if (tb_route(parent))
		tb_reclaim_usb3_bandwidth(tb, down, up);

	return ret;
}

static int tb_create_usb3_tunnels(struct tb_switch *sw)
{
	struct tb_port *port;
	int ret;

	if (!tb_acpi_may_tunnel_usb3())
		return 0;

	if (tb_route(sw)) {
		ret = tb_tunnel_usb3(sw->tb, sw);
		if (ret)
			return ret;
	}

	tb_switch_for_each_port(sw, port) {
		if (!tb_port_has_remote(port))
			continue;
		ret = tb_create_usb3_tunnels(port->remote->sw);
		if (ret)
			return ret;
	}

	return 0;
}

static void tb_scan_port(struct tb_port *port);

/*
 * tb_scan_switch() - scan for and initialize downstream switches
 */
static void tb_scan_switch(struct tb_switch *sw)
{
	struct tb_port *port;

	pm_runtime_get_sync(&sw->dev);

	tb_switch_for_each_port(sw, port)
		tb_scan_port(port);

	pm_runtime_mark_last_busy(&sw->dev);
	pm_runtime_put_autosuspend(&sw->dev);
}

/*
 * tb_scan_port() - check for and initialize switches below port
 */
static void tb_scan_port(struct tb_port *port)
{
	struct tb_cm *tcm = tb_priv(port->sw->tb);
	struct tb_port *upstream_port;
	struct tb_switch *sw;

	if (tb_is_upstream_port(port))
		return;

	if (tb_port_is_dpout(port) && tb_dp_port_hpd_is_active(port) == 1 &&
	    !tb_dp_port_is_enabled(port)) {
		tb_port_dbg(port, "DP adapter HPD set, queuing hotplug\n");
		tb_queue_hotplug(port->sw->tb, tb_route(port->sw), port->port,
				 false);
		return;
	}

	if (port->config.type != TB_TYPE_PORT)
		return;
	if (port->dual_link_port && port->link_nr)
		return; /*
			 * Downstream switch is reachable through two ports.
			 * Only scan on the primary port (link_nr == 0).
			 */
	if (tb_wait_for_port(port, false) <= 0)
		return;
	if (port->remote) {
		tb_port_dbg(port, "port already has a remote\n");
		return;
	}

	tb_retimer_scan(port, true);

	sw = tb_switch_alloc(port->sw->tb, &port->sw->dev,
			     tb_downstream_route(port));
	if (IS_ERR(sw)) {
		/*
		 * If there is an error accessing the connected switch
		 * it may be connected to another domain. Also we allow
		 * the other domain to be connected to a max depth switch.
		 */
		if (PTR_ERR(sw) == -EIO || PTR_ERR(sw) == -EADDRNOTAVAIL)
			tb_scan_xdomain(port);
		return;
	}

	if (tb_switch_configure(sw)) {
		tb_switch_put(sw);
		return;
	}

	/*
	 * If there was previously another domain connected remove it
	 * first.
	 */
	if (port->xdomain) {
		tb_xdomain_remove(port->xdomain);
		tb_port_unconfigure_xdomain(port);
		port->xdomain = NULL;
	}

	/*
	 * Do not send uevents until we have discovered all existing
	 * tunnels and know which switches were authorized already by
	 * the boot firmware.
	 */
	if (!tcm->hotplug_active)
		dev_set_uevent_suppress(&sw->dev, true);

	/*
	 * At the moment runtime PM is supported only on Thunderbolt 2
	 * and beyond (devices with LC).
	 */
	sw->rpm = sw->generation > 1;

	if (tb_switch_add(sw)) {
		tb_switch_put(sw);
		return;
	}

	/* Link the switches using both links if available */
	upstream_port = tb_upstream_port(sw);
	port->remote = upstream_port;
	upstream_port->remote = port;
	if (port->dual_link_port && upstream_port->dual_link_port) {
		port->dual_link_port->remote = upstream_port->dual_link_port;
		upstream_port->dual_link_port->remote = port->dual_link_port;
	}

	/* Enable lane bonding if supported */
	tb_switch_lane_bonding_enable(sw);
	/* Set the link configured */
	tb_switch_configure_link(sw);
	if (tb_switch_enable_clx(sw, TB_CL0S))
		tb_sw_warn(sw, "failed to enable CLx on upstream port\n");

	tb_switch_tmu_configure(sw, TB_SWITCH_TMU_RATE_HIFI,
				tb_switch_is_clx_enabled(sw));

	if (tb_enable_tmu(sw))
		tb_sw_warn(sw, "failed to enable TMU\n");

	/* Scan upstream retimers */
	tb_retimer_scan(upstream_port, true);

	/*
	 * Create USB 3.x tunnels only when the switch is plugged to the
	 * domain. This is because we scan the domain also during discovery
	 * and want to discover existing USB 3.x tunnels before we create
	 * any new.
	 */
	if (tcm->hotplug_active && tb_tunnel_usb3(sw->tb, sw))
		tb_sw_warn(sw, "USB3 tunnel creation failed\n");

	tb_add_dp_resources(sw);
	tb_scan_switch(sw);
}

static void tb_deactivate_and_free_tunnel(struct tb_tunnel *tunnel)
{
	struct tb_port *src_port, *dst_port;
	struct tb *tb;

	if (!tunnel)
		return;

	tb_tunnel_deactivate(tunnel);
	list_del(&tunnel->list);

	tb = tunnel->tb;
	src_port = tunnel->src_port;
	dst_port = tunnel->dst_port;

	switch (tunnel->type) {
	case TB_TUNNEL_DP:
		/*
		 * In case of DP tunnel make sure the DP IN resource is
		 * deallocated properly.
		 */
		tb_switch_dealloc_dp_resource(src_port->sw, src_port);
		/* Now we can allow the domain to runtime suspend again */
		pm_runtime_mark_last_busy(&dst_port->sw->dev);
		pm_runtime_put_autosuspend(&dst_port->sw->dev);
		pm_runtime_mark_last_busy(&src_port->sw->dev);
		pm_runtime_put_autosuspend(&src_port->sw->dev);
		fallthrough;

	case TB_TUNNEL_USB3:
		tb_reclaim_usb3_bandwidth(tb, src_port, dst_port);
		break;

	default:
		/*
		 * PCIe and DMA tunnels do not consume guaranteed
		 * bandwidth.
		 */
		break;
	}

	tb_tunnel_free(tunnel);
}

/*
 * tb_free_invalid_tunnels() - destroy tunnels of devices that have gone away
 */
static void tb_free_invalid_tunnels(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel;
	struct tb_tunnel *n;

	list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
		if (tb_tunnel_is_invalid(tunnel))
			tb_deactivate_and_free_tunnel(tunnel);
	}
}

/*
 * tb_free_unplugged_children() - traverse hierarchy and free unplugged switches
 */
static void tb_free_unplugged_children(struct tb_switch *sw)
{
	struct tb_port *port;

	tb_switch_for_each_port(sw, port) {
		if (!tb_port_has_remote(port))
			continue;

		if (port->remote->sw->is_unplugged) {
			tb_retimer_remove_all(port);
			tb_remove_dp_resources(port->remote->sw);
			tb_switch_unconfigure_link(port->remote->sw);
			tb_switch_lane_bonding_disable(port->remote->sw);
			tb_switch_remove(port->remote->sw);
			port->remote = NULL;
			if (port->dual_link_port)
				port->dual_link_port->remote = NULL;
		} else {
			tb_free_unplugged_children(port->remote->sw);
		}
	}
}

static struct tb_port *tb_find_pcie_down(struct tb_switch *sw,
					 const struct tb_port *port)
{
	struct tb_port *down = NULL;

	/*
	 * To keep plugging devices consistently in the same PCIe
	 * hierarchy, do mapping here for switch downstream PCIe ports.
	 */
	if (tb_switch_is_usb4(sw)) {
		down = usb4_switch_map_pcie_down(sw, port);
	} else if (!tb_route(sw)) {
		int phy_port = tb_phy_port_from_link(port->port);
		int index;

		/*
		 * Hard-coded Thunderbolt port to PCIe down port mapping
		 * per controller.
		 */
		if (tb_switch_is_cactus_ridge(sw) ||
		    tb_switch_is_alpine_ridge(sw))
			index = !phy_port ? 6 : 7;
		else if (tb_switch_is_falcon_ridge(sw))
			index = !phy_port ? 6 : 8;
		else if (tb_switch_is_titan_ridge(sw))
			index = !phy_port ? 8 : 9;
		else
			goto out;

		/* Validate the hard-coding */
		if (WARN_ON(index > sw->config.max_port_number))
			goto out;

		down = &sw->ports[index];
	}

	if (down) {
		if (WARN_ON(!tb_port_is_pcie_down(down)))
			goto out;
		if (tb_pci_port_is_enabled(down))
			goto out;

		return down;
	}

out:
	return tb_find_unused_port(sw, TB_TYPE_PCIE_DOWN);
}

832{
833 struct tb_port *host_port, *port;
834 struct tb_cm *tcm = tb_priv(tb);
835
836 host_port = tb_route(in->sw) ?
837 tb_port_at(tb_route(in->sw), tb->root_switch) : NULL;
838
839 list_for_each_entry(port, &tcm->dp_resources, list) {
840 if (!tb_port_is_dpout(port))
841 continue;
842
843 if (tb_port_is_enabled(port)) {
844 tb_port_dbg(port, "in use\n");
845 continue;
846 }
847
848 tb_port_dbg(port, "DP OUT available\n");
849
850 /*
851 * Keep the DP tunnel under the topology starting from
852 * the same host router downstream port.
853 */
854 if (host_port && tb_route(port->sw)) {
855 struct tb_port *p;
856
857 p = tb_port_at(tb_route(port->sw), tb->root_switch);
858 if (p != host_port)
859 continue;
860 }
861
862 return port;
863 }
864
865 return NULL;
866}
867
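/*
 * tb_tunnel_dp() - try to establish one more DP tunnel
 *
 * Picks the first pair of unused DP IN and DP OUT adapters from the DP
 * resource list, releases unused USB3 bandwidth on the path and sets up
 * a DP tunnel sized to the remaining bandwidth. Both ends are kept
 * runtime resumed for as long as the tunnel exists.
 */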
static void tb_tunnel_dp(struct tb *tb)
{
	int available_up, available_down, ret;
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_port *port, *in, *out;
	struct tb_tunnel *tunnel;

	if (!tb_acpi_may_tunnel_dp()) {
		tb_dbg(tb, "DP tunneling disabled, not creating tunnel\n");
		return;
	}

	/*
	 * Find pair of inactive DP IN and DP OUT adapters and then
	 * establish a DP tunnel between them.
	 */
	tb_dbg(tb, "looking for DP IN <-> DP OUT pairs:\n");

	in = NULL;
	out = NULL;
	list_for_each_entry(port, &tcm->dp_resources, list) {
		if (!tb_port_is_dpin(port))
			continue;

		if (tb_port_is_enabled(port)) {
			tb_port_dbg(port, "in use\n");
			continue;
		}

		tb_port_dbg(port, "DP IN available\n");

		out = tb_find_dp_out(tb, port);
		if (out) {
			in = port;
			break;
		}
	}

	if (!in) {
		tb_dbg(tb, "no suitable DP IN adapter available, not tunneling\n");
		return;
	}
	if (!out) {
		tb_dbg(tb, "no suitable DP OUT adapter available, not tunneling\n");
		return;
	}

	/*
	 * DP stream needs the domain to be active so runtime resume
	 * both ends of the tunnel.
	 *
	 * This should bring the routers in the middle active as well
	 * and keeps the domain from runtime suspending while the DP
	 * tunnel is active.
	 */
	pm_runtime_get_sync(&in->sw->dev);
	pm_runtime_get_sync(&out->sw->dev);

	if (tb_switch_alloc_dp_resource(in->sw, in)) {
		tb_port_dbg(in, "no resource available for DP IN, not tunneling\n");
		goto err_rpm_put;
	}

	/* Make all unused USB3 bandwidth available for the new DP tunnel */
	ret = tb_release_unused_usb3_bandwidth(tb, in, out);
	if (ret) {
		tb_warn(tb, "failed to release unused bandwidth\n");
		goto err_dealloc_dp;
	}

	ret = tb_available_bandwidth(tb, in, out, &available_up,
				     &available_down);
	if (ret)
		goto err_reclaim;

	tb_dbg(tb, "available bandwidth for new DP tunnel %u/%u Mb/s\n",
	       available_up, available_down);

	tunnel = tb_tunnel_alloc_dp(tb, in, out, available_up, available_down);
	if (!tunnel) {
		tb_port_dbg(out, "could not allocate DP tunnel\n");
		goto err_reclaim;
	}

	if (tb_tunnel_activate(tunnel)) {
		tb_port_info(out, "DP tunnel activation failed, aborting\n");
		goto err_free;
	}

	list_add_tail(&tunnel->list, &tcm->tunnel_list);
	tb_reclaim_usb3_bandwidth(tb, in, out);
	return;

err_free:
	tb_tunnel_free(tunnel);
err_reclaim:
	tb_reclaim_usb3_bandwidth(tb, in, out);
err_dealloc_dp:
	tb_switch_dealloc_dp_resource(in->sw, in);
err_rpm_put:
	pm_runtime_mark_last_busy(&out->sw->dev);
	pm_runtime_put_autosuspend(&out->sw->dev);
	pm_runtime_mark_last_busy(&in->sw->dev);
	pm_runtime_put_autosuspend(&in->sw->dev);
}

static void tb_dp_resource_unavailable(struct tb *tb, struct tb_port *port)
{
	struct tb_port *in, *out;
	struct tb_tunnel *tunnel;

	if (tb_port_is_dpin(port)) {
		tb_port_dbg(port, "DP IN resource unavailable\n");
		in = port;
		out = NULL;
	} else {
		tb_port_dbg(port, "DP OUT resource unavailable\n");
		in = NULL;
		out = port;
	}

	tunnel = tb_find_tunnel(tb, TB_TUNNEL_DP, in, out);
	tb_deactivate_and_free_tunnel(tunnel);
	list_del_init(&port->list);

	/*
	 * See if there is another DP OUT port that can be used to
	 * create another tunnel.
	 */
	tb_tunnel_dp(tb);
}

static void tb_dp_resource_available(struct tb *tb, struct tb_port *port)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_port *p;

	if (tb_port_is_enabled(port))
		return;

	list_for_each_entry(p, &tcm->dp_resources, list) {
		if (p == port)
			return;
	}

	tb_port_dbg(port, "DP %s resource available\n",
		    tb_port_is_dpin(port) ? "IN" : "OUT");
	list_add_tail(&port->list, &tcm->dp_resources);

	/* Look for suitable DP IN <-> DP OUT pairs now */
	tb_tunnel_dp(tb);
}

static void tb_disconnect_and_release_dp(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel, *n;

	/*
	 * Tear down all DP tunnels and release their resources. They
	 * will be re-established after resume based on plug events.
	 */
	list_for_each_entry_safe_reverse(tunnel, n, &tcm->tunnel_list, list) {
		if (tb_tunnel_is_dp(tunnel))
			tb_deactivate_and_free_tunnel(tunnel);
	}

	while (!list_empty(&tcm->dp_resources)) {
		struct tb_port *port;

		port = list_first_entry(&tcm->dp_resources,
					struct tb_port, list);
		list_del_init(&port->list);
	}
}

static int tb_disconnect_pci(struct tb *tb, struct tb_switch *sw)
{
	struct tb_tunnel *tunnel;
	struct tb_port *up;

	up = tb_switch_find_port(sw, TB_TYPE_PCIE_UP);
	if (WARN_ON(!up))
		return -ENODEV;

	tunnel = tb_find_tunnel(tb, TB_TUNNEL_PCI, NULL, up);
	if (WARN_ON(!tunnel))
		return -ENODEV;

	tb_tunnel_deactivate(tunnel);
	list_del(&tunnel->list);
	tb_tunnel_free(tunnel);
	return 0;
}

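/*
 * tb_tunnel_pci() - set up a PCIe tunnel from the parent router to @sw
 *
 * Pairs the PCIe upstream adapter of @sw with a free PCIe downstream
 * adapter on the parent router (tb_find_pcie_down()) and activates the
 * tunnel. Returns 0 when @sw has no PCIe upstream adapter or no free
 * downstream adapter is available.
 */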
static int tb_tunnel_pci(struct tb *tb, struct tb_switch *sw)
{
	struct tb_port *up, *down, *port;
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_switch *parent_sw;
	struct tb_tunnel *tunnel;

	up = tb_switch_find_port(sw, TB_TYPE_PCIE_UP);
	if (!up)
		return 0;

	/*
	 * Look up available down port. Since we are chaining it should
	 * be found right above this switch.
	 */
	parent_sw = tb_to_switch(sw->dev.parent);
	port = tb_port_at(tb_route(sw), parent_sw);
	down = tb_find_pcie_down(parent_sw, port);
	if (!down)
		return 0;

	tunnel = tb_tunnel_alloc_pci(tb, up, down);
	if (!tunnel)
		return -ENOMEM;

	if (tb_tunnel_activate(tunnel)) {
		tb_port_info(up,
			     "PCIe tunnel activation failed, aborting\n");
		tb_tunnel_free(tunnel);
		return -EIO;
	}

	/*
	 * PCIe L1 is needed to enable CL0s for Titan Ridge so enable it
	 * here.
	 */
	if (tb_switch_pcie_l1_enable(sw))
		tb_sw_warn(sw, "failed to enable PCIe L1 for Titan Ridge\n");

	list_add_tail(&tunnel->list, &tcm->tunnel_list);
	return 0;
}

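/*
 * tb_approve_xdomain_paths() - establish DMA paths to a remote host
 *
 * Sets up a DMA tunnel between the NHI adapter of the host router and
 * the port where the remote host (XDomain) is connected, using the
 * given transmit/receive paths and rings, and activates it under the
 * domain lock.
 */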
Mika Westerberg180b0682021-01-08 16:25:39 +02001106static int tb_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
1107 int transmit_path, int transmit_ring,
1108 int receive_path, int receive_ring)
Mika Westerberg7ea4cd62018-09-28 16:41:01 +03001109{
1110 struct tb_cm *tcm = tb_priv(tb);
1111 struct tb_port *nhi_port, *dst_port;
1112 struct tb_tunnel *tunnel;
1113 struct tb_switch *sw;
1114
1115 sw = tb_to_switch(xd->dev.parent);
1116 dst_port = tb_port_at(xd->route, sw);
Mika Westerberg386e5e22019-12-17 15:33:37 +03001117 nhi_port = tb_switch_find_port(tb->root_switch, TB_TYPE_NHI);
Mika Westerberg7ea4cd62018-09-28 16:41:01 +03001118
1119 mutex_lock(&tb->lock);
Mika Westerberg180b0682021-01-08 16:25:39 +02001120 tunnel = tb_tunnel_alloc_dma(tb, nhi_port, dst_port, transmit_path,
1121 transmit_ring, receive_path, receive_ring);
Mika Westerberg7ea4cd62018-09-28 16:41:01 +03001122 if (!tunnel) {
1123 mutex_unlock(&tb->lock);
1124 return -ENOMEM;
1125 }
1126
1127 if (tb_tunnel_activate(tunnel)) {
1128 tb_port_info(nhi_port,
1129 "DMA tunnel activation failed, aborting\n");
1130 tb_tunnel_free(tunnel);
1131 mutex_unlock(&tb->lock);
1132 return -EIO;
1133 }
1134
1135 list_add_tail(&tunnel->list, &tcm->tunnel_list);
1136 mutex_unlock(&tb->lock);
1137 return 0;
1138}
1139
Mika Westerberg180b0682021-01-08 16:25:39 +02001140static void __tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
1141 int transmit_path, int transmit_ring,
1142 int receive_path, int receive_ring)
Mika Westerberg7ea4cd62018-09-28 16:41:01 +03001143{
Mika Westerberg180b0682021-01-08 16:25:39 +02001144 struct tb_cm *tcm = tb_priv(tb);
1145 struct tb_port *nhi_port, *dst_port;
1146 struct tb_tunnel *tunnel, *n;
Mika Westerberg7ea4cd62018-09-28 16:41:01 +03001147 struct tb_switch *sw;
1148
1149 sw = tb_to_switch(xd->dev.parent);
1150 dst_port = tb_port_at(xd->route, sw);
Mika Westerberg180b0682021-01-08 16:25:39 +02001151 nhi_port = tb_switch_find_port(tb->root_switch, TB_TYPE_NHI);
Mika Westerberg7ea4cd62018-09-28 16:41:01 +03001152
Mika Westerberg180b0682021-01-08 16:25:39 +02001153 list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
1154 if (!tb_tunnel_is_dma(tunnel))
1155 continue;
1156 if (tunnel->src_port != nhi_port || tunnel->dst_port != dst_port)
1157 continue;
1158
1159 if (tb_tunnel_match_dma(tunnel, transmit_path, transmit_ring,
1160 receive_path, receive_ring))
1161 tb_deactivate_and_free_tunnel(tunnel);
1162 }
Mika Westerberg7ea4cd62018-09-28 16:41:01 +03001163}
1164
Mika Westerberg180b0682021-01-08 16:25:39 +02001165static int tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
1166 int transmit_path, int transmit_ring,
1167 int receive_path, int receive_ring)
Mika Westerberg7ea4cd62018-09-28 16:41:01 +03001168{
1169 if (!xd->is_unplugged) {
1170 mutex_lock(&tb->lock);
Mika Westerberg180b0682021-01-08 16:25:39 +02001171 __tb_disconnect_xdomain_paths(tb, xd, transmit_path,
1172 transmit_ring, receive_path,
1173 receive_ring);
Mika Westerberg7ea4cd62018-09-28 16:41:01 +03001174 mutex_unlock(&tb->lock);
1175 }
1176 return 0;
1177}
1178
Andreas Noeverd6cc51c2014-06-03 22:04:00 +02001179/* hotplug handling */
1180
Lee Jones877e50b2021-01-27 11:25:50 +00001181/*
Andreas Noeverd6cc51c2014-06-03 22:04:00 +02001182 * tb_handle_hotplug() - handle hotplug event
1183 *
1184 * Executes on tb->wq.
1185 */
1186static void tb_handle_hotplug(struct work_struct *work)
1187{
1188 struct tb_hotplug_event *ev = container_of(work, typeof(*ev), work);
1189 struct tb *tb = ev->tb;
Mika Westerberg9d3cce02017-06-06 15:25:00 +03001190 struct tb_cm *tcm = tb_priv(tb);
Andreas Noever053596d2014-06-03 22:04:06 +02001191 struct tb_switch *sw;
1192 struct tb_port *port;
Mika Westerberg284652a2020-04-09 14:23:32 +03001193
Mika Westerberg6ac6fae2020-06-05 14:25:02 +03001194 /* Bring the domain back from sleep if it was suspended */
1195 pm_runtime_get_sync(&tb->dev);
1196
Andreas Noeverd6cc51c2014-06-03 22:04:00 +02001197 mutex_lock(&tb->lock);
Mika Westerberg9d3cce02017-06-06 15:25:00 +03001198 if (!tcm->hotplug_active)
Andreas Noeverd6cc51c2014-06-03 22:04:00 +02001199 goto out; /* during init, suspend or shutdown */
1200
Mika Westerberg8f965ef2019-03-15 14:56:21 +02001201 sw = tb_switch_find_by_route(tb, ev->route);
Andreas Noever053596d2014-06-03 22:04:06 +02001202 if (!sw) {
1203 tb_warn(tb,
1204 "hotplug event from non existent switch %llx:%x (unplug: %d)\n",
1205 ev->route, ev->port, ev->unplug);
1206 goto out;
1207 }
1208 if (ev->port > sw->config.max_port_number) {
1209 tb_warn(tb,
1210 "hotplug event from non existent port %llx:%x (unplug: %d)\n",
1211 ev->route, ev->port, ev->unplug);
Mika Westerberg8f965ef2019-03-15 14:56:21 +02001212 goto put_sw;
Andreas Noever053596d2014-06-03 22:04:06 +02001213 }
1214 port = &sw->ports[ev->port];
1215 if (tb_is_upstream_port(port)) {
Mika Westerbergdfe40ca2019-03-07 15:26:45 +02001216 tb_dbg(tb, "hotplug event for upstream port %llx:%x (unplug: %d)\n",
1217 ev->route, ev->port, ev->unplug);
Mika Westerberg8f965ef2019-03-15 14:56:21 +02001218 goto put_sw;
Andreas Noever053596d2014-06-03 22:04:06 +02001219 }
Mika Westerberg6ac6fae2020-06-05 14:25:02 +03001220
1221 pm_runtime_get_sync(&sw->dev);
1222
Andreas Noever053596d2014-06-03 22:04:06 +02001223 if (ev->unplug) {
Kranthi Kuntaladacb1282020-03-05 16:39:58 +02001224 tb_retimer_remove_all(port);
1225
Mika Westerbergdfe40ca2019-03-07 15:26:45 +02001226 if (tb_port_has_remote(port)) {
Mika Westerberg7ea4cd62018-09-28 16:41:01 +03001227 tb_port_dbg(port, "switch unplugged\n");
Lukas Wunneraae20bb2016-03-20 13:57:20 +01001228 tb_sw_set_unplugged(port->remote->sw);
Andreas Noever3364f0c2014-06-03 22:04:08 +02001229 tb_free_invalid_tunnels(tb);
Mika Westerberg8afe9092019-03-26 15:52:30 +03001230 tb_remove_dp_resources(port->remote->sw);
Rajmohan Manicf29b9af2019-12-17 15:33:43 +03001231 tb_switch_tmu_disable(port->remote->sw);
Mika Westerbergde462032020-04-02 14:50:52 +03001232 tb_switch_unconfigure_link(port->remote->sw);
Mika Westerberg91c0c122019-03-21 19:03:00 +02001233 tb_switch_lane_bonding_disable(port->remote->sw);
Mika Westerbergbfe778a2017-06-06 15:25:01 +03001234 tb_switch_remove(port->remote->sw);
Andreas Noever053596d2014-06-03 22:04:06 +02001235 port->remote = NULL;
Mika Westerbergdfe40ca2019-03-07 15:26:45 +02001236 if (port->dual_link_port)
1237 port->dual_link_port->remote = NULL;
Mika Westerberg8afe9092019-03-26 15:52:30 +03001238 /* Maybe we can create another DP tunnel */
1239 tb_tunnel_dp(tb);
Mika Westerberg7ea4cd62018-09-28 16:41:01 +03001240 } else if (port->xdomain) {
1241 struct tb_xdomain *xd = tb_xdomain_get(port->xdomain);
1242
1243 tb_port_dbg(port, "xdomain unplugged\n");
1244 /*
1245 * Service drivers are unbound during
1246 * tb_xdomain_remove() so setting XDomain as
1247 * unplugged here prevents deadlock if they call
1248 * tb_xdomain_disable_paths(). We will tear down
Mika Westerberg180b0682021-01-08 16:25:39 +02001249 * all the tunnels below.
Mika Westerberg7ea4cd62018-09-28 16:41:01 +03001250 */
1251 xd->is_unplugged = true;
1252 tb_xdomain_remove(xd);
1253 port->xdomain = NULL;
Mika Westerberg180b0682021-01-08 16:25:39 +02001254 __tb_disconnect_xdomain_paths(tb, xd, -1, -1, -1, -1);
Mika Westerberg7ea4cd62018-09-28 16:41:01 +03001255 tb_xdomain_put(xd);
Mika Westerberg284652a2020-04-09 14:23:32 +03001256 tb_port_unconfigure_xdomain(port);
Mika Westerberg8afe9092019-03-26 15:52:30 +03001257 } else if (tb_port_is_dpout(port) || tb_port_is_dpin(port)) {
1258 tb_dp_resource_unavailable(tb, port);
Andreas Noever053596d2014-06-03 22:04:06 +02001259 } else {
Mika Westerberg62efe692018-09-17 16:32:13 +03001260 tb_port_dbg(port,
1261 "got unplug event for disconnected port, ignoring\n");
Andreas Noever053596d2014-06-03 22:04:06 +02001262 }
1263 } else if (port->remote) {
Mika Westerberg62efe692018-09-17 16:32:13 +03001264 tb_port_dbg(port, "got plug event for connected port, ignoring\n");
Andreas Noever053596d2014-06-03 22:04:06 +02001265 } else {
Mika Westerberg344e0642017-10-11 17:19:54 +03001266 if (tb_port_is_null(port)) {
Mika Westerberg62efe692018-09-17 16:32:13 +03001267 tb_port_dbg(port, "hotplug: scanning\n");
Mika Westerberg344e0642017-10-11 17:19:54 +03001268 tb_scan_port(port);
1269 if (!port->remote)
Mika Westerberg62efe692018-09-17 16:32:13 +03001270 tb_port_dbg(port, "hotplug: no switch found\n");
Mika Westerberg8afe9092019-03-26 15:52:30 +03001271 } else if (tb_port_is_dpout(port) || tb_port_is_dpin(port)) {
1272 tb_dp_resource_available(tb, port);
Mika Westerberg344e0642017-10-11 17:19:54 +03001273 }
Andreas Noever053596d2014-06-03 22:04:06 +02001274 }
Mika Westerberg8f965ef2019-03-15 14:56:21 +02001275
Mika Westerberg6ac6fae2020-06-05 14:25:02 +03001276 pm_runtime_mark_last_busy(&sw->dev);
1277 pm_runtime_put_autosuspend(&sw->dev);
1278
Mika Westerberg8f965ef2019-03-15 14:56:21 +02001279put_sw:
1280 tb_switch_put(sw);
Andreas Noeverd6cc51c2014-06-03 22:04:00 +02001281out:
1282 mutex_unlock(&tb->lock);
Mika Westerberg6ac6fae2020-06-05 14:25:02 +03001283
1284 pm_runtime_mark_last_busy(&tb->dev);
1285 pm_runtime_put_autosuspend(&tb->dev);
1286
Andreas Noeverd6cc51c2014-06-03 22:04:00 +02001287 kfree(ev);
1288}
1289
Lee Jones877e50b2021-01-27 11:25:50 +00001290/*
Andreas Noeverd6cc51c2014-06-03 22:04:00 +02001291 * tb_schedule_hotplug_handler() - callback function for the control channel
1292 *
1293 * Delegates to tb_handle_hotplug.
1294 */
Mika Westerberg81a54b52017-06-06 15:25:09 +03001295static void tb_handle_event(struct tb *tb, enum tb_cfg_pkg_type type,
1296 const void *buf, size_t size)
Andreas Noeverd6cc51c2014-06-03 22:04:00 +02001297{
Mika Westerberg81a54b52017-06-06 15:25:09 +03001298 const struct cfg_event_pkg *pkg = buf;
Mika Westerberg81a54b52017-06-06 15:25:09 +03001299 u64 route;
1300
1301 if (type != TB_CFG_PKG_EVENT) {
1302 tb_warn(tb, "unexpected event %#x, ignoring\n", type);
1303 return;
1304 }
1305
1306 route = tb_cfg_get_route(&pkg->header);
1307
Mika Westerberg210e9f52019-12-17 15:33:39 +03001308 if (tb_cfg_ack_plug(tb->ctl, route, pkg->port, pkg->unplug)) {
Mika Westerberg81a54b52017-06-06 15:25:09 +03001309 tb_warn(tb, "could not ack plug event on %llx:%x\n", route,
1310 pkg->port);
1311 }
1312
Mika Westerberg4f807e42018-09-17 16:30:49 +03001313 tb_queue_hotplug(tb, route, pkg->port, pkg->unplug);
Andreas Noeverd6cc51c2014-06-03 22:04:00 +02001314}
1315
Mika Westerberg9d3cce02017-06-06 15:25:00 +03001316static void tb_stop(struct tb *tb)
Andreas Noeverd6cc51c2014-06-03 22:04:00 +02001317{
Mika Westerberg9d3cce02017-06-06 15:25:00 +03001318 struct tb_cm *tcm = tb_priv(tb);
Mika Westerberg93f36ad2017-02-19 13:48:29 +02001319 struct tb_tunnel *tunnel;
1320 struct tb_tunnel *n;
Andreas Noever3364f0c2014-06-03 22:04:08 +02001321
Mika Westerberg6ac6fae2020-06-05 14:25:02 +03001322 cancel_delayed_work(&tcm->remove_work);
Andreas Noever3364f0c2014-06-03 22:04:08 +02001323 /* tunnels are only present after everything has been initialized */
Mika Westerberg7ea4cd62018-09-28 16:41:01 +03001324 list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
1325 /*
1326 * DMA tunnels require the driver to be functional so we
1327 * tear them down. Other protocol tunnels can be left
1328 * intact.
1329 */
1330 if (tb_tunnel_is_dma(tunnel))
1331 tb_tunnel_deactivate(tunnel);
Mika Westerberg93f36ad2017-02-19 13:48:29 +02001332 tb_tunnel_free(tunnel);
Mika Westerberg7ea4cd62018-09-28 16:41:01 +03001333 }
Mika Westerbergbfe778a2017-06-06 15:25:01 +03001334 tb_switch_remove(tb->root_switch);
Mika Westerberg9d3cce02017-06-06 15:25:00 +03001335 tcm->hotplug_active = false; /* signal tb_handle_hotplug to quit */
Andreas Noeverd6cc51c2014-06-03 22:04:00 +02001336}
1337
Mika Westerberg99cabbb2018-12-30 21:34:08 +02001338static int tb_scan_finalize_switch(struct device *dev, void *data)
1339{
1340 if (tb_is_switch(dev)) {
1341 struct tb_switch *sw = tb_to_switch(dev);
1342
1343 /*
1344 * If we found that the switch was already setup by the
1345 * boot firmware, mark it as authorized now before we
1346 * send uevent to userspace.
1347 */
1348 if (sw->boot)
1349 sw->authorized = 1;
1350
1351 dev_set_uevent_suppress(dev, false);
1352 kobject_uevent(&dev->kobj, KOBJ_ADD);
1353 device_for_each_child(dev, NULL, tb_scan_finalize_switch);
1354 }
1355
1356 return 0;
1357}
1358
Mika Westerberg9d3cce02017-06-06 15:25:00 +03001359static int tb_start(struct tb *tb)
Andreas Noeverd6cc51c2014-06-03 22:04:00 +02001360{
Mika Westerberg9d3cce02017-06-06 15:25:00 +03001361 struct tb_cm *tcm = tb_priv(tb);
Mika Westerbergbfe778a2017-06-06 15:25:01 +03001362 int ret;
Andreas Noeverd6cc51c2014-06-03 22:04:00 +02001363
Mika Westerbergbfe778a2017-06-06 15:25:01 +03001364 tb->root_switch = tb_switch_alloc(tb, &tb->dev, 0);
Mika Westerberg444ac382018-12-30 12:17:52 +02001365 if (IS_ERR(tb->root_switch))
1366 return PTR_ERR(tb->root_switch);
Andreas Noevera25c8b22014-06-03 22:04:02 +02001367
Mika Westerberge6b245c2017-06-06 15:25:17 +03001368 /*
1369 * ICM firmware upgrade needs running firmware and in native
1370 * mode that is not available so disable firmware upgrade of the
1371 * root switch.
1372 */
1373 tb->root_switch->no_nvm_upgrade = true;
Mika Westerberg6ac6fae2020-06-05 14:25:02 +03001374 /* All USB4 routers support runtime PM */
1375 tb->root_switch->rpm = tb_switch_is_usb4(tb->root_switch);
Mika Westerberge6b245c2017-06-06 15:25:17 +03001376
Mika Westerbergbfe778a2017-06-06 15:25:01 +03001377 ret = tb_switch_configure(tb->root_switch);
1378 if (ret) {
1379 tb_switch_put(tb->root_switch);
1380 return ret;
1381 }
1382
1383 /* Announce the switch to the world */
1384 ret = tb_switch_add(tb->root_switch);
1385 if (ret) {
1386 tb_switch_put(tb->root_switch);
1387 return ret;
1388 }
1389
Gil Finea28ec0e2021-12-17 03:16:38 +02001390 tb_switch_tmu_configure(tb->root_switch, TB_SWITCH_TMU_RATE_HIFI, false);
Rajmohan Manicf29b9af2019-12-17 15:33:43 +03001391 /* Enable TMU if it is off */
1392 tb_switch_tmu_enable(tb->root_switch);
Andreas Noever9da672a2014-06-03 22:04:05 +02001393 /* Full scan to discover devices added before the driver was loaded. */
1394 tb_scan_switch(tb->root_switch);
Mika Westerberg0414bec2017-02-19 23:43:26 +02001395 /* Find out tunnels created by the boot firmware */
Mika Westerberg43bddb22021-11-14 17:20:59 +02001396 tb_discover_tunnels(tb);
Rajmohan Manie6f81852019-12-17 15:33:44 +03001397 /*
1398 * If the boot firmware did not create USB 3.x tunnels create them
1399 * now for the whole topology.
1400 */
1401 tb_create_usb3_tunnels(tb->root_switch);
Mika Westerberg8afe9092019-03-26 15:52:30 +03001402 /* Add DP IN resources for the root switch */
1403 tb_add_dp_resources(tb->root_switch);
Mika Westerberg99cabbb2018-12-30 21:34:08 +02001404 /* Make the discovered switches available to the userspace */
1405 device_for_each_child(&tb->root_switch->dev, NULL,
1406 tb_scan_finalize_switch);
Andreas Noever9da672a2014-06-03 22:04:05 +02001407
Andreas Noeverd6cc51c2014-06-03 22:04:00 +02001408 /* Allow tb_handle_hotplug to progress events */
Mika Westerberg9d3cce02017-06-06 15:25:00 +03001409 tcm->hotplug_active = true;
1410 return 0;
Andreas Noeverd6cc51c2014-06-03 22:04:00 +02001411}
1412
static int tb_suspend_noirq(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);

	tb_dbg(tb, "suspending...\n");
	tb_disconnect_and_release_dp(tb);
	tb_switch_suspend(tb->root_switch, false);
	tcm->hotplug_active = false; /* signal tb_handle_hotplug to quit */
	tb_dbg(tb, "suspend finished\n");

	return 0;
}

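/*
 * Walks the topology after resume and re-establishes per-router state
 * that is lost over sleep: CLx on the upstream port, TMU configuration,
 * lane bonding and link configuration for child routers, and XDomain
 * link configuration.
 */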
static void tb_restore_children(struct tb_switch *sw)
{
	struct tb_port *port;

	/* No need to restore if the router is already unplugged */
	if (sw->is_unplugged)
		return;

	if (tb_switch_enable_clx(sw, TB_CL0S))
		tb_sw_warn(sw, "failed to re-enable CLx on upstream port\n");

	/*
	 * tb_switch_tmu_configure() was already called when the switch was
	 * added before entering system sleep or runtime suspend,
	 * so no need to call it again before enabling TMU.
	 */
	if (tb_enable_tmu(sw))
		tb_sw_warn(sw, "failed to restore TMU configuration\n");

	tb_switch_for_each_port(sw, port) {
		if (!tb_port_has_remote(port) && !port->xdomain)
			continue;

		if (port->remote) {
			tb_switch_lane_bonding_enable(port->remote->sw);
			tb_switch_configure_link(port->remote->sw);

			tb_restore_children(port->remote->sw);
		} else if (port->xdomain) {
			tb_port_configure_xdomain(port);
		}
	}
}

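/*
 * System resume (noirq phase): reset the root switch to drop anything
 * the firmware may have set up, restore the topology, tear down foreign
 * tunnels possibly left behind by the boot firmware or the restore
 * kernel, and then restart the tunnels this connection manager owns.
 */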
static int tb_resume_noirq(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel, *n;
	unsigned int usb3_delay = 0;
	LIST_HEAD(tunnels);

	tb_dbg(tb, "resuming...\n");

	/* remove any pci devices the firmware might have setup */
	tb_switch_reset(tb->root_switch);

	tb_switch_resume(tb->root_switch);
	tb_free_invalid_tunnels(tb);
	tb_free_unplugged_children(tb->root_switch);
	tb_restore_children(tb->root_switch);

	/*
	 * If we get here from suspend to disk the boot firmware or the
	 * restore kernel might have created tunnels of its own. Since
	 * we cannot be sure they are usable for us we find and tear
	 * them down.
	 */
	tb_switch_discover_tunnels(tb->root_switch, &tunnels, false);
	list_for_each_entry_safe_reverse(tunnel, n, &tunnels, list) {
		if (tb_tunnel_is_usb3(tunnel))
			usb3_delay = 500;
		tb_tunnel_deactivate(tunnel);
		tb_tunnel_free(tunnel);
	}

	/* Re-create our tunnels now */
	list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
		/* USB3 requires delay before it can be re-activated */
		if (tb_tunnel_is_usb3(tunnel)) {
			msleep(usb3_delay);
			/* Only need to do it once */
			usb3_delay = 0;
		}
		tb_tunnel_restart(tunnel);
	}
	if (!list_empty(&tcm->tunnel_list)) {
		/*
		 * the pcie links need some time to get going.
		 * 100ms works for me...
		 */
		tb_dbg(tb, "tunnels restarted, sleeping for 100ms\n");
		msleep(100);
	}
	/* Allow tb_handle_hotplug to progress events */
	tcm->hotplug_active = true;
	tb_dbg(tb, "resume finished\n");

	return 0;
}

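/*
 * Removes any XDomain connections that went away while the domain was
 * suspended. Returns the number of XDomains removed so the caller can
 * decide whether a rescan is needed.
 */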
static int tb_free_unplugged_xdomains(struct tb_switch *sw)
{
	struct tb_port *port;
	int ret = 0;

	tb_switch_for_each_port(sw, port) {
		if (tb_is_upstream_port(port))
			continue;
		if (port->xdomain && port->xdomain->is_unplugged) {
			tb_retimer_remove_all(port);
			tb_xdomain_remove(port->xdomain);
			tb_port_unconfigure_xdomain(port);
			port->xdomain = NULL;
			ret++;
		} else if (port->remote) {
			ret += tb_free_unplugged_xdomains(port->remote->sw);
		}
	}

	return ret;
}

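/*
 * Hibernation freeze/thaw only pause and resume hotplug event handling;
 * the hardware keeps running while the image is written, so no other
 * state needs to be saved here.
 */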
static int tb_freeze_noirq(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);

	tcm->hotplug_active = false;
	return 0;
}

static int tb_thaw_noirq(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);

	tcm->hotplug_active = true;
	return 0;
}

static void tb_complete(struct tb *tb)
{
	/*
	 * Release any unplugged XDomains and if there is a case where
	 * another domain is swapped in place of unplugged XDomain we
	 * need to run another rescan.
	 */
	mutex_lock(&tb->lock);
	if (tb_free_unplugged_xdomains(tb->root_switch))
		tb_scan_switch(tb->root_switch);
	mutex_unlock(&tb->lock);
}

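/*
 * Runtime suspend: put the whole topology to sleep and pause hotplug
 * handling until runtime resume.
 */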
static int tb_runtime_suspend(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);

	mutex_lock(&tb->lock);
	tb_switch_suspend(tb->root_switch, true);
	tcm->hotplug_active = false;
	mutex_unlock(&tb->lock);

	return 0;
}

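/*
 * Deferred cleanup scheduled from tb_runtime_resume(). Running it from
 * the domain workqueue avoids a deadlock in case removing a device
 * runtime resumes the very device that was unplugged.
 */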
static void tb_remove_work(struct work_struct *work)
{
	struct tb_cm *tcm = container_of(work, struct tb_cm, remove_work.work);
	struct tb *tb = tcm_to_tb(tcm);

	mutex_lock(&tb->lock);
	if (tb->root_switch) {
		tb_free_unplugged_children(tb->root_switch);
		tb_free_unplugged_xdomains(tb->root_switch);
	}
	mutex_unlock(&tb->lock);
}

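/*
 * Runtime resume: wake up the topology, drop tunnels that are no longer
 * valid, restore child router state and restart the remaining tunnels.
 * Cleanup of unplugged devices is deferred to tb_remove_work().
 */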
static int tb_runtime_resume(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel, *n;

	mutex_lock(&tb->lock);
	tb_switch_resume(tb->root_switch);
	tb_free_invalid_tunnels(tb);
	tb_restore_children(tb->root_switch);
	list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list)
		tb_tunnel_restart(tunnel);
	tcm->hotplug_active = true;
	mutex_unlock(&tb->lock);

	/*
	 * Schedule cleanup of any unplugged devices. Run this in a
	 * separate thread to avoid possible deadlock if the device
	 * removal runtime resumes the unplugged device.
	 */
	queue_delayed_work(tb->wq, &tcm->remove_work, msecs_to_jiffies(50));
	return 0;
}

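/*
 * Callbacks the Thunderbolt domain core invokes when the software
 * connection manager is in use (see tb_probe() below).
 */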
static const struct tb_cm_ops tb_cm_ops = {
	.start = tb_start,
	.stop = tb_stop,
	.suspend_noirq = tb_suspend_noirq,
	.resume_noirq = tb_resume_noirq,
	.freeze_noirq = tb_freeze_noirq,
	.thaw_noirq = tb_thaw_noirq,
	.complete = tb_complete,
	.runtime_suspend = tb_runtime_suspend,
	.runtime_resume = tb_runtime_resume,
	.handle_event = tb_handle_event,
	.disapprove_switch = tb_disconnect_pci,
	.approve_switch = tb_tunnel_pci,
	.approve_xdomain_paths = tb_approve_xdomain_paths,
	.disconnect_xdomain_paths = tb_disconnect_xdomain_paths,
};

/*
 * During suspend the Thunderbolt controller is reset and all PCIe
 * tunnels are lost. The NHI driver will try to reestablish all tunnels
 * during resume. This adds device links between the tunneled PCIe
 * downstream ports and the NHI so that the device core will make sure
 * NHI is resumed first before the rest.
 */
static void tb_apple_add_links(struct tb_nhi *nhi)
{
	struct pci_dev *upstream, *pdev;

	if (!x86_apple_machine)
		return;

	switch (nhi->pdev->device) {
	case PCI_DEVICE_ID_INTEL_LIGHT_RIDGE:
	case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C:
	case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_NHI:
	case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_NHI:
		break;
	default:
		return;
	}

	upstream = pci_upstream_bridge(nhi->pdev);
	while (upstream) {
		if (!pci_is_pcie(upstream))
			return;
		if (pci_pcie_type(upstream) == PCI_EXP_TYPE_UPSTREAM)
			break;
		upstream = pci_upstream_bridge(upstream);
	}

	if (!upstream)
		return;

	/*
	 * For each hotplug downstream port, add a device link back to
	 * the NHI so that PCIe tunnels can be re-established after
	 * sleep.
	 */
	for_each_pci_bridge(pdev, upstream->subordinate) {
		const struct device_link *link;

		if (!pci_is_pcie(pdev))
			continue;
		if (pci_pcie_type(pdev) != PCI_EXP_TYPE_DOWNSTREAM ||
		    !pdev->is_hotplug_bridge)
			continue;

		link = device_link_add(&pdev->dev, &nhi->pdev->dev,
				       DL_FLAG_AUTOREMOVE_SUPPLIER |
				       DL_FLAG_PM_RUNTIME);
		if (link) {
			dev_dbg(&nhi->pdev->dev, "created link from %s\n",
				dev_name(&pdev->dev));
		} else {
			dev_warn(&nhi->pdev->dev, "device link creation from %s failed\n",
				 dev_name(&pdev->dev));
		}
	}
}

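/*
 * Allocates a Thunderbolt domain that uses this software connection
 * manager. As a rough usage sketch (the exact call site lives in the
 * NHI driver), the caller first tries the firmware connection manager
 * and falls back to this one:
 *
 *	tb = icm_probe(nhi);
 *	if (!tb)
 *		tb = tb_probe(nhi);
 *	if (!tb)
 *		return -ENODEV;
 */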
struct tb *tb_probe(struct tb_nhi *nhi)
{
	struct tb_cm *tcm;
	struct tb *tb;

	tb = tb_domain_alloc(nhi, TB_TIMEOUT, sizeof(*tcm));
	if (!tb)
		return NULL;

	if (tb_acpi_may_tunnel_pcie())
		tb->security_level = TB_SECURITY_USER;
	else
		tb->security_level = TB_SECURITY_NOPCIE;

	tb->cm_ops = &tb_cm_ops;

	tcm = tb_priv(tb);
	INIT_LIST_HEAD(&tcm->tunnel_list);
	INIT_LIST_HEAD(&tcm->dp_resources);
	INIT_DELAYED_WORK(&tcm->remove_work, tb_remove_work);

	tb_dbg(tb, "using software connection manager\n");

	tb_apple_add_links(nhi);
	tb_acpi_add_links(nhi);

	return tb;
}