// SPDX-License-Identifier: GPL-2.0
/*
 * Thunderbolt driver - bus logic (NHI independent)
 *
 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
 * Copyright (C) 2019, Intel Corporation
 */

#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/pm_runtime.h>

#include "tb.h"
#include "tb_regs.h"
#include "tunnel.h"

#define TB_TIMEOUT	100 /* ms */

/**
 * struct tb_cm - Simple Thunderbolt connection manager
 * @tunnel_list: List of active tunnels
 * @dp_resources: List of available DP resources for DP tunneling
 * @hotplug_active: tb_handle_hotplug will stop progressing plug
 *		    events and exit if this is not set (it needs to
 *		    acquire the lock one more time). Used to drain wq
 *		    after cfg has been paused.
 * @remove_work: Work used to remove any unplugged routers after
 *		 runtime resume
 */
struct tb_cm {
	struct list_head tunnel_list;
	struct list_head dp_resources;
	bool hotplug_active;
	struct delayed_work remove_work;
};

static inline struct tb *tcm_to_tb(struct tb_cm *tcm)
{
	return ((void *)tcm - sizeof(struct tb));
}

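/**
 * struct tb_hotplug_event - Thunderbolt hotplug event
 * @work: Work item delivering the event on the domain workqueue
 * @tb: Pointer to the owning domain
 * @route: Route string of the router that generated the event
 * @port: Adapter (port) number on that router
 * @unplug: True for an unplug event, false for a plug event
 */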
struct tb_hotplug_event {
	struct work_struct work;
	struct tb *tb;
	u64 route;
	u8 port;
	bool unplug;
};

static void tb_handle_hotplug(struct work_struct *work);

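/*
 * Allocate a hotplug event and queue it on the domain workqueue. This
 * is best effort: if the allocation fails the event is silently
 * dropped.
 */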
static void tb_queue_hotplug(struct tb *tb, u64 route, u8 port, bool unplug)
{
	struct tb_hotplug_event *ev;

	ev = kmalloc(sizeof(*ev), GFP_KERNEL);
	if (!ev)
		return;

	ev->tb = tb;
	ev->route = route;
	ev->port = port;
	ev->unplug = unplug;
	INIT_WORK(&ev->work, tb_handle_hotplug);
	queue_work(tb->wq, &ev->work);
}

/* enumeration & hot plug handling */

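/*
 * Add all DP IN adapters of @sw whose DP resource is reported available
 * to the connection manager's dp_resources list.
 */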
static void tb_add_dp_resources(struct tb_switch *sw)
{
	struct tb_cm *tcm = tb_priv(sw->tb);
	struct tb_port *port;

	tb_switch_for_each_port(sw, port) {
		if (!tb_port_is_dpin(port))
			continue;

		if (!tb_switch_query_dp_resource(sw, port))
			continue;

		list_add_tail(&port->list, &tcm->dp_resources);
		tb_port_dbg(port, "DP IN resource available\n");
	}
}

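/*
 * Remove the DP IN resources of @sw and of every router below it from
 * the dp_resources list, children first.
 */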
static void tb_remove_dp_resources(struct tb_switch *sw)
{
	struct tb_cm *tcm = tb_priv(sw->tb);
	struct tb_port *port, *tmp;

	/* Clear children resources first */
	tb_switch_for_each_port(sw, port) {
		if (tb_port_has_remote(port))
			tb_remove_dp_resources(port->remote->sw);
	}

	list_for_each_entry_safe(port, tmp, &tcm->dp_resources, list) {
		if (port->sw == sw) {
			tb_port_dbg(port, "DP IN resource unavailable\n");
			list_del_init(&port->list);
		}
	}
}

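/*
 * Walk the topology starting from @sw and pick up the DP, PCIe and
 * USB3 tunnels the boot firmware has already established. Routers
 * behind a discovered PCIe tunnel get their boot flag set so they can
 * be marked authorized later in tb_scan_finalize_switch().
 */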
static void tb_discover_tunnels(struct tb_switch *sw)
{
	struct tb *tb = sw->tb;
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_port *port;

	tb_switch_for_each_port(sw, port) {
		struct tb_tunnel *tunnel = NULL;

		switch (port->config.type) {
		case TB_TYPE_DP_HDMI_IN:
			tunnel = tb_tunnel_discover_dp(tb, port);
			break;

		case TB_TYPE_PCIE_DOWN:
			tunnel = tb_tunnel_discover_pci(tb, port);
			break;

		case TB_TYPE_USB3_DOWN:
			tunnel = tb_tunnel_discover_usb3(tb, port);
			break;

		default:
			break;
		}

		if (!tunnel)
			continue;

		if (tb_tunnel_is_pci(tunnel)) {
			struct tb_switch *parent = tunnel->dst_port->sw;

			while (parent != tunnel->src_port->sw) {
				parent->boot = true;
				parent = tb_switch_parent(parent);
			}
		}

		list_add_tail(&tunnel->list, &tcm->tunnel_list);
	}

	tb_switch_for_each_port(sw, port) {
		if (tb_port_has_remote(port))
			tb_discover_tunnels(port->remote->sw);
	}
}

static int tb_port_configure_xdomain(struct tb_port *port)
{
	/*
	 * XDomain paths currently only support single lane so we must
	 * disable the other lane according to USB4 spec.
	 */
	tb_port_disable(port->dual_link_port);

	if (tb_switch_is_usb4(port->sw))
		return usb4_port_configure_xdomain(port);
	return tb_lc_configure_xdomain(port);
}

static void tb_port_unconfigure_xdomain(struct tb_port *port)
{
	if (tb_switch_is_usb4(port->sw))
		usb4_port_unconfigure_xdomain(port);
	else
		tb_lc_unconfigure_xdomain(port);

	tb_port_enable(port->dual_link_port);
}

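/*
 * Called when the router behind @port could not be accessed as part of
 * this domain. Checks whether another domain is connected there and if
 * so allocates and registers an XDomain connection for it.
 */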
static void tb_scan_xdomain(struct tb_port *port)
{
	struct tb_switch *sw = port->sw;
	struct tb *tb = sw->tb;
	struct tb_xdomain *xd;
	u64 route;

	if (!tb_is_xdomain_enabled())
		return;

	route = tb_downstream_route(port);
	xd = tb_xdomain_find_by_route(tb, route);
	if (xd) {
		tb_xdomain_put(xd);
		return;
	}

	xd = tb_xdomain_alloc(tb, &sw->dev, route, tb->root_switch->uuid,
			      NULL);
	if (xd) {
		tb_port_at(route, sw)->xdomain = xd;
		tb_port_configure_xdomain(port);
		tb_xdomain_add(xd);
	}
}

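/*
 * Make sure the TMU of @sw runs in the correct mode: if it is already
 * enabled in the right mode this is a no-op, otherwise disable it,
 * post the time again and re-enable it.
 */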
static int tb_enable_tmu(struct tb_switch *sw)
{
	int ret;

	/* If it is already enabled in correct mode, don't touch it */
	if (tb_switch_tmu_is_enabled(sw))
		return 0;

	ret = tb_switch_tmu_disable(sw);
	if (ret)
		return ret;

	ret = tb_switch_tmu_post_time(sw);
	if (ret)
		return ret;

	return tb_switch_tmu_enable(sw);
}

/**
 * tb_find_unused_port() - return the first inactive port on @sw
 * @sw: Switch to find the port on
 * @type: Port type to look for
 */
static struct tb_port *tb_find_unused_port(struct tb_switch *sw,
					   enum tb_port_type type)
{
	struct tb_port *port;

	tb_switch_for_each_port(sw, port) {
		if (tb_is_upstream_port(port))
			continue;
		if (port->config.type != type)
			continue;
		if (!port->cap_adap)
			continue;
		if (tb_port_is_enabled(port))
			continue;
		return port;
	}
	return NULL;
}

static struct tb_port *tb_find_usb3_down(struct tb_switch *sw,
					 const struct tb_port *port)
{
	struct tb_port *down;

	down = usb4_switch_map_usb3_down(sw, port);
	if (down && !tb_usb3_port_is_enabled(down))
		return down;
	return NULL;
}

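/*
 * Return the first active tunnel of @type whose source matches
 * @src_port or whose destination matches @dst_port. Passing NULL for
 * either port leaves that end unmatched.
 */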
static struct tb_tunnel *tb_find_tunnel(struct tb *tb, enum tb_tunnel_type type,
					struct tb_port *src_port,
					struct tb_port *dst_port)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel;

	list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
		if (tunnel->type == type &&
		    ((src_port && src_port == tunnel->src_port) ||
		     (dst_port && dst_port == tunnel->dst_port))) {
			return tunnel;
		}
	}

	return NULL;
}

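/*
 * Find the USB3 tunnel that starts at the host router and leads
 * towards the deeper of @src_port and @dst_port. Returns NULL if the
 * deeper router is the host router itself or no such tunnel exists.
 */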
static struct tb_tunnel *tb_find_first_usb3_tunnel(struct tb *tb,
						   struct tb_port *src_port,
						   struct tb_port *dst_port)
{
	struct tb_port *port, *usb3_down;
	struct tb_switch *sw;

	/* Pick the router that is deepest in the topology */
	if (dst_port->sw->config.depth > src_port->sw->config.depth)
		sw = dst_port->sw;
	else
		sw = src_port->sw;

	/* Can't be the host router */
	if (sw == tb->root_switch)
		return NULL;

	/* Find the downstream USB4 port that leads to this router */
	port = tb_port_at(tb_route(sw), tb->root_switch);
	/* Find the corresponding host router USB3 downstream port */
	usb3_down = usb4_switch_map_usb3_down(tb->root_switch, port);
	if (!usb3_down)
		return NULL;

	return tb_find_tunnel(tb, TB_TUNNEL_USB3, usb3_down, NULL);
}

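/*
 * Compute the bandwidth (in Mb/s) still available in both directions
 * along the path between @src_port and @dst_port: for every link on
 * the path take the link capacity minus a 10% guard band, subtract the
 * bandwidth consumed by DP tunnels crossing the link and by USB3 on
 * this branch, and keep the minimum.
 */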
static int tb_available_bandwidth(struct tb *tb, struct tb_port *src_port,
	struct tb_port *dst_port, int *available_up, int *available_down)
{
	int usb3_consumed_up, usb3_consumed_down, ret;
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel;
	struct tb_port *port;

	tb_port_dbg(dst_port, "calculating available bandwidth\n");

	tunnel = tb_find_first_usb3_tunnel(tb, src_port, dst_port);
	if (tunnel) {
		ret = tb_tunnel_consumed_bandwidth(tunnel, &usb3_consumed_up,
						   &usb3_consumed_down);
		if (ret)
			return ret;
	} else {
		usb3_consumed_up = 0;
		usb3_consumed_down = 0;
	}

	*available_up = *available_down = 40000;

	/* Find the minimum available bandwidth over all links */
	tb_for_each_port_on_path(src_port, dst_port, port) {
		int link_speed, link_width, up_bw, down_bw;

		if (!tb_port_is_null(port))
			continue;

		if (tb_is_upstream_port(port)) {
			link_speed = port->sw->link_speed;
		} else {
			link_speed = tb_port_get_link_speed(port);
			if (link_speed < 0)
				return link_speed;
		}

		link_width = port->bonded ? 2 : 1;

		up_bw = link_speed * link_width * 1000; /* Mb/s */
		/* Leave 10% guard band */
		up_bw -= up_bw / 10;
		down_bw = up_bw;

		tb_port_dbg(port, "link total bandwidth %d Mb/s\n", up_bw);

		/*
		 * Find all DP tunnels that cross the port and reduce
		 * their consumed bandwidth from the available.
		 */
		list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
			int dp_consumed_up, dp_consumed_down;

			if (!tb_tunnel_is_dp(tunnel))
				continue;

			if (!tb_tunnel_port_on_path(tunnel, port))
				continue;

			ret = tb_tunnel_consumed_bandwidth(tunnel,
							   &dp_consumed_up,
							   &dp_consumed_down);
			if (ret)
				return ret;

			up_bw -= dp_consumed_up;
			down_bw -= dp_consumed_down;
		}

		/*
		 * If USB3 is tunneled from the host router down to the
		 * branch leading to port we need to take USB3 consumed
		 * bandwidth into account regardless of whether it
		 * actually crosses the port.
		 */
		up_bw -= usb3_consumed_up;
		down_bw -= usb3_consumed_down;

		if (up_bw < *available_up)
			*available_up = up_bw;
		if (down_bw < *available_down)
			*available_down = down_bw;
	}

	if (*available_up < 0)
		*available_up = 0;
	if (*available_down < 0)
		*available_down = 0;

	return 0;
}

static int tb_release_unused_usb3_bandwidth(struct tb *tb,
					    struct tb_port *src_port,
					    struct tb_port *dst_port)
{
	struct tb_tunnel *tunnel;

	tunnel = tb_find_first_usb3_tunnel(tb, src_port, dst_port);
	return tunnel ? tb_tunnel_release_unused_bandwidth(tunnel) : 0;
}

static void tb_reclaim_usb3_bandwidth(struct tb *tb, struct tb_port *src_port,
				      struct tb_port *dst_port)
{
	int ret, available_up, available_down;
	struct tb_tunnel *tunnel;

	tunnel = tb_find_first_usb3_tunnel(tb, src_port, dst_port);
	if (!tunnel)
		return;

	tb_dbg(tb, "reclaiming unused bandwidth for USB3\n");

	/*
	 * Calculate available bandwidth for the first hop USB3 tunnel.
	 * That determines the whole USB3 bandwidth for this branch.
	 */
	ret = tb_available_bandwidth(tb, tunnel->src_port, tunnel->dst_port,
				     &available_up, &available_down);
	if (ret) {
		tb_warn(tb, "failed to calculate available bandwidth\n");
		return;
	}

	tb_dbg(tb, "available bandwidth for USB3 %d/%d Mb/s\n",
	       available_up, available_down);

	tb_tunnel_reclaim_available_bandwidth(tunnel, &available_up, &available_down);
}

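/*
 * Set up a USB3 tunnel between @sw and its parent, provided the chain
 * of USB3 tunnels above the parent is complete. Unused USB3 bandwidth
 * on the branch is released first so the new tunnel can be given as
 * much of it as possible, and reclaimed again afterwards.
 */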
static int tb_tunnel_usb3(struct tb *tb, struct tb_switch *sw)
{
	struct tb_switch *parent = tb_switch_parent(sw);
	int ret, available_up, available_down;
	struct tb_port *up, *down, *port;
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel;

	if (!tb_acpi_may_tunnel_usb3()) {
		tb_dbg(tb, "USB3 tunneling disabled, not creating tunnel\n");
		return 0;
	}

	up = tb_switch_find_port(sw, TB_TYPE_USB3_UP);
	if (!up)
		return 0;

	if (!sw->link_usb4)
		return 0;

	/*
	 * Look up available down port. Since we are chaining it should
	 * be found right above this switch.
	 */
	port = tb_port_at(tb_route(sw), parent);
	down = tb_find_usb3_down(parent, port);
	if (!down)
		return 0;

	if (tb_route(parent)) {
		struct tb_port *parent_up;
		/*
		 * Check first that the parent switch has its upstream USB3
		 * port enabled. Otherwise the chain is not complete and
		 * there is no point setting up a new tunnel.
		 */
		parent_up = tb_switch_find_port(parent, TB_TYPE_USB3_UP);
		if (!parent_up || !tb_port_is_enabled(parent_up))
			return 0;

		/* Make all unused bandwidth available for the new tunnel */
		ret = tb_release_unused_usb3_bandwidth(tb, down, up);
		if (ret)
			return ret;
	}

	ret = tb_available_bandwidth(tb, down, up, &available_up,
				     &available_down);
	if (ret)
		goto err_reclaim;

	tb_port_dbg(up, "available bandwidth for new USB3 tunnel %d/%d Mb/s\n",
		    available_up, available_down);

	tunnel = tb_tunnel_alloc_usb3(tb, up, down, available_up,
				      available_down);
	if (!tunnel) {
		ret = -ENOMEM;
		goto err_reclaim;
	}

	if (tb_tunnel_activate(tunnel)) {
		tb_port_info(up,
			     "USB3 tunnel activation failed, aborting\n");
		ret = -EIO;
		goto err_free;
	}

	list_add_tail(&tunnel->list, &tcm->tunnel_list);
	if (tb_route(parent))
		tb_reclaim_usb3_bandwidth(tb, down, up);

	return 0;

err_free:
	tb_tunnel_free(tunnel);
err_reclaim:
	if (tb_route(parent))
		tb_reclaim_usb3_bandwidth(tb, down, up);

	return ret;
}

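/*
 * Recursively create USB3 tunnels for @sw and all routers below it.
 * Used during driver load for topologies the boot firmware left
 * untunneled.
 */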
static int tb_create_usb3_tunnels(struct tb_switch *sw)
{
	struct tb_port *port;
	int ret;

	if (!tb_acpi_may_tunnel_usb3())
		return 0;

	if (tb_route(sw)) {
		ret = tb_tunnel_usb3(sw->tb, sw);
		if (ret)
			return ret;
	}

	tb_switch_for_each_port(sw, port) {
		if (!tb_port_has_remote(port))
			continue;
		ret = tb_create_usb3_tunnels(port->remote->sw);
		if (ret)
			return ret;
	}

	return 0;
}

static void tb_scan_port(struct tb_port *port);

/*
 * tb_scan_switch() - scan for and initialize downstream switches
 */
static void tb_scan_switch(struct tb_switch *sw)
{
	struct tb_port *port;

	pm_runtime_get_sync(&sw->dev);

	tb_switch_for_each_port(sw, port)
		tb_scan_port(port);

	pm_runtime_mark_last_busy(&sw->dev);
	pm_runtime_put_autosuspend(&sw->dev);
}

/*
 * tb_scan_port() - check for and initialize switches below port
 */
static void tb_scan_port(struct tb_port *port)
{
	struct tb_cm *tcm = tb_priv(port->sw->tb);
	struct tb_port *upstream_port;
	struct tb_switch *sw;

	if (tb_is_upstream_port(port))
		return;

	if (tb_port_is_dpout(port) && tb_dp_port_hpd_is_active(port) == 1 &&
	    !tb_dp_port_is_enabled(port)) {
		tb_port_dbg(port, "DP adapter HPD set, queuing hotplug\n");
		tb_queue_hotplug(port->sw->tb, tb_route(port->sw), port->port,
				 false);
		return;
	}

	if (port->config.type != TB_TYPE_PORT)
		return;
	if (port->dual_link_port && port->link_nr)
		return; /*
			 * Downstream switch is reachable through two ports.
			 * Only scan on the primary port (link_nr == 0).
			 */
	if (tb_wait_for_port(port, false) <= 0)
		return;
	if (port->remote) {
		tb_port_dbg(port, "port already has a remote\n");
		return;
	}

	tb_retimer_scan(port);

	sw = tb_switch_alloc(port->sw->tb, &port->sw->dev,
			     tb_downstream_route(port));
	if (IS_ERR(sw)) {
		/*
		 * If there is an error accessing the connected switch
		 * it may be connected to another domain. Also we allow
		 * the other domain to be connected to a max depth switch.
		 */
		if (PTR_ERR(sw) == -EIO || PTR_ERR(sw) == -EADDRNOTAVAIL)
			tb_scan_xdomain(port);
		return;
	}

	if (tb_switch_configure(sw)) {
		tb_switch_put(sw);
		return;
	}

	/*
	 * If there was previously another domain connected remove it
	 * first.
	 */
	if (port->xdomain) {
		tb_xdomain_remove(port->xdomain);
		tb_port_unconfigure_xdomain(port);
		port->xdomain = NULL;
	}

	/*
	 * Do not send uevents until we have discovered all existing
	 * tunnels and know which switches were authorized already by
	 * the boot firmware.
	 */
	if (!tcm->hotplug_active)
		dev_set_uevent_suppress(&sw->dev, true);

	/*
	 * At the moment runtime PM is only supported on Thunderbolt 2
	 * and beyond (devices with LC).
	 */
	sw->rpm = sw->generation > 1;

	if (tb_switch_add(sw)) {
		tb_switch_put(sw);
		return;
	}

	/* Link the switches using both links if available */
	upstream_port = tb_upstream_port(sw);
	port->remote = upstream_port;
	upstream_port->remote = port;
	if (port->dual_link_port && upstream_port->dual_link_port) {
		port->dual_link_port->remote = upstream_port->dual_link_port;
		upstream_port->dual_link_port->remote = port->dual_link_port;
	}

	/* Enable lane bonding if supported */
	tb_switch_lane_bonding_enable(sw);
	/* Set the link configured */
	tb_switch_configure_link(sw);

	if (tb_enable_tmu(sw))
		tb_sw_warn(sw, "failed to enable TMU\n");

	/* Scan upstream retimers */
	tb_retimer_scan(upstream_port);

	/*
	 * Create USB 3.x tunnels only when the switch is plugged to the
	 * domain. This is because we scan the domain also during discovery
	 * and want to discover existing USB 3.x tunnels before we create
	 * any new ones.
	 */
	if (tcm->hotplug_active && tb_tunnel_usb3(sw->tb, sw))
		tb_sw_warn(sw, "USB3 tunnel creation failed\n");

	tb_add_dp_resources(sw);
	tb_scan_switch(sw);
}

static void tb_deactivate_and_free_tunnel(struct tb_tunnel *tunnel)
{
	struct tb_port *src_port, *dst_port;
	struct tb *tb;

	if (!tunnel)
		return;

	tb_tunnel_deactivate(tunnel);
	list_del(&tunnel->list);

	tb = tunnel->tb;
	src_port = tunnel->src_port;
	dst_port = tunnel->dst_port;

	switch (tunnel->type) {
	case TB_TUNNEL_DP:
		/*
		 * In case of DP tunnel make sure the DP IN resource is
		 * deallocated properly.
		 */
		tb_switch_dealloc_dp_resource(src_port->sw, src_port);
		/* Now we can allow the domain to runtime suspend again */
		pm_runtime_mark_last_busy(&dst_port->sw->dev);
		pm_runtime_put_autosuspend(&dst_port->sw->dev);
		pm_runtime_mark_last_busy(&src_port->sw->dev);
		pm_runtime_put_autosuspend(&src_port->sw->dev);
		fallthrough;

	case TB_TUNNEL_USB3:
		tb_reclaim_usb3_bandwidth(tb, src_port, dst_port);
		break;

	default:
		/*
		 * PCIe and DMA tunnels do not consume guaranteed
		 * bandwidth.
		 */
		break;
	}

	tb_tunnel_free(tunnel);
}

/*
 * tb_free_invalid_tunnels() - destroy tunnels of devices that have gone away
 */
static void tb_free_invalid_tunnels(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel;
	struct tb_tunnel *n;

	list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
		if (tb_tunnel_is_invalid(tunnel))
			tb_deactivate_and_free_tunnel(tunnel);
	}
}

/*
 * tb_free_unplugged_children() - traverse hierarchy and free unplugged switches
 */
static void tb_free_unplugged_children(struct tb_switch *sw)
{
	struct tb_port *port;

	tb_switch_for_each_port(sw, port) {
		if (!tb_port_has_remote(port))
			continue;

		if (port->remote->sw->is_unplugged) {
			tb_retimer_remove_all(port);
			tb_remove_dp_resources(port->remote->sw);
			tb_switch_unconfigure_link(port->remote->sw);
			tb_switch_lane_bonding_disable(port->remote->sw);
			tb_switch_remove(port->remote->sw);
			port->remote = NULL;
			if (port->dual_link_port)
				port->dual_link_port->remote = NULL;
		} else {
			tb_free_unplugged_children(port->remote->sw);
		}
	}
}

static struct tb_port *tb_find_pcie_down(struct tb_switch *sw,
					 const struct tb_port *port)
{
	struct tb_port *down = NULL;

	/*
	 * To keep plugging devices consistently in the same PCIe
	 * hierarchy, do mapping here for switch downstream PCIe ports.
	 */
	if (tb_switch_is_usb4(sw)) {
		down = usb4_switch_map_pcie_down(sw, port);
	} else if (!tb_route(sw)) {
		int phy_port = tb_phy_port_from_link(port->port);
		int index;

		/*
		 * Hard-coded Thunderbolt port to PCIe down port mapping
		 * per controller.
		 */
		if (tb_switch_is_cactus_ridge(sw) ||
		    tb_switch_is_alpine_ridge(sw))
			index = !phy_port ? 6 : 7;
		else if (tb_switch_is_falcon_ridge(sw))
			index = !phy_port ? 6 : 8;
		else if (tb_switch_is_titan_ridge(sw))
			index = !phy_port ? 8 : 9;
		else
			goto out;

		/* Validate the hard-coding */
		if (WARN_ON(index > sw->config.max_port_number))
			goto out;

		down = &sw->ports[index];
	}

	if (down) {
		if (WARN_ON(!tb_port_is_pcie_down(down)))
			goto out;
		if (tb_pci_port_is_enabled(down))
			goto out;

		return down;
	}

out:
	return tb_find_unused_port(sw, TB_TYPE_PCIE_DOWN);
}

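/*
 * Pick an inactive DP OUT adapter for @in from the dp_resources list,
 * skipping adapters that sit behind a different host router downstream
 * port so the tunnel stays within one branch of the topology.
 */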
static struct tb_port *tb_find_dp_out(struct tb *tb, struct tb_port *in)
{
	struct tb_port *host_port, *port;
	struct tb_cm *tcm = tb_priv(tb);

	host_port = tb_route(in->sw) ?
		tb_port_at(tb_route(in->sw), tb->root_switch) : NULL;

	list_for_each_entry(port, &tcm->dp_resources, list) {
		if (!tb_port_is_dpout(port))
			continue;

		if (tb_port_is_enabled(port)) {
			tb_port_dbg(port, "in use\n");
			continue;
		}

		tb_port_dbg(port, "DP OUT available\n");

		/*
		 * Keep the DP tunnel under the topology starting from
		 * the same host router downstream port.
		 */
		if (host_port && tb_route(port->sw)) {
			struct tb_port *p;

			p = tb_port_at(tb_route(port->sw), tb->root_switch);
			if (p != host_port)
				continue;
		}

		return port;
	}

	return NULL;
}

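/*
 * Find a pair of inactive DP IN and DP OUT adapters and establish a DP
 * tunnel between them, allocating the DP IN resource and as much
 * bandwidth as the path can currently spare. Both ends are runtime
 * resumed for as long as the tunnel stays up.
 */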
static void tb_tunnel_dp(struct tb *tb)
{
	int available_up, available_down, ret;
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_port *port, *in, *out;
	struct tb_tunnel *tunnel;

	if (!tb_acpi_may_tunnel_dp()) {
		tb_dbg(tb, "DP tunneling disabled, not creating tunnel\n");
		return;
	}

	/*
	 * Find pair of inactive DP IN and DP OUT adapters and then
	 * establish a DP tunnel between them.
	 */
	tb_dbg(tb, "looking for DP IN <-> DP OUT pairs:\n");

	in = NULL;
	out = NULL;
	list_for_each_entry(port, &tcm->dp_resources, list) {
		if (!tb_port_is_dpin(port))
			continue;

		if (tb_port_is_enabled(port)) {
			tb_port_dbg(port, "in use\n");
			continue;
		}

		tb_port_dbg(port, "DP IN available\n");

		out = tb_find_dp_out(tb, port);
		if (out) {
			in = port;
			break;
		}
	}

	if (!in) {
		tb_dbg(tb, "no suitable DP IN adapter available, not tunneling\n");
		return;
	}
	if (!out) {
		tb_dbg(tb, "no suitable DP OUT adapter available, not tunneling\n");
		return;
	}

	/*
	 * DP stream needs the domain to be active so runtime resume
	 * both ends of the tunnel.
	 *
	 * This should bring the routers in the middle active as well
	 * and keeps the domain from runtime suspending while the DP
	 * tunnel is active.
	 */
	pm_runtime_get_sync(&in->sw->dev);
	pm_runtime_get_sync(&out->sw->dev);

	if (tb_switch_alloc_dp_resource(in->sw, in)) {
		tb_port_dbg(in, "no resource available for DP IN, not tunneling\n");
		goto err_rpm_put;
	}

	/* Make all unused USB3 bandwidth available for the new DP tunnel */
	ret = tb_release_unused_usb3_bandwidth(tb, in, out);
	if (ret) {
		tb_warn(tb, "failed to release unused bandwidth\n");
		goto err_dealloc_dp;
	}

	ret = tb_available_bandwidth(tb, in, out, &available_up,
				     &available_down);
	if (ret)
		goto err_reclaim;

	tb_dbg(tb, "available bandwidth for new DP tunnel %u/%u Mb/s\n",
	       available_up, available_down);

	tunnel = tb_tunnel_alloc_dp(tb, in, out, available_up, available_down);
	if (!tunnel) {
		tb_port_dbg(out, "could not allocate DP tunnel\n");
		goto err_reclaim;
	}

	if (tb_tunnel_activate(tunnel)) {
		tb_port_info(out, "DP tunnel activation failed, aborting\n");
		goto err_free;
	}

	list_add_tail(&tunnel->list, &tcm->tunnel_list);
	tb_reclaim_usb3_bandwidth(tb, in, out);
	return;

err_free:
	tb_tunnel_free(tunnel);
err_reclaim:
	tb_reclaim_usb3_bandwidth(tb, in, out);
err_dealloc_dp:
	tb_switch_dealloc_dp_resource(in->sw, in);
err_rpm_put:
	pm_runtime_mark_last_busy(&out->sw->dev);
	pm_runtime_put_autosuspend(&out->sw->dev);
	pm_runtime_mark_last_busy(&in->sw->dev);
	pm_runtime_put_autosuspend(&in->sw->dev);
}

static void tb_dp_resource_unavailable(struct tb *tb, struct tb_port *port)
{
	struct tb_port *in, *out;
	struct tb_tunnel *tunnel;

	if (tb_port_is_dpin(port)) {
		tb_port_dbg(port, "DP IN resource unavailable\n");
		in = port;
		out = NULL;
	} else {
		tb_port_dbg(port, "DP OUT resource unavailable\n");
		in = NULL;
		out = port;
	}

	tunnel = tb_find_tunnel(tb, TB_TUNNEL_DP, in, out);
	tb_deactivate_and_free_tunnel(tunnel);
	list_del_init(&port->list);

	/*
	 * See if there is another DP OUT port that can be used to
	 * create another tunnel.
	 */
	tb_tunnel_dp(tb);
}

static void tb_dp_resource_available(struct tb *tb, struct tb_port *port)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_port *p;

	if (tb_port_is_enabled(port))
		return;

	list_for_each_entry(p, &tcm->dp_resources, list) {
		if (p == port)
			return;
	}

	tb_port_dbg(port, "DP %s resource available\n",
		    tb_port_is_dpin(port) ? "IN" : "OUT");
	list_add_tail(&port->list, &tcm->dp_resources);

	/* Look for suitable DP IN <-> DP OUT pairs now */
	tb_tunnel_dp(tb);
}

static void tb_disconnect_and_release_dp(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel, *n;

	/*
	 * Tear down all DP tunnels and release their resources. They
	 * will be re-established after resume based on plug events.
	 */
	list_for_each_entry_safe_reverse(tunnel, n, &tcm->tunnel_list, list) {
		if (tb_tunnel_is_dp(tunnel))
			tb_deactivate_and_free_tunnel(tunnel);
	}

	while (!list_empty(&tcm->dp_resources)) {
		struct tb_port *port;

		port = list_first_entry(&tcm->dp_resources,
					struct tb_port, list);
		list_del_init(&port->list);
	}
}

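/*
 * Tear down the PCIe tunnel that ends at the PCIe upstream adapter of
 * @sw. Serves as the ->disapprove_switch callback of this connection
 * manager.
 */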
static int tb_disconnect_pci(struct tb *tb, struct tb_switch *sw)
{
	struct tb_tunnel *tunnel;
	struct tb_port *up;

	up = tb_switch_find_port(sw, TB_TYPE_PCIE_UP);
	if (WARN_ON(!up))
		return -ENODEV;

	tunnel = tb_find_tunnel(tb, TB_TUNNEL_PCI, NULL, up);
	if (WARN_ON(!tunnel))
		return -ENODEV;

	tb_tunnel_deactivate(tunnel);
	list_del(&tunnel->list);
	tb_tunnel_free(tunnel);
	return 0;
}

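/*
 * Establish a PCIe tunnel between @sw and its parent. Serves as the
 * ->approve_switch callback of this connection manager.
 */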
static int tb_tunnel_pci(struct tb *tb, struct tb_switch *sw)
{
	struct tb_port *up, *down, *port;
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_switch *parent_sw;
	struct tb_tunnel *tunnel;

	up = tb_switch_find_port(sw, TB_TYPE_PCIE_UP);
	if (!up)
		return 0;

	/*
	 * Look up available down port. Since we are chaining it should
	 * be found right above this switch.
	 */
	parent_sw = tb_to_switch(sw->dev.parent);
	port = tb_port_at(tb_route(sw), parent_sw);
	down = tb_find_pcie_down(parent_sw, port);
	if (!down)
		return 0;

	tunnel = tb_tunnel_alloc_pci(tb, up, down);
	if (!tunnel)
		return -ENOMEM;

	if (tb_tunnel_activate(tunnel)) {
		tb_port_info(up,
			     "PCIe tunnel activation failed, aborting\n");
		tb_tunnel_free(tunnel);
		return -EIO;
	}

	list_add_tail(&tunnel->list, &tcm->tunnel_list);
	return 0;
}

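/*
 * Set up a DMA tunnel between the NHI of the host router and the peer
 * domain behind @xd, using the transmit/receive rings and paths stored
 * in @xd.
 */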
static int tb_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_port *nhi_port, *dst_port;
	struct tb_tunnel *tunnel;
	struct tb_switch *sw;

	sw = tb_to_switch(xd->dev.parent);
	dst_port = tb_port_at(xd->route, sw);
	nhi_port = tb_switch_find_port(tb->root_switch, TB_TYPE_NHI);

	mutex_lock(&tb->lock);
	tunnel = tb_tunnel_alloc_dma(tb, nhi_port, dst_port, xd->transmit_ring,
				     xd->transmit_path, xd->receive_ring,
				     xd->receive_path);
	if (!tunnel) {
		mutex_unlock(&tb->lock);
		return -ENOMEM;
	}

	if (tb_tunnel_activate(tunnel)) {
		tb_port_info(nhi_port,
			     "DMA tunnel activation failed, aborting\n");
		tb_tunnel_free(tunnel);
		mutex_unlock(&tb->lock);
		return -EIO;
	}

	list_add_tail(&tunnel->list, &tcm->tunnel_list);
	mutex_unlock(&tb->lock);
	return 0;
}

static void __tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
{
	struct tb_port *dst_port;
	struct tb_tunnel *tunnel;
	struct tb_switch *sw;

	sw = tb_to_switch(xd->dev.parent);
	dst_port = tb_port_at(xd->route, sw);

	/*
	 * It is possible that the tunnel was already torn down (in
	 * case of cable disconnect) so it is fine if we cannot find it
	 * here anymore.
	 */
	tunnel = tb_find_tunnel(tb, TB_TUNNEL_DMA, NULL, dst_port);
	tb_deactivate_and_free_tunnel(tunnel);
}

static int tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
{
	if (!xd->is_unplugged) {
		mutex_lock(&tb->lock);
		__tb_disconnect_xdomain_paths(tb, xd);
		mutex_unlock(&tb->lock);
	}
	return 0;
}

/* hotplug handling */

/*
 * tb_handle_hotplug() - handle hotplug event
 *
 * Executes on tb->wq.
 */
static void tb_handle_hotplug(struct work_struct *work)
{
	struct tb_hotplug_event *ev = container_of(work, typeof(*ev), work);
	struct tb *tb = ev->tb;
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_switch *sw;
	struct tb_port *port;

	/* Bring the domain back from sleep if it was suspended */
	pm_runtime_get_sync(&tb->dev);

	mutex_lock(&tb->lock);
	if (!tcm->hotplug_active)
		goto out; /* during init, suspend or shutdown */

	sw = tb_switch_find_by_route(tb, ev->route);
	if (!sw) {
		tb_warn(tb,
			"hotplug event from non existent switch %llx:%x (unplug: %d)\n",
			ev->route, ev->port, ev->unplug);
		goto out;
	}
	if (ev->port > sw->config.max_port_number) {
		tb_warn(tb,
			"hotplug event from non existent port %llx:%x (unplug: %d)\n",
			ev->route, ev->port, ev->unplug);
		goto put_sw;
	}
	port = &sw->ports[ev->port];
	if (tb_is_upstream_port(port)) {
		tb_dbg(tb, "hotplug event for upstream port %llx:%x (unplug: %d)\n",
		       ev->route, ev->port, ev->unplug);
		goto put_sw;
	}

	pm_runtime_get_sync(&sw->dev);

	if (ev->unplug) {
		tb_retimer_remove_all(port);

		if (tb_port_has_remote(port)) {
			tb_port_dbg(port, "switch unplugged\n");
			tb_sw_set_unplugged(port->remote->sw);
			tb_free_invalid_tunnels(tb);
			tb_remove_dp_resources(port->remote->sw);
			tb_switch_tmu_disable(port->remote->sw);
			tb_switch_unconfigure_link(port->remote->sw);
			tb_switch_lane_bonding_disable(port->remote->sw);
			tb_switch_remove(port->remote->sw);
			port->remote = NULL;
			if (port->dual_link_port)
				port->dual_link_port->remote = NULL;
			/* Maybe we can create another DP tunnel */
			tb_tunnel_dp(tb);
		} else if (port->xdomain) {
			struct tb_xdomain *xd = tb_xdomain_get(port->xdomain);

			tb_port_dbg(port, "xdomain unplugged\n");
			/*
			 * Service drivers are unbound during
			 * tb_xdomain_remove() so setting XDomain as
			 * unplugged here prevents deadlock if they call
			 * tb_xdomain_disable_paths(). We will tear down
			 * the path below.
			 */
			xd->is_unplugged = true;
			tb_xdomain_remove(xd);
			port->xdomain = NULL;
			__tb_disconnect_xdomain_paths(tb, xd);
			tb_xdomain_put(xd);
			tb_port_unconfigure_xdomain(port);
		} else if (tb_port_is_dpout(port) || tb_port_is_dpin(port)) {
			tb_dp_resource_unavailable(tb, port);
		} else {
			tb_port_dbg(port,
				    "got unplug event for disconnected port, ignoring\n");
		}
	} else if (port->remote) {
		tb_port_dbg(port, "got plug event for connected port, ignoring\n");
	} else {
		if (tb_port_is_null(port)) {
			tb_port_dbg(port, "hotplug: scanning\n");
			tb_scan_port(port);
			if (!port->remote)
				tb_port_dbg(port, "hotplug: no switch found\n");
		} else if (tb_port_is_dpout(port) || tb_port_is_dpin(port)) {
			tb_dp_resource_available(tb, port);
		}
	}

	pm_runtime_mark_last_busy(&sw->dev);
	pm_runtime_put_autosuspend(&sw->dev);

put_sw:
	tb_switch_put(sw);
out:
	mutex_unlock(&tb->lock);

	pm_runtime_mark_last_busy(&tb->dev);
	pm_runtime_put_autosuspend(&tb->dev);

	kfree(ev);
}

/*
 * tb_handle_event() - callback function for the control channel
 *
 * Delegates to tb_handle_hotplug.
 */
static void tb_handle_event(struct tb *tb, enum tb_cfg_pkg_type type,
			    const void *buf, size_t size)
{
	const struct cfg_event_pkg *pkg = buf;
	u64 route;

	if (type != TB_CFG_PKG_EVENT) {
		tb_warn(tb, "unexpected event %#x, ignoring\n", type);
		return;
	}

	route = tb_cfg_get_route(&pkg->header);

	if (tb_cfg_ack_plug(tb->ctl, route, pkg->port, pkg->unplug)) {
		tb_warn(tb, "could not ack plug event on %llx:%x\n", route,
			pkg->port);
	}

	tb_queue_hotplug(tb, route, pkg->port, pkg->unplug);
}

static void tb_stop(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel;
	struct tb_tunnel *n;

	cancel_delayed_work(&tcm->remove_work);
	/* tunnels are only present after everything has been initialized */
	list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
		/*
		 * DMA tunnels require the driver to be functional so we
		 * tear them down. Other protocol tunnels can be left
		 * intact.
		 */
		if (tb_tunnel_is_dma(tunnel))
			tb_tunnel_deactivate(tunnel);
		tb_tunnel_free(tunnel);
	}
	tb_switch_remove(tb->root_switch);
	tcm->hotplug_active = false; /* signal tb_handle_hotplug to quit */
}

static int tb_scan_finalize_switch(struct device *dev, void *data)
{
	if (tb_is_switch(dev)) {
		struct tb_switch *sw = tb_to_switch(dev);

		/*
		 * If we found that the switch was already set up by the
		 * boot firmware, mark it as authorized now before we
		 * send uevent to userspace.
		 */
		if (sw->boot)
			sw->authorized = 1;

		dev_set_uevent_suppress(dev, false);
		kobject_uevent(&dev->kobj, KOBJ_ADD);
		device_for_each_child(dev, NULL, tb_scan_finalize_switch);
	}

	return 0;
}

static int tb_start(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);
	int ret;

	tb->root_switch = tb_switch_alloc(tb, &tb->dev, 0);
	if (IS_ERR(tb->root_switch))
		return PTR_ERR(tb->root_switch);

	/*
	 * ICM firmware upgrade needs running firmware, and that is not
	 * available in native mode, so disable firmware upgrade of the
	 * root switch.
	 */
	tb->root_switch->no_nvm_upgrade = true;
	/* All USB4 routers support runtime PM */
	tb->root_switch->rpm = tb_switch_is_usb4(tb->root_switch);

	ret = tb_switch_configure(tb->root_switch);
	if (ret) {
		tb_switch_put(tb->root_switch);
		return ret;
	}

	/* Announce the switch to the world */
	ret = tb_switch_add(tb->root_switch);
	if (ret) {
		tb_switch_put(tb->root_switch);
		return ret;
	}

	/* Enable TMU if it is off */
	tb_switch_tmu_enable(tb->root_switch);
	/* Full scan to discover devices added before the driver was loaded. */
	tb_scan_switch(tb->root_switch);
	/* Find out tunnels created by the boot firmware */
	tb_discover_tunnels(tb->root_switch);
	/*
	 * If the boot firmware did not create USB 3.x tunnels create
	 * them now for the whole topology.
	 */
	tb_create_usb3_tunnels(tb->root_switch);
	/* Add DP IN resources for the root switch */
	tb_add_dp_resources(tb->root_switch);
	/* Make the discovered switches available to the userspace */
	device_for_each_child(&tb->root_switch->dev, NULL,
			      tb_scan_finalize_switch);

	/* Allow tb_handle_hotplug to progress events */
	tcm->hotplug_active = true;
	return 0;
}

static int tb_suspend_noirq(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);

	tb_dbg(tb, "suspending...\n");
	tb_disconnect_and_release_dp(tb);
	tb_switch_suspend(tb->root_switch, false);
	tcm->hotplug_active = false; /* signal tb_handle_hotplug to quit */
	tb_dbg(tb, "suspend finished\n");

	return 0;
}

static void tb_restore_children(struct tb_switch *sw)
{
	struct tb_port *port;

	/* No need to restore if the router is already unplugged */
	if (sw->is_unplugged)
		return;

	if (tb_enable_tmu(sw))
		tb_sw_warn(sw, "failed to restore TMU configuration\n");

	tb_switch_for_each_port(sw, port) {
		if (!tb_port_has_remote(port) && !port->xdomain)
			continue;

		if (port->remote) {
			tb_switch_lane_bonding_enable(port->remote->sw);
			tb_switch_configure_link(port->remote->sw);

			tb_restore_children(port->remote->sw);
		} else if (port->xdomain) {
			tb_port_configure_xdomain(port);
		}
	}
}

static int tb_resume_noirq(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel, *n;

	tb_dbg(tb, "resuming...\n");

	/* remove any pci devices the firmware might have set up */
	tb_switch_reset(tb->root_switch);

	tb_switch_resume(tb->root_switch);
	tb_free_invalid_tunnels(tb);
	tb_free_unplugged_children(tb->root_switch);
	tb_restore_children(tb->root_switch);
	list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list)
		tb_tunnel_restart(tunnel);
	if (!list_empty(&tcm->tunnel_list)) {
		/*
		 * the pcie links need some time to get going.
		 * 100ms works for me...
		 */
		tb_dbg(tb, "tunnels restarted, sleeping for 100ms\n");
		msleep(100);
	}
	/* Allow tb_handle_hotplug to progress events */
	tcm->hotplug_active = true;
	tb_dbg(tb, "resume finished\n");

	return 0;
}

static int tb_free_unplugged_xdomains(struct tb_switch *sw)
{
	struct tb_port *port;
	int ret = 0;

	tb_switch_for_each_port(sw, port) {
		if (tb_is_upstream_port(port))
			continue;
		if (port->xdomain && port->xdomain->is_unplugged) {
			tb_retimer_remove_all(port);
			tb_xdomain_remove(port->xdomain);
			tb_port_unconfigure_xdomain(port);
			port->xdomain = NULL;
			ret++;
		} else if (port->remote) {
			ret += tb_free_unplugged_xdomains(port->remote->sw);
		}
	}

	return ret;
}

static int tb_freeze_noirq(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);

	tcm->hotplug_active = false;
	return 0;
}

static int tb_thaw_noirq(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);

	tcm->hotplug_active = true;
	return 0;
}

static void tb_complete(struct tb *tb)
{
	/*
	 * Release any unplugged XDomains and if there is a case where
	 * another domain is swapped in place of unplugged XDomain we
	 * need to run another rescan.
	 */
	mutex_lock(&tb->lock);
	if (tb_free_unplugged_xdomains(tb->root_switch))
		tb_scan_switch(tb->root_switch);
	mutex_unlock(&tb->lock);
}

static int tb_runtime_suspend(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);

	mutex_lock(&tb->lock);
	tb_switch_suspend(tb->root_switch, true);
	tcm->hotplug_active = false;
	mutex_unlock(&tb->lock);

	return 0;
}

static void tb_remove_work(struct work_struct *work)
{
	struct tb_cm *tcm = container_of(work, struct tb_cm, remove_work.work);
	struct tb *tb = tcm_to_tb(tcm);

	mutex_lock(&tb->lock);
	if (tb->root_switch) {
		tb_free_unplugged_children(tb->root_switch);
		tb_free_unplugged_xdomains(tb->root_switch);
	}
	mutex_unlock(&tb->lock);
}

static int tb_runtime_resume(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel, *n;

	mutex_lock(&tb->lock);
	tb_switch_resume(tb->root_switch);
	tb_free_invalid_tunnels(tb);
	tb_restore_children(tb->root_switch);
	list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list)
		tb_tunnel_restart(tunnel);
	tcm->hotplug_active = true;
	mutex_unlock(&tb->lock);

	/*
	 * Schedule cleanup of any unplugged devices. Run this in a
	 * separate thread to avoid possible deadlock if the device
	 * removal runtime resumes the unplugged device.
	 */
	queue_delayed_work(tb->wq, &tcm->remove_work, msecs_to_jiffies(50));
	return 0;
}

static const struct tb_cm_ops tb_cm_ops = {
	.start = tb_start,
	.stop = tb_stop,
	.suspend_noirq = tb_suspend_noirq,
	.resume_noirq = tb_resume_noirq,
	.freeze_noirq = tb_freeze_noirq,
	.thaw_noirq = tb_thaw_noirq,
	.complete = tb_complete,
	.runtime_suspend = tb_runtime_suspend,
	.runtime_resume = tb_runtime_resume,
	.handle_event = tb_handle_event,
	.disapprove_switch = tb_disconnect_pci,
	.approve_switch = tb_tunnel_pci,
	.approve_xdomain_paths = tb_approve_xdomain_paths,
	.disconnect_xdomain_paths = tb_disconnect_xdomain_paths,
};

struct tb *tb_probe(struct tb_nhi *nhi)
{
	struct tb_cm *tcm;
	struct tb *tb;

	tb = tb_domain_alloc(nhi, TB_TIMEOUT, sizeof(*tcm));
	if (!tb)
		return NULL;

	if (tb_acpi_may_tunnel_pcie())
		tb->security_level = TB_SECURITY_USER;
	else
		tb->security_level = TB_SECURITY_NOPCIE;

	tb->cm_ops = &tb_cm_ops;

	tcm = tb_priv(tb);
	INIT_LIST_HEAD(&tcm->tunnel_list);
	INIT_LIST_HEAD(&tcm->dp_resources);
	INIT_DELAYED_WORK(&tcm->remove_work, tb_remove_work);

	tb_dbg(tb, "using software connection manager\n");

	return tb;
}