// SPDX-License-Identifier: GPL-2.0
/*
 * Thunderbolt driver - bus logic (NHI independent)
 *
 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
 * Copyright (C) 2019, Intel Corporation
 */

#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/delay.h>

#include "tb.h"
#include "tb_regs.h"
#include "tunnel.h"

/**
 * struct tb_cm - Simple Thunderbolt connection manager
 * @tunnel_list: List of active tunnels
 * @dp_resources: List of available DP resources for DP tunneling
 * @hotplug_active: tb_handle_hotplug will stop progressing plug
 *		    events and exit if this is not set (it needs to
 *		    acquire the lock one more time). Used to drain wq
 *		    after cfg has been paused.
 */
struct tb_cm {
	struct list_head tunnel_list;
	struct list_head dp_resources;
	bool hotplug_active;
};

struct tb_hotplug_event {
	struct work_struct work;
	struct tb *tb;
	u64 route;
	u8 port;
	bool unplug;
};

static void tb_handle_hotplug(struct work_struct *work);

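/*
 * tb_queue_hotplug() - defer a hotplug event to the domain workqueue
 *
 * Allocates a struct tb_hotplug_event and schedules tb_handle_hotplug()
 * on tb->wq. If the allocation fails the event is silently dropped.
 */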
static void tb_queue_hotplug(struct tb *tb, u64 route, u8 port, bool unplug)
{
	struct tb_hotplug_event *ev;

	ev = kmalloc(sizeof(*ev), GFP_KERNEL);
	if (!ev)
		return;

	ev->tb = tb;
	ev->route = route;
	ev->port = port;
	ev->unplug = unplug;
	INIT_WORK(&ev->work, tb_handle_hotplug);
	queue_work(tb->wq, &ev->work);
}

/* enumeration & hot plug handling */

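/*
 * tb_add_dp_resources() - register available DP IN adapters of a switch
 *
 * Queries each DP IN adapter of @sw and adds the ones that have a DP
 * resource available to the connection manager dp_resources list.
 */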
static void tb_add_dp_resources(struct tb_switch *sw)
{
	struct tb_cm *tcm = tb_priv(sw->tb);
	struct tb_port *port;

	tb_switch_for_each_port(sw, port) {
		if (!tb_port_is_dpin(port))
			continue;

		if (!tb_switch_query_dp_resource(sw, port))
			continue;

		list_add_tail(&port->list, &tcm->dp_resources);
		tb_port_dbg(port, "DP IN resource available\n");
	}
}

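/*
 * tb_remove_dp_resources() - drop DP resources of a switch and its children
 *
 * Walks the topology below @sw first and then removes all dp_resources
 * entries that belong to @sw itself.
 */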
static void tb_remove_dp_resources(struct tb_switch *sw)
{
	struct tb_cm *tcm = tb_priv(sw->tb);
	struct tb_port *port, *tmp;

	/* Clear children resources first */
	tb_switch_for_each_port(sw, port) {
		if (tb_port_has_remote(port))
			tb_remove_dp_resources(port->remote->sw);
	}

	list_for_each_entry_safe(port, tmp, &tcm->dp_resources, list) {
		if (port->sw == sw) {
			tb_port_dbg(port, "DP IN resource unavailable\n");
			list_del_init(&port->list);
		}
	}
}

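/*
 * tb_discover_tunnels() - pick up tunnels created by the boot firmware
 *
 * Recursively walks the topology and adds any existing DP, PCIe and USB3
 * tunnels to the connection manager tunnel list. Switches along a
 * discovered PCIe tunnel get their boot flag set so they can later be
 * marked authorized.
 */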
static void tb_discover_tunnels(struct tb_switch *sw)
{
	struct tb *tb = sw->tb;
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_port *port;

	tb_switch_for_each_port(sw, port) {
		struct tb_tunnel *tunnel = NULL;

		switch (port->config.type) {
		case TB_TYPE_DP_HDMI_IN:
			tunnel = tb_tunnel_discover_dp(tb, port);
			break;

		case TB_TYPE_PCIE_DOWN:
			tunnel = tb_tunnel_discover_pci(tb, port);
			break;

		case TB_TYPE_USB3_DOWN:
			tunnel = tb_tunnel_discover_usb3(tb, port);
			break;

		default:
			break;
		}

		if (!tunnel)
			continue;

		if (tb_tunnel_is_pci(tunnel)) {
			struct tb_switch *parent = tunnel->dst_port->sw;

			while (parent != tunnel->src_port->sw) {
				parent->boot = true;
				parent = tb_switch_parent(parent);
			}
		}

		list_add_tail(&tunnel->list, &tcm->tunnel_list);
	}

	tb_switch_for_each_port(sw, port) {
		if (tb_port_has_remote(port))
			tb_discover_tunnels(port->remote->sw);
	}
}

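/*
 * tb_scan_xdomain() - set up an XDomain for a host-to-host connection
 *
 * If the route behind @port is not yet known as an XDomain, allocate and
 * register a new one for it.
 */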
static void tb_scan_xdomain(struct tb_port *port)
{
	struct tb_switch *sw = port->sw;
	struct tb *tb = sw->tb;
	struct tb_xdomain *xd;
	u64 route;

	route = tb_downstream_route(port);
	xd = tb_xdomain_find_by_route(tb, route);
	if (xd) {
		tb_xdomain_put(xd);
		return;
	}

	xd = tb_xdomain_alloc(tb, &sw->dev, route, tb->root_switch->uuid,
			      NULL);
	if (xd) {
		tb_port_at(route, sw)->xdomain = xd;
		tb_xdomain_add(xd);
	}
}

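/*
 * tb_enable_tmu() - make sure the switch TMU is enabled in the right mode
 *
 * If the TMU is already enabled correctly nothing is done; otherwise it
 * is disabled, the time is posted and the TMU is re-enabled.
 */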
static int tb_enable_tmu(struct tb_switch *sw)
{
	int ret;

	/* If it is already enabled in correct mode, don't touch it */
	if (tb_switch_tmu_is_enabled(sw))
		return 0;

	ret = tb_switch_tmu_disable(sw);
	if (ret)
		return ret;

	ret = tb_switch_tmu_post_time(sw);
	if (ret)
		return ret;

	return tb_switch_tmu_enable(sw);
}

/**
 * tb_find_unused_port() - return the first inactive port on @sw
 * @sw: Switch to find the port on
 * @type: Port type to look for
 */
static struct tb_port *tb_find_unused_port(struct tb_switch *sw,
					   enum tb_port_type type)
{
	struct tb_port *port;

	tb_switch_for_each_port(sw, port) {
		if (tb_is_upstream_port(port))
			continue;
		if (port->config.type != type)
			continue;
		if (!port->cap_adap)
			continue;
		if (tb_port_is_enabled(port))
			continue;
		return port;
	}
	return NULL;
}

static struct tb_port *tb_find_usb3_down(struct tb_switch *sw,
					 const struct tb_port *port)
{
	struct tb_port *down;

	down = usb4_switch_map_usb3_down(sw, port);
	if (down) {
		if (WARN_ON(!tb_port_is_usb3_down(down)))
			goto out;
		if (WARN_ON(tb_usb3_port_is_enabled(down)))
			goto out;

		return down;
	}

out:
	return tb_find_unused_port(sw, TB_TYPE_USB3_DOWN);
}

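/*
 * tb_tunnel_usb3() - set up a USB3 tunnel from the parent switch to @sw
 *
 * Finds the USB3 up adapter of @sw and a matching unused down adapter on
 * the parent, then allocates and activates the tunnel. Returns 0 also
 * when no tunnel can be created (missing adapters or incomplete chain).
 */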
static int tb_tunnel_usb3(struct tb *tb, struct tb_switch *sw)
{
	struct tb_switch *parent = tb_switch_parent(sw);
	struct tb_port *up, *down, *port;
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel;

	up = tb_switch_find_port(sw, TB_TYPE_USB3_UP);
	if (!up)
		return 0;

	/*
	 * Look up available down port. Since we are chaining it should
	 * be found right above this switch.
	 */
	port = tb_port_at(tb_route(sw), parent);
	down = tb_find_usb3_down(parent, port);
	if (!down)
		return 0;

	if (tb_route(parent)) {
		struct tb_port *parent_up;
		/*
		 * Check first that the parent switch has its upstream USB3
		 * port enabled. Otherwise the chain is not complete and
		 * there is no point setting up a new tunnel.
		 */
		parent_up = tb_switch_find_port(parent, TB_TYPE_USB3_UP);
		if (!parent_up || !tb_port_is_enabled(parent_up))
			return 0;
	}

	tunnel = tb_tunnel_alloc_usb3(tb, up, down);
	if (!tunnel)
		return -ENOMEM;

	if (tb_tunnel_activate(tunnel)) {
		tb_port_info(up,
			     "USB3 tunnel activation failed, aborting\n");
		tb_tunnel_free(tunnel);
		return -EIO;
	}

	list_add_tail(&tunnel->list, &tcm->tunnel_list);
	return 0;
}

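/*
 * tb_create_usb3_tunnels() - create USB3 tunnels for the whole topology
 *
 * Recursively tunnels USB3 for @sw and every switch below it.
 */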
static int tb_create_usb3_tunnels(struct tb_switch *sw)
{
	struct tb_port *port;
	int ret;

	if (tb_route(sw)) {
		ret = tb_tunnel_usb3(sw->tb, sw);
		if (ret)
			return ret;
	}

	tb_switch_for_each_port(sw, port) {
		if (!tb_port_has_remote(port))
			continue;
		ret = tb_create_usb3_tunnels(port->remote->sw);
		if (ret)
			return ret;
	}

	return 0;
}

static void tb_scan_port(struct tb_port *port);

/**
 * tb_scan_switch() - scan for and initialize downstream switches
 */
static void tb_scan_switch(struct tb_switch *sw)
{
	struct tb_port *port;

	tb_switch_for_each_port(sw, port)
		tb_scan_port(port);
}

/**
 * tb_scan_port() - check for and initialize switches below port
 */
static void tb_scan_port(struct tb_port *port)
{
	struct tb_cm *tcm = tb_priv(port->sw->tb);
	struct tb_port *upstream_port;
	struct tb_switch *sw;

	if (tb_is_upstream_port(port))
		return;

	if (tb_port_is_dpout(port) && tb_dp_port_hpd_is_active(port) == 1 &&
	    !tb_dp_port_is_enabled(port)) {
		tb_port_dbg(port, "DP adapter HPD set, queuing hotplug\n");
		tb_queue_hotplug(port->sw->tb, tb_route(port->sw), port->port,
				 false);
		return;
	}

	if (port->config.type != TB_TYPE_PORT)
		return;
	if (port->dual_link_port && port->link_nr)
		return; /*
			 * Downstream switch is reachable through two ports.
			 * Only scan on the primary port (link_nr == 0).
			 */
	if (tb_wait_for_port(port, false) <= 0)
		return;
	if (port->remote) {
		tb_port_dbg(port, "port already has a remote\n");
		return;
	}
	sw = tb_switch_alloc(port->sw->tb, &port->sw->dev,
			     tb_downstream_route(port));
	if (IS_ERR(sw)) {
		/*
		 * If there is an error accessing the connected switch
		 * it may be connected to another domain. Also we allow
		 * the other domain to be connected to a max depth switch.
		 */
		if (PTR_ERR(sw) == -EIO || PTR_ERR(sw) == -EADDRNOTAVAIL)
			tb_scan_xdomain(port);
		return;
	}

	if (tb_switch_configure(sw)) {
		tb_switch_put(sw);
		return;
	}

	/*
	 * If there was previously another domain connected remove it
	 * first.
	 */
	if (port->xdomain) {
		tb_xdomain_remove(port->xdomain);
		port->xdomain = NULL;
	}

	/*
	 * Do not send uevents until we have discovered all existing
	 * tunnels and know which switches were authorized already by
	 * the boot firmware.
	 */
	if (!tcm->hotplug_active)
		dev_set_uevent_suppress(&sw->dev, true);

	if (tb_switch_add(sw)) {
		tb_switch_put(sw);
		return;
	}

	/* Link the switches using both links if available */
	upstream_port = tb_upstream_port(sw);
	port->remote = upstream_port;
	upstream_port->remote = port;
	if (port->dual_link_port && upstream_port->dual_link_port) {
		port->dual_link_port->remote = upstream_port->dual_link_port;
		upstream_port->dual_link_port->remote = port->dual_link_port;
	}

	/* Enable lane bonding if supported */
	if (tb_switch_lane_bonding_enable(sw))
		tb_sw_warn(sw, "failed to enable lane bonding\n");

	if (tb_enable_tmu(sw))
		tb_sw_warn(sw, "failed to enable TMU\n");

	/*
	 * Create USB 3.x tunnels only when the switch is plugged to the
	 * domain. This is because we scan the domain also during discovery
	 * and want to discover existing USB 3.x tunnels before we create
	 * any new.
	 */
	if (tcm->hotplug_active && tb_tunnel_usb3(sw->tb, sw))
		tb_sw_warn(sw, "USB3 tunnel creation failed\n");

	tb_scan_switch(sw);
}

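/*
 * tb_find_tunnel() - return an existing tunnel of @type that matches either
 * the given source or destination port, or NULL if there is none.
 */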
static struct tb_tunnel *tb_find_tunnel(struct tb *tb, enum tb_tunnel_type type,
					struct tb_port *src_port,
					struct tb_port *dst_port)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel;

	list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
		if (tunnel->type == type &&
		    ((src_port && src_port == tunnel->src_port) ||
		     (dst_port && dst_port == tunnel->dst_port))) {
			return tunnel;
		}
	}

	return NULL;
}

static void tb_deactivate_and_free_tunnel(struct tb_tunnel *tunnel)
{
	if (!tunnel)
		return;

	tb_tunnel_deactivate(tunnel);
	list_del(&tunnel->list);

	/*
	 * In case of DP tunnel make sure the DP IN resource is deallocated
	 * properly.
	 */
	if (tb_tunnel_is_dp(tunnel)) {
		struct tb_port *in = tunnel->src_port;

		tb_switch_dealloc_dp_resource(in->sw, in);
	}

	tb_tunnel_free(tunnel);
}

/**
 * tb_free_invalid_tunnels() - destroy tunnels of devices that have gone away
 */
static void tb_free_invalid_tunnels(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel;
	struct tb_tunnel *n;

	list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
		if (tb_tunnel_is_invalid(tunnel))
			tb_deactivate_and_free_tunnel(tunnel);
	}
}

/**
 * tb_free_unplugged_children() - traverse hierarchy and free unplugged switches
 */
static void tb_free_unplugged_children(struct tb_switch *sw)
{
	struct tb_port *port;

	tb_switch_for_each_port(sw, port) {
		if (!tb_port_has_remote(port))
			continue;

		if (port->remote->sw->is_unplugged) {
			tb_remove_dp_resources(port->remote->sw);
			tb_switch_lane_bonding_disable(port->remote->sw);
			tb_switch_remove(port->remote->sw);
			port->remote = NULL;
			if (port->dual_link_port)
				port->dual_link_port->remote = NULL;
		} else {
			tb_free_unplugged_children(port->remote->sw);
		}
	}
}

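/*
 * tb_find_pcie_down() - find the PCIe down adapter to pair with @port
 *
 * For USB4 routers and host routers with a known hard-coded mapping the
 * matching downstream PCIe adapter is returned; otherwise the first
 * unused PCIe down adapter is used.
 */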
static struct tb_port *tb_find_pcie_down(struct tb_switch *sw,
					 const struct tb_port *port)
{
	struct tb_port *down = NULL;

	/*
	 * To keep plugging devices consistently in the same PCIe
	 * hierarchy, do mapping here for switch downstream PCIe ports.
	 */
	if (tb_switch_is_usb4(sw)) {
		down = usb4_switch_map_pcie_down(sw, port);
	} else if (!tb_route(sw)) {
		int phy_port = tb_phy_port_from_link(port->port);
		int index;

		/*
		 * Hard-coded Thunderbolt port to PCIe down port mapping
		 * per controller.
		 */
		if (tb_switch_is_cactus_ridge(sw) ||
		    tb_switch_is_alpine_ridge(sw))
			index = !phy_port ? 6 : 7;
		else if (tb_switch_is_falcon_ridge(sw))
			index = !phy_port ? 6 : 8;
		else if (tb_switch_is_titan_ridge(sw))
			index = !phy_port ? 8 : 9;
		else
			goto out;

		/* Validate the hard-coding */
		if (WARN_ON(index > sw->config.max_port_number))
			goto out;

		down = &sw->ports[index];
	}

	if (down) {
		if (WARN_ON(!tb_port_is_pcie_down(down)))
			goto out;
		if (WARN_ON(tb_pci_port_is_enabled(down)))
			goto out;

		return down;
	}

out:
	return tb_find_unused_port(sw, TB_TYPE_PCIE_DOWN);
}

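/*
 * tb_available_bw() - estimate bandwidth available for a new DP tunnel
 *
 * Walks the links from @out towards @in, subtracting a 10% guard band and
 * the bandwidth already consumed by tunnels running through each switch,
 * and returns the minimum in Mb/s (or a negative errno on failure).
 */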
static int tb_available_bw(struct tb_cm *tcm, struct tb_port *in,
			   struct tb_port *out)
{
	struct tb_switch *sw = out->sw;
	struct tb_tunnel *tunnel;
	int bw, available_bw = 40000;

	while (sw && sw != in->sw) {
		bw = sw->link_speed * sw->link_width * 1000; /* Mb/s */
		/* Leave 10% guard band */
		bw -= bw / 10;

		/*
		 * Check for any active DP tunnels that go through this
		 * switch and reduce their consumed bandwidth from
		 * available.
		 */
		list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
			int consumed_bw;

			if (!tb_tunnel_switch_on_path(tunnel, sw))
				continue;

			consumed_bw = tb_tunnel_consumed_bandwidth(tunnel);
			if (consumed_bw < 0)
				return consumed_bw;

			bw -= consumed_bw;
		}

		if (bw < available_bw)
			available_bw = bw;

		sw = tb_switch_parent(sw);
	}

	return available_bw;
}

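/*
 * tb_tunnel_dp() - try to establish a new DP tunnel
 *
 * Picks the first inactive DP IN and DP OUT adapters from dp_resources,
 * allocates the DP IN resource, calculates the available bandwidth and
 * activates the tunnel if everything succeeds.
 */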
static void tb_tunnel_dp(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_port *port, *in, *out;
	struct tb_tunnel *tunnel;
	int available_bw;

	/*
	 * Find pair of inactive DP IN and DP OUT adapters and then
	 * establish a DP tunnel between them.
	 */
	tb_dbg(tb, "looking for DP IN <-> DP OUT pairs:\n");

	in = NULL;
	out = NULL;
	list_for_each_entry(port, &tcm->dp_resources, list) {
		if (tb_port_is_enabled(port)) {
			tb_port_dbg(port, "in use\n");
			continue;
		}

		tb_port_dbg(port, "available\n");

		if (!in && tb_port_is_dpin(port))
			in = port;
		else if (!out && tb_port_is_dpout(port))
			out = port;
	}

	if (!in) {
		tb_dbg(tb, "no suitable DP IN adapter available, not tunneling\n");
		return;
	}
	if (!out) {
		tb_dbg(tb, "no suitable DP OUT adapter available, not tunneling\n");
		return;
	}

	if (tb_switch_alloc_dp_resource(in->sw, in)) {
		tb_port_dbg(in, "no resource available for DP IN, not tunneling\n");
		return;
	}

	/* Calculate available bandwidth between in and out */
	available_bw = tb_available_bw(tcm, in, out);
	if (available_bw < 0) {
		tb_warn(tb, "failed to determine available bandwidth\n");
		return;
	}

	tb_dbg(tb, "available bandwidth for new DP tunnel %u Mb/s\n",
	       available_bw);

	tunnel = tb_tunnel_alloc_dp(tb, in, out, available_bw);
	if (!tunnel) {
		tb_port_dbg(out, "could not allocate DP tunnel\n");
		goto dealloc_dp;
	}

	if (tb_tunnel_activate(tunnel)) {
		tb_port_info(out, "DP tunnel activation failed, aborting\n");
		tb_tunnel_free(tunnel);
		goto dealloc_dp;
	}

	list_add_tail(&tunnel->list, &tcm->tunnel_list);
	return;

dealloc_dp:
	tb_switch_dealloc_dp_resource(in->sw, in);
}

static void tb_dp_resource_unavailable(struct tb *tb, struct tb_port *port)
{
	struct tb_port *in, *out;
	struct tb_tunnel *tunnel;

	if (tb_port_is_dpin(port)) {
		tb_port_dbg(port, "DP IN resource unavailable\n");
		in = port;
		out = NULL;
	} else {
		tb_port_dbg(port, "DP OUT resource unavailable\n");
		in = NULL;
		out = port;
	}

	tunnel = tb_find_tunnel(tb, TB_TUNNEL_DP, in, out);
	tb_deactivate_and_free_tunnel(tunnel);
	list_del_init(&port->list);

	/*
	 * See if there is another DP OUT port that can be used to
	 * create another tunnel.
	 */
	tb_tunnel_dp(tb);
}

static void tb_dp_resource_available(struct tb *tb, struct tb_port *port)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_port *p;

	if (tb_port_is_enabled(port))
		return;

	list_for_each_entry(p, &tcm->dp_resources, list) {
		if (p == port)
			return;
	}

	tb_port_dbg(port, "DP %s resource available\n",
		    tb_port_is_dpin(port) ? "IN" : "OUT");
	list_add_tail(&port->list, &tcm->dp_resources);

	/* Look for suitable DP IN <-> DP OUT pairs now */
	tb_tunnel_dp(tb);
}

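/*
 * tb_tunnel_pci() - tunnel PCIe from the parent switch down to @sw
 *
 * Used as the ->approve_switch callback: finds the PCIe up adapter of @sw
 * and a suitable down adapter on the parent, then activates the PCIe
 * tunnel between them.
 */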
static int tb_tunnel_pci(struct tb *tb, struct tb_switch *sw)
{
	struct tb_port *up, *down, *port;
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_switch *parent_sw;
	struct tb_tunnel *tunnel;

	up = tb_switch_find_port(sw, TB_TYPE_PCIE_UP);
	if (!up)
		return 0;

	/*
	 * Look up available down port. Since we are chaining it should
	 * be found right above this switch.
	 */
	parent_sw = tb_to_switch(sw->dev.parent);
	port = tb_port_at(tb_route(sw), parent_sw);
	down = tb_find_pcie_down(parent_sw, port);
	if (!down)
		return 0;

	tunnel = tb_tunnel_alloc_pci(tb, up, down);
	if (!tunnel)
		return -ENOMEM;

	if (tb_tunnel_activate(tunnel)) {
		tb_port_info(up,
			     "PCIe tunnel activation failed, aborting\n");
		tb_tunnel_free(tunnel);
		return -EIO;
	}

	list_add_tail(&tunnel->list, &tcm->tunnel_list);
	return 0;
}

static int tb_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_port *nhi_port, *dst_port;
	struct tb_tunnel *tunnel;
	struct tb_switch *sw;

	sw = tb_to_switch(xd->dev.parent);
	dst_port = tb_port_at(xd->route, sw);
	nhi_port = tb_switch_find_port(tb->root_switch, TB_TYPE_NHI);

	mutex_lock(&tb->lock);
	tunnel = tb_tunnel_alloc_dma(tb, nhi_port, dst_port, xd->transmit_ring,
				     xd->transmit_path, xd->receive_ring,
				     xd->receive_path);
	if (!tunnel) {
		mutex_unlock(&tb->lock);
		return -ENOMEM;
	}

	if (tb_tunnel_activate(tunnel)) {
		tb_port_info(nhi_port,
			     "DMA tunnel activation failed, aborting\n");
		tb_tunnel_free(tunnel);
		mutex_unlock(&tb->lock);
		return -EIO;
	}

	list_add_tail(&tunnel->list, &tcm->tunnel_list);
	mutex_unlock(&tb->lock);
	return 0;
}

static void __tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
{
	struct tb_port *dst_port;
	struct tb_tunnel *tunnel;
	struct tb_switch *sw;

	sw = tb_to_switch(xd->dev.parent);
	dst_port = tb_port_at(xd->route, sw);

	/*
	 * It is possible that the tunnel was already torn down (in
	 * case of cable disconnect) so it is fine if we cannot find it
	 * here anymore.
	 */
	tunnel = tb_find_tunnel(tb, TB_TUNNEL_DMA, NULL, dst_port);
	tb_deactivate_and_free_tunnel(tunnel);
}

static int tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
{
	if (!xd->is_unplugged) {
		mutex_lock(&tb->lock);
		__tb_disconnect_xdomain_paths(tb, xd);
		mutex_unlock(&tb->lock);
	}
	return 0;
}

/* hotplug handling */

/**
 * tb_handle_hotplug() - handle hotplug event
 *
 * Executes on tb->wq.
 */
static void tb_handle_hotplug(struct work_struct *work)
{
	struct tb_hotplug_event *ev = container_of(work, typeof(*ev), work);
	struct tb *tb = ev->tb;
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_switch *sw;
	struct tb_port *port;
	mutex_lock(&tb->lock);
	if (!tcm->hotplug_active)
		goto out; /* during init, suspend or shutdown */

	sw = tb_switch_find_by_route(tb, ev->route);
	if (!sw) {
		tb_warn(tb,
			"hotplug event from non existent switch %llx:%x (unplug: %d)\n",
			ev->route, ev->port, ev->unplug);
		goto out;
	}
	if (ev->port > sw->config.max_port_number) {
		tb_warn(tb,
			"hotplug event from non existent port %llx:%x (unplug: %d)\n",
			ev->route, ev->port, ev->unplug);
		goto put_sw;
	}
	port = &sw->ports[ev->port];
	if (tb_is_upstream_port(port)) {
		tb_dbg(tb, "hotplug event for upstream port %llx:%x (unplug: %d)\n",
		       ev->route, ev->port, ev->unplug);
		goto put_sw;
	}
	if (ev->unplug) {
		if (tb_port_has_remote(port)) {
			tb_port_dbg(port, "switch unplugged\n");
			tb_sw_set_unplugged(port->remote->sw);
			tb_free_invalid_tunnels(tb);
			tb_remove_dp_resources(port->remote->sw);
			tb_switch_tmu_disable(port->remote->sw);
			tb_switch_lane_bonding_disable(port->remote->sw);
			tb_switch_remove(port->remote->sw);
			port->remote = NULL;
			if (port->dual_link_port)
				port->dual_link_port->remote = NULL;
			/* Maybe we can create another DP tunnel */
			tb_tunnel_dp(tb);
		} else if (port->xdomain) {
			struct tb_xdomain *xd = tb_xdomain_get(port->xdomain);

			tb_port_dbg(port, "xdomain unplugged\n");
			/*
			 * Service drivers are unbound during
			 * tb_xdomain_remove() so setting XDomain as
			 * unplugged here prevents deadlock if they call
			 * tb_xdomain_disable_paths(). We will tear down
			 * the path below.
			 */
			xd->is_unplugged = true;
			tb_xdomain_remove(xd);
			port->xdomain = NULL;
			__tb_disconnect_xdomain_paths(tb, xd);
			tb_xdomain_put(xd);
		} else if (tb_port_is_dpout(port) || tb_port_is_dpin(port)) {
			tb_dp_resource_unavailable(tb, port);
		} else {
			tb_port_dbg(port,
				    "got unplug event for disconnected port, ignoring\n");
		}
	} else if (port->remote) {
		tb_port_dbg(port, "got plug event for connected port, ignoring\n");
	} else {
		if (tb_port_is_null(port)) {
			tb_port_dbg(port, "hotplug: scanning\n");
			tb_scan_port(port);
			if (!port->remote)
				tb_port_dbg(port, "hotplug: no switch found\n");
		} else if (tb_port_is_dpout(port) || tb_port_is_dpin(port)) {
			tb_dp_resource_available(tb, port);
		}
	}

put_sw:
	tb_switch_put(sw);
out:
	mutex_unlock(&tb->lock);
	kfree(ev);
}

/**
 * tb_handle_event() - callback function for the control channel
 *
 * Delegates to tb_handle_hotplug.
 */
static void tb_handle_event(struct tb *tb, enum tb_cfg_pkg_type type,
			    const void *buf, size_t size)
{
	const struct cfg_event_pkg *pkg = buf;
	u64 route;

	if (type != TB_CFG_PKG_EVENT) {
		tb_warn(tb, "unexpected event %#x, ignoring\n", type);
		return;
	}

	route = tb_cfg_get_route(&pkg->header);

	if (tb_cfg_ack_plug(tb->ctl, route, pkg->port, pkg->unplug)) {
		tb_warn(tb, "could not ack plug event on %llx:%x\n", route,
			pkg->port);
	}

	tb_queue_hotplug(tb, route, pkg->port, pkg->unplug);
}

static void tb_stop(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel;
	struct tb_tunnel *n;

	/* tunnels are only present after everything has been initialized */
	list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
		/*
		 * DMA tunnels require the driver to be functional so we
		 * tear them down. Other protocol tunnels can be left
		 * intact.
		 */
		if (tb_tunnel_is_dma(tunnel))
			tb_tunnel_deactivate(tunnel);
		tb_tunnel_free(tunnel);
	}
	tb_switch_remove(tb->root_switch);
	tcm->hotplug_active = false; /* signal tb_handle_hotplug to quit */
}

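/*
 * tb_scan_finalize_switch() - called for each switch device after the
 * initial scan; marks switches set up by the boot firmware as authorized
 * and releases the suppressed uevents.
 */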
static int tb_scan_finalize_switch(struct device *dev, void *data)
{
	if (tb_is_switch(dev)) {
		struct tb_switch *sw = tb_to_switch(dev);

		/*
		 * If we found that the switch was already setup by the
		 * boot firmware, mark it as authorized now before we
		 * send uevent to userspace.
		 */
		if (sw->boot)
			sw->authorized = 1;

		dev_set_uevent_suppress(dev, false);
		kobject_uevent(&dev->kobj, KOBJ_ADD);
		device_for_each_child(dev, NULL, tb_scan_finalize_switch);
	}

	return 0;
}

static int tb_start(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);
	int ret;

	tb->root_switch = tb_switch_alloc(tb, &tb->dev, 0);
	if (IS_ERR(tb->root_switch))
		return PTR_ERR(tb->root_switch);

	/*
	 * ICM firmware upgrade needs running firmware and in native
	 * mode that is not available so disable firmware upgrade of the
	 * root switch.
	 */
	tb->root_switch->no_nvm_upgrade = true;

	ret = tb_switch_configure(tb->root_switch);
	if (ret) {
		tb_switch_put(tb->root_switch);
		return ret;
	}

	/* Announce the switch to the world */
	ret = tb_switch_add(tb->root_switch);
	if (ret) {
		tb_switch_put(tb->root_switch);
		return ret;
	}

	/* Enable TMU if it is off */
	tb_switch_tmu_enable(tb->root_switch);
	/* Full scan to discover devices added before the driver was loaded. */
	tb_scan_switch(tb->root_switch);
	/* Find out tunnels created by the boot firmware */
	tb_discover_tunnels(tb->root_switch);
	/*
	 * If the boot firmware did not create USB 3.x tunnels create them
	 * now for the whole topology.
	 */
	tb_create_usb3_tunnels(tb->root_switch);
	/* Add DP IN resources for the root switch */
	tb_add_dp_resources(tb->root_switch);
	/* Make the discovered switches available to the userspace */
	device_for_each_child(&tb->root_switch->dev, NULL,
			      tb_scan_finalize_switch);

	/* Allow tb_handle_hotplug to progress events */
	tcm->hotplug_active = true;
	return 0;
}

static int tb_suspend_noirq(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);

	tb_dbg(tb, "suspending...\n");
	tb_switch_suspend(tb->root_switch);
	tcm->hotplug_active = false; /* signal tb_handle_hotplug to quit */
	tb_dbg(tb, "suspend finished\n");

	return 0;
}

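/*
 * tb_restore_children() - re-apply per-switch settings after resume
 *
 * Restores TMU configuration for @sw and lane bonding for all switches
 * below it, recursively.
 */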
static void tb_restore_children(struct tb_switch *sw)
{
	struct tb_port *port;

	if (tb_enable_tmu(sw))
		tb_sw_warn(sw, "failed to restore TMU configuration\n");

	tb_switch_for_each_port(sw, port) {
		if (!tb_port_has_remote(port))
			continue;

		if (tb_switch_lane_bonding_enable(port->remote->sw))
			dev_warn(&sw->dev, "failed to restore lane bonding\n");

		tb_restore_children(port->remote->sw);
	}
}

static int tb_resume_noirq(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel, *n;

	tb_dbg(tb, "resuming...\n");

	/* remove any pci devices the firmware might have setup */
	tb_switch_reset(tb, 0);

	tb_switch_resume(tb->root_switch);
	tb_free_invalid_tunnels(tb);
	tb_free_unplugged_children(tb->root_switch);
	tb_restore_children(tb->root_switch);
	list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list)
		tb_tunnel_restart(tunnel);
	if (!list_empty(&tcm->tunnel_list)) {
		/*
		 * the pcie links need some time to get going.
		 * 100ms works for me...
		 */
		tb_dbg(tb, "tunnels restarted, sleeping for 100ms\n");
		msleep(100);
	}
	/* Allow tb_handle_hotplug to progress events */
	tcm->hotplug_active = true;
	tb_dbg(tb, "resume finished\n");

	return 0;
}

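/*
 * tb_free_unplugged_xdomains() - remove XDomains that are gone
 *
 * Walks the topology and removes any XDomain marked as unplugged.
 * Returns the number of XDomains removed.
 */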
static int tb_free_unplugged_xdomains(struct tb_switch *sw)
{
	struct tb_port *port;
	int ret = 0;

	tb_switch_for_each_port(sw, port) {
		if (tb_is_upstream_port(port))
			continue;
		if (port->xdomain && port->xdomain->is_unplugged) {
			tb_xdomain_remove(port->xdomain);
			port->xdomain = NULL;
			ret++;
		} else if (port->remote) {
			ret += tb_free_unplugged_xdomains(port->remote->sw);
		}
	}

	return ret;
}

static void tb_complete(struct tb *tb)
{
	/*
	 * Release any unplugged XDomains and if there is a case where
	 * another domain is swapped in place of unplugged XDomain we
	 * need to run another rescan.
	 */
	mutex_lock(&tb->lock);
	if (tb_free_unplugged_xdomains(tb->root_switch))
		tb_scan_switch(tb->root_switch);
	mutex_unlock(&tb->lock);
}

static const struct tb_cm_ops tb_cm_ops = {
	.start = tb_start,
	.stop = tb_stop,
	.suspend_noirq = tb_suspend_noirq,
	.resume_noirq = tb_resume_noirq,
	.complete = tb_complete,
	.handle_event = tb_handle_event,
	.approve_switch = tb_tunnel_pci,
	.approve_xdomain_paths = tb_approve_xdomain_paths,
	.disconnect_xdomain_paths = tb_disconnect_xdomain_paths,
};

struct tb *tb_probe(struct tb_nhi *nhi)
{
	struct tb_cm *tcm;
	struct tb *tb;

	tb = tb_domain_alloc(nhi, sizeof(*tcm));
	if (!tb)
		return NULL;

	tb->security_level = TB_SECURITY_USER;
	tb->cm_ops = &tb_cm_ops;

	tcm = tb_priv(tb);
	INIT_LIST_HEAD(&tcm->tunnel_list);
	INIT_LIST_HEAD(&tcm->dp_resources);

	return tb;
}