blob: 1f7a9e1cc09c4993443efeb9ef3c7d8f28c719ad [file] [log] [blame]
Greg Kroah-Hartmanb2441312017-11-01 15:07:57 +01001// SPDX-License-Identifier: GPL-2.0
Andreas Noeverd6cc51c2014-06-03 22:04:00 +02002/*
Mika Westerberg99cabbb2018-12-30 21:34:08 +02003 * Thunderbolt driver - bus logic (NHI independent)
Andreas Noeverd6cc51c2014-06-03 22:04:00 +02004 *
5 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
Mika Westerberg99cabbb2018-12-30 21:34:08 +02006 * Copyright (C) 2019, Intel Corporation
Andreas Noeverd6cc51c2014-06-03 22:04:00 +02007 */
8
9#include <linux/slab.h>
10#include <linux/errno.h>
11#include <linux/delay.h>
Lukas Wunner630b3af2017-08-01 14:10:41 +020012#include <linux/platform_data/x86/apple.h>
Andreas Noeverd6cc51c2014-06-03 22:04:00 +020013
14#include "tb.h"
Andreas Noever7adf6092014-06-03 22:04:01 +020015#include "tb_regs.h"
Mika Westerberg1752b9f2017-02-19 10:58:35 +020016#include "tunnel.h"
Andreas Noeverd6cc51c2014-06-03 22:04:00 +020017
/**
 * struct tb_cm - Simple Thunderbolt connection manager
 * @tunnel_list: List of active tunnels
 * @hotplug_active: tb_handle_hotplug will stop progressing plug
 *		    events and exit if this is not set (it needs to
 *		    acquire the lock one more time). Used to drain wq
 *		    after cfg has been paused.
 */
struct tb_cm {
	struct list_head tunnel_list;
	bool hotplug_active;
};
Andreas Noever9da672a2014-06-03 22:04:05 +020030
/**
 * struct tb_hotplug_event - Thunderbolt hotplug event
 * @work: Work item delivering the event on @tb->wq
 * @tb: Domain the event belongs to
 * @route: Route string of the switch the event originated from
 * @port: Port number on that switch
 * @unplug: %true for an unplug event, %false for a plug event
 */
struct tb_hotplug_event {
	struct work_struct work;
	struct tb *tb;
	u64 route;
	u8 port;
	bool unplug;
};
38
39static void tb_handle_hotplug(struct work_struct *work);
40
41static void tb_queue_hotplug(struct tb *tb, u64 route, u8 port, bool unplug)
42{
43 struct tb_hotplug_event *ev;
44
45 ev = kmalloc(sizeof(*ev), GFP_KERNEL);
46 if (!ev)
47 return;
48
49 ev->tb = tb;
50 ev->route = route;
51 ev->port = port;
52 ev->unplug = unplug;
53 INIT_WORK(&ev->work, tb_handle_hotplug);
54 queue_work(tb->wq, &ev->work);
55}
56
Andreas Noever9da672a2014-06-03 22:04:05 +020057/* enumeration & hot plug handling */
58
Mika Westerberg0414bec2017-02-19 23:43:26 +020059static void tb_discover_tunnels(struct tb_switch *sw)
60{
61 struct tb *tb = sw->tb;
62 struct tb_cm *tcm = tb_priv(tb);
63 struct tb_port *port;
64 int i;
65
66 for (i = 1; i <= sw->config.max_port_number; i++) {
67 struct tb_tunnel *tunnel = NULL;
68
69 port = &sw->ports[i];
70 switch (port->config.type) {
Mika Westerberg4f807e42018-09-17 16:30:49 +030071 case TB_TYPE_DP_HDMI_IN:
72 tunnel = tb_tunnel_discover_dp(tb, port);
73 break;
74
Mika Westerberg0414bec2017-02-19 23:43:26 +020075 case TB_TYPE_PCIE_DOWN:
76 tunnel = tb_tunnel_discover_pci(tb, port);
77 break;
78
79 default:
80 break;
81 }
82
Mika Westerberg4f807e42018-09-17 16:30:49 +030083 if (!tunnel)
84 continue;
85
86 if (tb_tunnel_is_pci(tunnel)) {
Mika Westerberg0414bec2017-02-19 23:43:26 +020087 struct tb_switch *parent = tunnel->dst_port->sw;
88
89 while (parent != tunnel->src_port->sw) {
90 parent->boot = true;
91 parent = tb_switch_parent(parent);
92 }
Mika Westerberg0414bec2017-02-19 23:43:26 +020093 }
Mika Westerberg4f807e42018-09-17 16:30:49 +030094
95 list_add_tail(&tunnel->list, &tcm->tunnel_list);
Mika Westerberg0414bec2017-02-19 23:43:26 +020096 }
97
98 for (i = 1; i <= sw->config.max_port_number; i++) {
99 if (tb_port_has_remote(&sw->ports[i]))
100 tb_discover_tunnels(sw->ports[i].remote->sw);
101 }
102}
Andreas Noever9da672a2014-06-03 22:04:05 +0200103
Mika Westerberg7ea4cd62018-09-28 16:41:01 +0300104static void tb_scan_xdomain(struct tb_port *port)
105{
106 struct tb_switch *sw = port->sw;
107 struct tb *tb = sw->tb;
108 struct tb_xdomain *xd;
109 u64 route;
110
111 route = tb_downstream_route(port);
112 xd = tb_xdomain_find_by_route(tb, route);
113 if (xd) {
114 tb_xdomain_put(xd);
115 return;
116 }
117
118 xd = tb_xdomain_alloc(tb, &sw->dev, route, tb->root_switch->uuid,
119 NULL);
120 if (xd) {
121 tb_port_at(route, sw)->xdomain = xd;
122 tb_xdomain_add(xd);
123 }
124}
125
Andreas Noever9da672a2014-06-03 22:04:05 +0200126static void tb_scan_port(struct tb_port *port);
127
/**
 * tb_scan_switch() - scan for and initialize downstream switches
 * @sw: Switch whose ports are scanned (port 0 is skipped; it is the
 *	switch itself, ports are numbered from 1)
 */
static void tb_scan_switch(struct tb_switch *sw)
{
	int i;
	for (i = 1; i <= sw->config.max_port_number; i++)
		tb_scan_port(&sw->ports[i]);
}
137
/**
 * tb_scan_port() - check for and initialize switches below port
 * @port: Port to probe
 *
 * If a switch is found behind @port it is allocated, configured, added
 * to the bus and linked to @port. If the port leads to another domain
 * instead, an XDomain connection is set up. A DP adapter with hotplug
 * detect asserted is handled by queuing a hotplug event so that the
 * DP tunnel gets established from the hotplug path.
 */
static void tb_scan_port(struct tb_port *port)
{
	struct tb_cm *tcm = tb_priv(port->sw->tb);
	struct tb_port *upstream_port;
	struct tb_switch *sw;

	if (tb_is_upstream_port(port))
		return;

	/* DP adapter with HPD set but no tunnel yet: defer to hotplug */
	if (tb_port_is_dpout(port) && tb_dp_port_hpd_is_active(port) == 1 &&
	    !tb_dp_port_is_enabled(port)) {
		tb_port_dbg(port, "DP adapter HPD set, queuing hotplug\n");
		tb_queue_hotplug(port->sw->tb, tb_route(port->sw), port->port,
				 false);
		return;
	}

	if (port->config.type != TB_TYPE_PORT)
		return;
	if (port->dual_link_port && port->link_nr)
		return; /*
			 * Downstream switch is reachable through two ports.
			 * Only scan on the primary port (link_nr == 0).
			 */
	if (tb_wait_for_port(port, false) <= 0)
		return;
	if (port->remote) {
		tb_port_dbg(port, "port already has a remote\n");
		return;
	}
	sw = tb_switch_alloc(port->sw->tb, &port->sw->dev,
			     tb_downstream_route(port));
	if (IS_ERR(sw)) {
		/*
		 * If there is an error accessing the connected switch
		 * it may be connected to another domain. Also we allow
		 * the other domain to be connected to a max depth switch.
		 */
		if (PTR_ERR(sw) == -EIO || PTR_ERR(sw) == -EADDRNOTAVAIL)
			tb_scan_xdomain(port);
		return;
	}

	if (tb_switch_configure(sw)) {
		tb_switch_put(sw);
		return;
	}

	/*
	 * If there was previously another domain connected remove it
	 * first.
	 */
	if (port->xdomain) {
		tb_xdomain_remove(port->xdomain);
		port->xdomain = NULL;
	}

	/*
	 * Do not send uevents until we have discovered all existing
	 * tunnels and know which switches were authorized already by
	 * the boot firmware.
	 */
	if (!tcm->hotplug_active)
		dev_set_uevent_suppress(&sw->dev, true);

	if (tb_switch_add(sw)) {
		tb_switch_put(sw);
		return;
	}

	/* Link the switches using both links if available */
	upstream_port = tb_upstream_port(sw);
	port->remote = upstream_port;
	upstream_port->remote = port;
	if (port->dual_link_port && upstream_port->dual_link_port) {
		port->dual_link_port->remote = upstream_port->dual_link_port;
		upstream_port->dual_link_port->remote = port->dual_link_port;
	}

	/* Continue scanning below the newly added switch */
	tb_scan_switch(sw);
}
222
Mika Westerberg4f807e42018-09-17 16:30:49 +0300223static int tb_free_tunnel(struct tb *tb, enum tb_tunnel_type type,
224 struct tb_port *src_port, struct tb_port *dst_port)
225{
226 struct tb_cm *tcm = tb_priv(tb);
227 struct tb_tunnel *tunnel;
228
229 list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
230 if (tunnel->type == type &&
231 ((src_port && src_port == tunnel->src_port) ||
232 (dst_port && dst_port == tunnel->dst_port))) {
233 tb_tunnel_deactivate(tunnel);
234 list_del(&tunnel->list);
235 tb_tunnel_free(tunnel);
236 return 0;
237 }
238 }
239
240 return -ENODEV;
241}
242
Andreas Noever3364f0c2014-06-03 22:04:08 +0200243/**
244 * tb_free_invalid_tunnels() - destroy tunnels of devices that have gone away
245 */
246static void tb_free_invalid_tunnels(struct tb *tb)
247{
Mika Westerberg9d3cce02017-06-06 15:25:00 +0300248 struct tb_cm *tcm = tb_priv(tb);
Mika Westerberg93f36ad2017-02-19 13:48:29 +0200249 struct tb_tunnel *tunnel;
250 struct tb_tunnel *n;
Mika Westerberg9d3cce02017-06-06 15:25:00 +0300251
252 list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
Mika Westerberg93f36ad2017-02-19 13:48:29 +0200253 if (tb_tunnel_is_invalid(tunnel)) {
254 tb_tunnel_deactivate(tunnel);
Mika Westerberg9d3cce02017-06-06 15:25:00 +0300255 list_del(&tunnel->list);
Mika Westerberg93f36ad2017-02-19 13:48:29 +0200256 tb_tunnel_free(tunnel);
Andreas Noever3364f0c2014-06-03 22:04:08 +0200257 }
258 }
259}
260
261/**
Andreas Noever23dd5bb2014-06-03 22:04:12 +0200262 * tb_free_unplugged_children() - traverse hierarchy and free unplugged switches
263 */
264static void tb_free_unplugged_children(struct tb_switch *sw)
265{
266 int i;
267 for (i = 1; i <= sw->config.max_port_number; i++) {
268 struct tb_port *port = &sw->ports[i];
Mika Westerbergdfe40ca2019-03-07 15:26:45 +0200269
270 if (!tb_port_has_remote(port))
Andreas Noever23dd5bb2014-06-03 22:04:12 +0200271 continue;
Mika Westerbergdfe40ca2019-03-07 15:26:45 +0200272
Andreas Noever23dd5bb2014-06-03 22:04:12 +0200273 if (port->remote->sw->is_unplugged) {
Mika Westerbergbfe778a2017-06-06 15:25:01 +0300274 tb_switch_remove(port->remote->sw);
Andreas Noever23dd5bb2014-06-03 22:04:12 +0200275 port->remote = NULL;
Mika Westerbergdfe40ca2019-03-07 15:26:45 +0200276 if (port->dual_link_port)
277 port->dual_link_port->remote = NULL;
Andreas Noever23dd5bb2014-06-03 22:04:12 +0200278 } else {
279 tb_free_unplugged_children(port->remote->sw);
280 }
281 }
282}
283
Andreas Noever23dd5bb2014-06-03 22:04:12 +0200284/**
Mika Westerberge78db6f2017-10-12 16:45:50 +0300285 * tb_find_port() - return the first port of @type on @sw or NULL
286 * @sw: Switch to find the port from
287 * @type: Port type to look for
Andreas Noever3364f0c2014-06-03 22:04:08 +0200288 */
Mika Westerberge78db6f2017-10-12 16:45:50 +0300289static struct tb_port *tb_find_port(struct tb_switch *sw,
290 enum tb_port_type type)
Andreas Noever3364f0c2014-06-03 22:04:08 +0200291{
292 int i;
293 for (i = 1; i <= sw->config.max_port_number; i++)
Mika Westerberge78db6f2017-10-12 16:45:50 +0300294 if (sw->ports[i].config.type == type)
Andreas Noever3364f0c2014-06-03 22:04:08 +0200295 return &sw->ports[i];
296 return NULL;
297}
298
299/**
Mika Westerberge78db6f2017-10-12 16:45:50 +0300300 * tb_find_unused_port() - return the first inactive port on @sw
301 * @sw: Switch to find the port on
302 * @type: Port type to look for
Andreas Noever3364f0c2014-06-03 22:04:08 +0200303 */
Mika Westerberge78db6f2017-10-12 16:45:50 +0300304static struct tb_port *tb_find_unused_port(struct tb_switch *sw,
305 enum tb_port_type type)
Andreas Noever3364f0c2014-06-03 22:04:08 +0200306{
307 int i;
Mika Westerberge78db6f2017-10-12 16:45:50 +0300308
Andreas Noever3364f0c2014-06-03 22:04:08 +0200309 for (i = 1; i <= sw->config.max_port_number; i++) {
310 if (tb_is_upstream_port(&sw->ports[i]))
311 continue;
Mika Westerberge78db6f2017-10-12 16:45:50 +0300312 if (sw->ports[i].config.type != type)
Andreas Noever3364f0c2014-06-03 22:04:08 +0200313 continue;
Mika Westerberge78db6f2017-10-12 16:45:50 +0300314 if (!sw->ports[i].cap_adap)
Andreas Noever3364f0c2014-06-03 22:04:08 +0200315 continue;
Mika Westerberge78db6f2017-10-12 16:45:50 +0300316 if (tb_port_is_enabled(&sw->ports[i]))
Andreas Noever3364f0c2014-06-03 22:04:08 +0200317 continue;
318 return &sw->ports[i];
319 }
320 return NULL;
321}
322
Mika Westerberg99cabbb2018-12-30 21:34:08 +0200323static struct tb_port *tb_find_pcie_down(struct tb_switch *sw,
324 const struct tb_port *port)
Andreas Noever3364f0c2014-06-03 22:04:08 +0200325{
Mika Westerberg99cabbb2018-12-30 21:34:08 +0200326 /*
327 * To keep plugging devices consistently in the same PCIe
328 * hierarchy, do mapping here for root switch downstream PCIe
329 * ports.
330 */
331 if (!tb_route(sw)) {
332 int phy_port = tb_phy_port_from_link(port->port);
333 int index;
Mika Westerberg9d3cce02017-06-06 15:25:00 +0300334
Mika Westerberg99cabbb2018-12-30 21:34:08 +0200335 /*
336 * Hard-coded Thunderbolt port to PCIe down port mapping
337 * per controller.
338 */
339 if (tb_switch_is_cr(sw))
340 index = !phy_port ? 6 : 7;
341 else if (tb_switch_is_fr(sw))
342 index = !phy_port ? 6 : 8;
343 else
344 goto out;
Andreas Noever3364f0c2014-06-03 22:04:08 +0200345
Mika Westerberg99cabbb2018-12-30 21:34:08 +0200346 /* Validate the hard-coding */
347 if (WARN_ON(index > sw->config.max_port_number))
348 goto out;
349 if (WARN_ON(!tb_port_is_pcie_down(&sw->ports[index])))
350 goto out;
351 if (WARN_ON(tb_pci_port_is_enabled(&sw->ports[index])))
352 goto out;
Andreas Noever3364f0c2014-06-03 22:04:08 +0200353
Mika Westerberg99cabbb2018-12-30 21:34:08 +0200354 return &sw->ports[index];
Andreas Noever3364f0c2014-06-03 22:04:08 +0200355 }
Mika Westerberg99cabbb2018-12-30 21:34:08 +0200356
357out:
Mika Westerberge78db6f2017-10-12 16:45:50 +0300358 return tb_find_unused_port(sw, TB_TYPE_PCIE_DOWN);
Mika Westerberg99cabbb2018-12-30 21:34:08 +0200359}
360
Mika Westerberg4f807e42018-09-17 16:30:49 +0300361static int tb_tunnel_dp(struct tb *tb, struct tb_port *out)
362{
363 struct tb_cm *tcm = tb_priv(tb);
364 struct tb_switch *sw = out->sw;
365 struct tb_tunnel *tunnel;
366 struct tb_port *in;
367
368 if (tb_port_is_enabled(out))
369 return 0;
370
371 do {
372 sw = tb_to_switch(sw->dev.parent);
373 if (!sw)
374 return 0;
375 in = tb_find_unused_port(sw, TB_TYPE_DP_HDMI_IN);
376 } while (!in);
377
378 tunnel = tb_tunnel_alloc_dp(tb, in, out);
379 if (!tunnel) {
380 tb_port_dbg(out, "DP tunnel allocation failed\n");
381 return -ENOMEM;
382 }
383
384 if (tb_tunnel_activate(tunnel)) {
385 tb_port_info(out, "DP tunnel activation failed, aborting\n");
386 tb_tunnel_free(tunnel);
387 return -EIO;
388 }
389
390 list_add_tail(&tunnel->list, &tcm->tunnel_list);
391 return 0;
392}
393
/* Tear down the DP tunnel that terminates at the DP OUT adapter @out */
static void tb_teardown_dp(struct tb *tb, struct tb_port *out)
{
	tb_free_tunnel(tb, TB_TUNNEL_DP, NULL, out);
}
398
Mika Westerberg99cabbb2018-12-30 21:34:08 +0200399static int tb_tunnel_pci(struct tb *tb, struct tb_switch *sw)
400{
401 struct tb_port *up, *down, *port;
402 struct tb_cm *tcm = tb_priv(tb);
403 struct tb_switch *parent_sw;
404 struct tb_tunnel *tunnel;
405
Mika Westerberge78db6f2017-10-12 16:45:50 +0300406 up = tb_find_port(sw, TB_TYPE_PCIE_UP);
Mika Westerberg99cabbb2018-12-30 21:34:08 +0200407 if (!up)
408 return 0;
409
410 /*
411 * Look up available down port. Since we are chaining it should
412 * be found right above this switch.
413 */
414 parent_sw = tb_to_switch(sw->dev.parent);
415 port = tb_port_at(tb_route(sw), parent_sw);
416 down = tb_find_pcie_down(parent_sw, port);
417 if (!down)
418 return 0;
419
420 tunnel = tb_tunnel_alloc_pci(tb, up, down);
421 if (!tunnel)
422 return -ENOMEM;
423
424 if (tb_tunnel_activate(tunnel)) {
425 tb_port_info(up,
426 "PCIe tunnel activation failed, aborting\n");
427 tb_tunnel_free(tunnel);
428 return -EIO;
429 }
430
431 list_add_tail(&tunnel->list, &tcm->tunnel_list);
432 return 0;
Andreas Noever3364f0c2014-06-03 22:04:08 +0200433}
Andreas Noever9da672a2014-06-03 22:04:05 +0200434
Mika Westerberg7ea4cd62018-09-28 16:41:01 +0300435static int tb_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
436{
437 struct tb_cm *tcm = tb_priv(tb);
438 struct tb_port *nhi_port, *dst_port;
439 struct tb_tunnel *tunnel;
440 struct tb_switch *sw;
441
442 sw = tb_to_switch(xd->dev.parent);
443 dst_port = tb_port_at(xd->route, sw);
444 nhi_port = tb_find_port(tb->root_switch, TB_TYPE_NHI);
445
446 mutex_lock(&tb->lock);
447 tunnel = tb_tunnel_alloc_dma(tb, nhi_port, dst_port, xd->transmit_ring,
448 xd->transmit_path, xd->receive_ring,
449 xd->receive_path);
450 if (!tunnel) {
451 mutex_unlock(&tb->lock);
452 return -ENOMEM;
453 }
454
455 if (tb_tunnel_activate(tunnel)) {
456 tb_port_info(nhi_port,
457 "DMA tunnel activation failed, aborting\n");
458 tb_tunnel_free(tunnel);
459 mutex_unlock(&tb->lock);
460 return -EIO;
461 }
462
463 list_add_tail(&tunnel->list, &tcm->tunnel_list);
464 mutex_unlock(&tb->lock);
465 return 0;
466}
467
468static void __tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
469{
470 struct tb_port *dst_port;
471 struct tb_switch *sw;
472
473 sw = tb_to_switch(xd->dev.parent);
474 dst_port = tb_port_at(xd->route, sw);
475
476 /*
477 * It is possible that the tunnel was already teared down (in
478 * case of cable disconnect) so it is fine if we cannot find it
479 * here anymore.
480 */
481 tb_free_tunnel(tb, TB_TUNNEL_DMA, NULL, dst_port);
482}
483
484static int tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
485{
486 if (!xd->is_unplugged) {
487 mutex_lock(&tb->lock);
488 __tb_disconnect_xdomain_paths(tb, xd);
489 mutex_unlock(&tb->lock);
490 }
491 return 0;
492}
493
Andreas Noeverd6cc51c2014-06-03 22:04:00 +0200494/* hotplug handling */
495
Andreas Noeverd6cc51c2014-06-03 22:04:00 +0200496/**
497 * tb_handle_hotplug() - handle hotplug event
498 *
499 * Executes on tb->wq.
500 */
501static void tb_handle_hotplug(struct work_struct *work)
502{
503 struct tb_hotplug_event *ev = container_of(work, typeof(*ev), work);
504 struct tb *tb = ev->tb;
Mika Westerberg9d3cce02017-06-06 15:25:00 +0300505 struct tb_cm *tcm = tb_priv(tb);
Andreas Noever053596d2014-06-03 22:04:06 +0200506 struct tb_switch *sw;
507 struct tb_port *port;
Andreas Noeverd6cc51c2014-06-03 22:04:00 +0200508 mutex_lock(&tb->lock);
Mika Westerberg9d3cce02017-06-06 15:25:00 +0300509 if (!tcm->hotplug_active)
Andreas Noeverd6cc51c2014-06-03 22:04:00 +0200510 goto out; /* during init, suspend or shutdown */
511
Mika Westerberg8f965ef2019-03-15 14:56:21 +0200512 sw = tb_switch_find_by_route(tb, ev->route);
Andreas Noever053596d2014-06-03 22:04:06 +0200513 if (!sw) {
514 tb_warn(tb,
515 "hotplug event from non existent switch %llx:%x (unplug: %d)\n",
516 ev->route, ev->port, ev->unplug);
517 goto out;
518 }
519 if (ev->port > sw->config.max_port_number) {
520 tb_warn(tb,
521 "hotplug event from non existent port %llx:%x (unplug: %d)\n",
522 ev->route, ev->port, ev->unplug);
Mika Westerberg8f965ef2019-03-15 14:56:21 +0200523 goto put_sw;
Andreas Noever053596d2014-06-03 22:04:06 +0200524 }
525 port = &sw->ports[ev->port];
526 if (tb_is_upstream_port(port)) {
Mika Westerbergdfe40ca2019-03-07 15:26:45 +0200527 tb_dbg(tb, "hotplug event for upstream port %llx:%x (unplug: %d)\n",
528 ev->route, ev->port, ev->unplug);
Mika Westerberg8f965ef2019-03-15 14:56:21 +0200529 goto put_sw;
Andreas Noever053596d2014-06-03 22:04:06 +0200530 }
531 if (ev->unplug) {
Mika Westerbergdfe40ca2019-03-07 15:26:45 +0200532 if (tb_port_has_remote(port)) {
Mika Westerberg7ea4cd62018-09-28 16:41:01 +0300533 tb_port_dbg(port, "switch unplugged\n");
Lukas Wunneraae20bb2016-03-20 13:57:20 +0100534 tb_sw_set_unplugged(port->remote->sw);
Andreas Noever3364f0c2014-06-03 22:04:08 +0200535 tb_free_invalid_tunnels(tb);
Mika Westerbergbfe778a2017-06-06 15:25:01 +0300536 tb_switch_remove(port->remote->sw);
Andreas Noever053596d2014-06-03 22:04:06 +0200537 port->remote = NULL;
Mika Westerbergdfe40ca2019-03-07 15:26:45 +0200538 if (port->dual_link_port)
539 port->dual_link_port->remote = NULL;
Mika Westerberg7ea4cd62018-09-28 16:41:01 +0300540 } else if (port->xdomain) {
541 struct tb_xdomain *xd = tb_xdomain_get(port->xdomain);
542
543 tb_port_dbg(port, "xdomain unplugged\n");
544 /*
545 * Service drivers are unbound during
546 * tb_xdomain_remove() so setting XDomain as
547 * unplugged here prevents deadlock if they call
548 * tb_xdomain_disable_paths(). We will tear down
549 * the path below.
550 */
551 xd->is_unplugged = true;
552 tb_xdomain_remove(xd);
553 port->xdomain = NULL;
554 __tb_disconnect_xdomain_paths(tb, xd);
555 tb_xdomain_put(xd);
Mika Westerberg4f807e42018-09-17 16:30:49 +0300556 } else if (tb_port_is_dpout(port)) {
557 tb_teardown_dp(tb, port);
Andreas Noever053596d2014-06-03 22:04:06 +0200558 } else {
Mika Westerberg62efe692018-09-17 16:32:13 +0300559 tb_port_dbg(port,
560 "got unplug event for disconnected port, ignoring\n");
Andreas Noever053596d2014-06-03 22:04:06 +0200561 }
562 } else if (port->remote) {
Mika Westerberg62efe692018-09-17 16:32:13 +0300563 tb_port_dbg(port, "got plug event for connected port, ignoring\n");
Andreas Noever053596d2014-06-03 22:04:06 +0200564 } else {
Mika Westerberg344e0642017-10-11 17:19:54 +0300565 if (tb_port_is_null(port)) {
Mika Westerberg62efe692018-09-17 16:32:13 +0300566 tb_port_dbg(port, "hotplug: scanning\n");
Mika Westerberg344e0642017-10-11 17:19:54 +0300567 tb_scan_port(port);
568 if (!port->remote)
Mika Westerberg62efe692018-09-17 16:32:13 +0300569 tb_port_dbg(port, "hotplug: no switch found\n");
Mika Westerberg4f807e42018-09-17 16:30:49 +0300570 } else if (tb_port_is_dpout(port)) {
571 tb_tunnel_dp(tb, port);
Mika Westerberg344e0642017-10-11 17:19:54 +0300572 }
Andreas Noever053596d2014-06-03 22:04:06 +0200573 }
Mika Westerberg8f965ef2019-03-15 14:56:21 +0200574
575put_sw:
576 tb_switch_put(sw);
Andreas Noeverd6cc51c2014-06-03 22:04:00 +0200577out:
578 mutex_unlock(&tb->lock);
579 kfree(ev);
580}
581
582/**
583 * tb_schedule_hotplug_handler() - callback function for the control channel
584 *
585 * Delegates to tb_handle_hotplug.
586 */
Mika Westerberg81a54b52017-06-06 15:25:09 +0300587static void tb_handle_event(struct tb *tb, enum tb_cfg_pkg_type type,
588 const void *buf, size_t size)
Andreas Noeverd6cc51c2014-06-03 22:04:00 +0200589{
Mika Westerberg81a54b52017-06-06 15:25:09 +0300590 const struct cfg_event_pkg *pkg = buf;
Mika Westerberg81a54b52017-06-06 15:25:09 +0300591 u64 route;
592
593 if (type != TB_CFG_PKG_EVENT) {
594 tb_warn(tb, "unexpected event %#x, ignoring\n", type);
595 return;
596 }
597
598 route = tb_cfg_get_route(&pkg->header);
599
600 if (tb_cfg_error(tb->ctl, route, pkg->port,
601 TB_CFG_ERROR_ACK_PLUG_EVENT)) {
602 tb_warn(tb, "could not ack plug event on %llx:%x\n", route,
603 pkg->port);
604 }
605
Mika Westerberg4f807e42018-09-17 16:30:49 +0300606 tb_queue_hotplug(tb, route, pkg->port, pkg->unplug);
Andreas Noeverd6cc51c2014-06-03 22:04:00 +0200607}
608
Mika Westerberg9d3cce02017-06-06 15:25:00 +0300609static void tb_stop(struct tb *tb)
Andreas Noeverd6cc51c2014-06-03 22:04:00 +0200610{
Mika Westerberg9d3cce02017-06-06 15:25:00 +0300611 struct tb_cm *tcm = tb_priv(tb);
Mika Westerberg93f36ad2017-02-19 13:48:29 +0200612 struct tb_tunnel *tunnel;
613 struct tb_tunnel *n;
Andreas Noever3364f0c2014-06-03 22:04:08 +0200614
Andreas Noever3364f0c2014-06-03 22:04:08 +0200615 /* tunnels are only present after everything has been initialized */
Mika Westerberg7ea4cd62018-09-28 16:41:01 +0300616 list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
617 /*
618 * DMA tunnels require the driver to be functional so we
619 * tear them down. Other protocol tunnels can be left
620 * intact.
621 */
622 if (tb_tunnel_is_dma(tunnel))
623 tb_tunnel_deactivate(tunnel);
Mika Westerberg93f36ad2017-02-19 13:48:29 +0200624 tb_tunnel_free(tunnel);
Mika Westerberg7ea4cd62018-09-28 16:41:01 +0300625 }
Mika Westerbergbfe778a2017-06-06 15:25:01 +0300626 tb_switch_remove(tb->root_switch);
Mika Westerberg9d3cce02017-06-06 15:25:00 +0300627 tcm->hotplug_active = false; /* signal tb_handle_hotplug to quit */
Andreas Noeverd6cc51c2014-06-03 22:04:00 +0200628}
629
Mika Westerberg99cabbb2018-12-30 21:34:08 +0200630static int tb_scan_finalize_switch(struct device *dev, void *data)
631{
632 if (tb_is_switch(dev)) {
633 struct tb_switch *sw = tb_to_switch(dev);
634
635 /*
636 * If we found that the switch was already setup by the
637 * boot firmware, mark it as authorized now before we
638 * send uevent to userspace.
639 */
640 if (sw->boot)
641 sw->authorized = 1;
642
643 dev_set_uevent_suppress(dev, false);
644 kobject_uevent(&dev->kobj, KOBJ_ADD);
645 device_for_each_child(dev, NULL, tb_scan_finalize_switch);
646 }
647
648 return 0;
649}
650
Mika Westerberg9d3cce02017-06-06 15:25:00 +0300651static int tb_start(struct tb *tb)
Andreas Noeverd6cc51c2014-06-03 22:04:00 +0200652{
Mika Westerberg9d3cce02017-06-06 15:25:00 +0300653 struct tb_cm *tcm = tb_priv(tb);
Mika Westerbergbfe778a2017-06-06 15:25:01 +0300654 int ret;
Andreas Noeverd6cc51c2014-06-03 22:04:00 +0200655
Mika Westerbergbfe778a2017-06-06 15:25:01 +0300656 tb->root_switch = tb_switch_alloc(tb, &tb->dev, 0);
Mika Westerberg444ac382018-12-30 12:17:52 +0200657 if (IS_ERR(tb->root_switch))
658 return PTR_ERR(tb->root_switch);
Andreas Noevera25c8b22014-06-03 22:04:02 +0200659
Mika Westerberge6b245c2017-06-06 15:25:17 +0300660 /*
661 * ICM firmware upgrade needs running firmware and in native
662 * mode that is not available so disable firmware upgrade of the
663 * root switch.
664 */
665 tb->root_switch->no_nvm_upgrade = true;
666
Mika Westerbergbfe778a2017-06-06 15:25:01 +0300667 ret = tb_switch_configure(tb->root_switch);
668 if (ret) {
669 tb_switch_put(tb->root_switch);
670 return ret;
671 }
672
673 /* Announce the switch to the world */
674 ret = tb_switch_add(tb->root_switch);
675 if (ret) {
676 tb_switch_put(tb->root_switch);
677 return ret;
678 }
679
Andreas Noever9da672a2014-06-03 22:04:05 +0200680 /* Full scan to discover devices added before the driver was loaded. */
681 tb_scan_switch(tb->root_switch);
Mika Westerberg0414bec2017-02-19 23:43:26 +0200682 /* Find out tunnels created by the boot firmware */
683 tb_discover_tunnels(tb->root_switch);
Mika Westerberg99cabbb2018-12-30 21:34:08 +0200684 /* Make the discovered switches available to the userspace */
685 device_for_each_child(&tb->root_switch->dev, NULL,
686 tb_scan_finalize_switch);
Andreas Noever9da672a2014-06-03 22:04:05 +0200687
Andreas Noeverd6cc51c2014-06-03 22:04:00 +0200688 /* Allow tb_handle_hotplug to progress events */
Mika Westerberg9d3cce02017-06-06 15:25:00 +0300689 tcm->hotplug_active = true;
690 return 0;
Andreas Noeverd6cc51c2014-06-03 22:04:00 +0200691}
692
Mika Westerberg9d3cce02017-06-06 15:25:00 +0300693static int tb_suspend_noirq(struct tb *tb)
Andreas Noever23dd5bb2014-06-03 22:04:12 +0200694{
Mika Westerberg9d3cce02017-06-06 15:25:00 +0300695 struct tb_cm *tcm = tb_priv(tb);
696
Mika Westerbergdaa51402018-10-01 12:31:19 +0300697 tb_dbg(tb, "suspending...\n");
Andreas Noever23dd5bb2014-06-03 22:04:12 +0200698 tb_switch_suspend(tb->root_switch);
Mika Westerberg9d3cce02017-06-06 15:25:00 +0300699 tcm->hotplug_active = false; /* signal tb_handle_hotplug to quit */
Mika Westerbergdaa51402018-10-01 12:31:19 +0300700 tb_dbg(tb, "suspend finished\n");
Mika Westerberg9d3cce02017-06-06 15:25:00 +0300701
702 return 0;
Andreas Noever23dd5bb2014-06-03 22:04:12 +0200703}
704
Mika Westerberg9d3cce02017-06-06 15:25:00 +0300705static int tb_resume_noirq(struct tb *tb)
Andreas Noever23dd5bb2014-06-03 22:04:12 +0200706{
Mika Westerberg9d3cce02017-06-06 15:25:00 +0300707 struct tb_cm *tcm = tb_priv(tb);
Mika Westerberg93f36ad2017-02-19 13:48:29 +0200708 struct tb_tunnel *tunnel, *n;
Mika Westerberg9d3cce02017-06-06 15:25:00 +0300709
Mika Westerbergdaa51402018-10-01 12:31:19 +0300710 tb_dbg(tb, "resuming...\n");
Andreas Noever23dd5bb2014-06-03 22:04:12 +0200711
712 /* remove any pci devices the firmware might have setup */
713 tb_switch_reset(tb, 0);
714
715 tb_switch_resume(tb->root_switch);
716 tb_free_invalid_tunnels(tb);
717 tb_free_unplugged_children(tb->root_switch);
Mika Westerberg9d3cce02017-06-06 15:25:00 +0300718 list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list)
Mika Westerberg93f36ad2017-02-19 13:48:29 +0200719 tb_tunnel_restart(tunnel);
Mika Westerberg9d3cce02017-06-06 15:25:00 +0300720 if (!list_empty(&tcm->tunnel_list)) {
Andreas Noever23dd5bb2014-06-03 22:04:12 +0200721 /*
722 * the pcie links need some time to get going.
723 * 100ms works for me...
724 */
Mika Westerbergdaa51402018-10-01 12:31:19 +0300725 tb_dbg(tb, "tunnels restarted, sleeping for 100ms\n");
Andreas Noever23dd5bb2014-06-03 22:04:12 +0200726 msleep(100);
727 }
728 /* Allow tb_handle_hotplug to progress events */
Mika Westerberg9d3cce02017-06-06 15:25:00 +0300729 tcm->hotplug_active = true;
Mika Westerbergdaa51402018-10-01 12:31:19 +0300730 tb_dbg(tb, "resume finished\n");
Mika Westerberg9d3cce02017-06-06 15:25:00 +0300731
732 return 0;
733}
734
Mika Westerberg7ea4cd62018-09-28 16:41:01 +0300735static int tb_free_unplugged_xdomains(struct tb_switch *sw)
736{
737 int i, ret = 0;
738
739 for (i = 1; i <= sw->config.max_port_number; i++) {
740 struct tb_port *port = &sw->ports[i];
741
742 if (tb_is_upstream_port(port))
743 continue;
744 if (port->xdomain && port->xdomain->is_unplugged) {
745 tb_xdomain_remove(port->xdomain);
746 port->xdomain = NULL;
747 ret++;
748 } else if (port->remote) {
749 ret += tb_free_unplugged_xdomains(port->remote->sw);
750 }
751 }
752
753 return ret;
754}
755
/* PM complete hook: clean up XDomains that vanished over system sleep */
static void tb_complete(struct tb *tb)
{
	/*
	 * Release any unplugged XDomains and if there is a case where
	 * another domain is swapped in place of unplugged XDomain we
	 * need to run another rescan.
	 */
	mutex_lock(&tb->lock);
	if (tb_free_unplugged_xdomains(tb->root_switch))
		tb_scan_switch(tb->root_switch);
	mutex_unlock(&tb->lock);
}
768
/* Connection manager operations for the software (native) CM */
static const struct tb_cm_ops tb_cm_ops = {
	.start = tb_start,
	.stop = tb_stop,
	.suspend_noirq = tb_suspend_noirq,
	.resume_noirq = tb_resume_noirq,
	.complete = tb_complete,
	.handle_event = tb_handle_event,
	.approve_switch = tb_tunnel_pci,
	.approve_xdomain_paths = tb_approve_xdomain_paths,
	.disconnect_xdomain_paths = tb_disconnect_xdomain_paths,
};
780
781struct tb *tb_probe(struct tb_nhi *nhi)
782{
783 struct tb_cm *tcm;
784 struct tb *tb;
785
Lukas Wunner630b3af2017-08-01 14:10:41 +0200786 if (!x86_apple_machine)
Mika Westerbergf67cf492017-06-06 15:25:16 +0300787 return NULL;
788
Mika Westerberg9d3cce02017-06-06 15:25:00 +0300789 tb = tb_domain_alloc(nhi, sizeof(*tcm));
790 if (!tb)
791 return NULL;
792
Mika Westerberg99cabbb2018-12-30 21:34:08 +0200793 tb->security_level = TB_SECURITY_USER;
Mika Westerberg9d3cce02017-06-06 15:25:00 +0300794 tb->cm_ops = &tb_cm_ops;
795
796 tcm = tb_priv(tb);
797 INIT_LIST_HEAD(&tcm->tunnel_list);
798
799 return tb;
Andreas Noever23dd5bb2014-06-03 22:04:12 +0200800}