// SPDX-License-Identifier: GPL-2.0
/*
 * Thunderbolt driver - bus logic (NHI independent)
 *
 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
 * Copyright (C) 2019, Intel Corporation
 */

#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/platform_data/x86/apple.h>

#include "tb.h"
#include "tb_regs.h"
#include "tunnel.h"

/**
 * struct tb_cm - Simple Thunderbolt connection manager
 * @tunnel_list: List of active tunnels
 * @hotplug_active: tb_handle_hotplug will stop progressing plug
 *		    events and exit if this is not set (it needs to
 *		    acquire the lock one more time). Used to drain wq
 *		    after cfg has been paused.
 */
struct tb_cm {
	struct list_head tunnel_list;
	bool hotplug_active;
};

struct tb_hotplug_event {
	struct work_struct work;
	struct tb *tb;
	u64 route;
	u8 port;
	bool unplug;
};

static void tb_handle_hotplug(struct work_struct *work);

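/**
 * tb_queue_hotplug() - defer a hotplug event to the domain workqueue
 *
 * Allocates a struct tb_hotplug_event and queues it on tb->wq to be
 * handled by tb_handle_hotplug(). The event is silently dropped if the
 * allocation fails.
 */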
static void tb_queue_hotplug(struct tb *tb, u64 route, u8 port, bool unplug)
{
	struct tb_hotplug_event *ev;

	ev = kmalloc(sizeof(*ev), GFP_KERNEL);
	if (!ev)
		return;

	ev->tb = tb;
	ev->route = route;
	ev->port = port;
	ev->unplug = unplug;
	INIT_WORK(&ev->work, tb_handle_hotplug);
	queue_work(tb->wq, &ev->work);
}

/* enumeration & hot plug handling */

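/**
 * tb_discover_tunnels() - discover tunnels created by the boot firmware
 *
 * Walks the switch tree recursively, adds the DP and PCIe tunnels it
 * finds to the connection manager tunnel list and flags switches along
 * discovered PCIe tunnels as set up by the boot firmware (@boot).
 */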
static void tb_discover_tunnels(struct tb_switch *sw)
{
	struct tb *tb = sw->tb;
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_port *port;

	tb_switch_for_each_port(sw, port) {
		struct tb_tunnel *tunnel = NULL;

		switch (port->config.type) {
		case TB_TYPE_DP_HDMI_IN:
			tunnel = tb_tunnel_discover_dp(tb, port);
			break;

		case TB_TYPE_PCIE_DOWN:
			tunnel = tb_tunnel_discover_pci(tb, port);
			break;

		default:
			break;
		}

		if (!tunnel)
			continue;

		if (tb_tunnel_is_pci(tunnel)) {
			struct tb_switch *parent = tunnel->dst_port->sw;

			while (parent != tunnel->src_port->sw) {
				parent->boot = true;
				parent = tb_switch_parent(parent);
			}
		}

		list_add_tail(&tunnel->list, &tcm->tunnel_list);
	}

	tb_switch_for_each_port(sw, port) {
		if (tb_port_has_remote(port))
			tb_discover_tunnels(port->remote->sw);
	}
}

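/**
 * tb_scan_xdomain() - check whether a port leads to another domain
 *
 * Called when a downstream switch cannot be accessed. If no XDomain is
 * known for the downstream route yet, allocate one, attach it to the
 * port and register it.
 */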
static void tb_scan_xdomain(struct tb_port *port)
{
	struct tb_switch *sw = port->sw;
	struct tb *tb = sw->tb;
	struct tb_xdomain *xd;
	u64 route;

	route = tb_downstream_route(port);
	xd = tb_xdomain_find_by_route(tb, route);
	if (xd) {
		tb_xdomain_put(xd);
		return;
	}

	xd = tb_xdomain_alloc(tb, &sw->dev, route, tb->root_switch->uuid,
			      NULL);
	if (xd) {
		tb_port_at(route, sw)->xdomain = xd;
		tb_xdomain_add(xd);
	}
}

static void tb_scan_port(struct tb_port *port);

/**
 * tb_scan_switch() - scan for and initialize downstream switches
 */
static void tb_scan_switch(struct tb_switch *sw)
{
	struct tb_port *port;

	tb_switch_for_each_port(sw, port)
		tb_scan_port(port);
}

/**
 * tb_scan_port() - check for and initialize switches below port
 */
static void tb_scan_port(struct tb_port *port)
{
	struct tb_cm *tcm = tb_priv(port->sw->tb);
	struct tb_port *upstream_port;
	struct tb_switch *sw;

	if (tb_is_upstream_port(port))
		return;

	if (tb_port_is_dpout(port) && tb_dp_port_hpd_is_active(port) == 1 &&
	    !tb_dp_port_is_enabled(port)) {
		tb_port_dbg(port, "DP adapter HPD set, queuing hotplug\n");
		tb_queue_hotplug(port->sw->tb, tb_route(port->sw), port->port,
				 false);
		return;
	}

	if (port->config.type != TB_TYPE_PORT)
		return;
	if (port->dual_link_port && port->link_nr)
		return; /*
			 * Downstream switch is reachable through two ports.
			 * Only scan on the primary port (link_nr == 0).
			 */
	if (tb_wait_for_port(port, false) <= 0)
		return;
	if (port->remote) {
		tb_port_dbg(port, "port already has a remote\n");
		return;
	}
	sw = tb_switch_alloc(port->sw->tb, &port->sw->dev,
			     tb_downstream_route(port));
	if (IS_ERR(sw)) {
		/*
		 * If there is an error accessing the connected switch
		 * it may be connected to another domain. Also we allow
		 * the other domain to be connected to a max depth switch.
		 */
		if (PTR_ERR(sw) == -EIO || PTR_ERR(sw) == -EADDRNOTAVAIL)
			tb_scan_xdomain(port);
		return;
	}

	if (tb_switch_configure(sw)) {
		tb_switch_put(sw);
		return;
	}

	/*
	 * If there was previously another domain connected remove it
	 * first.
	 */
	if (port->xdomain) {
		tb_xdomain_remove(port->xdomain);
		port->xdomain = NULL;
	}

	/*
	 * Do not send uevents until we have discovered all existing
	 * tunnels and know which switches were authorized already by
	 * the boot firmware.
	 */
	if (!tcm->hotplug_active)
		dev_set_uevent_suppress(&sw->dev, true);

	if (tb_switch_add(sw)) {
		tb_switch_put(sw);
		return;
	}

	/* Link the switches using both links if available */
	upstream_port = tb_upstream_port(sw);
	port->remote = upstream_port;
	upstream_port->remote = port;
	if (port->dual_link_port && upstream_port->dual_link_port) {
		port->dual_link_port->remote = upstream_port->dual_link_port;
		upstream_port->dual_link_port->remote = port->dual_link_port;
	}

	tb_scan_switch(sw);
}

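/**
 * tb_free_tunnel() - tear down the first matching tunnel
 *
 * Deactivates and frees the first tunnel of @type whose source or
 * destination port matches. Returns %0 on success or %-ENODEV if no
 * matching tunnel exists.
 */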
static int tb_free_tunnel(struct tb *tb, enum tb_tunnel_type type,
			  struct tb_port *src_port, struct tb_port *dst_port)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel;

	list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
		if (tunnel->type == type &&
		    ((src_port && src_port == tunnel->src_port) ||
		     (dst_port && dst_port == tunnel->dst_port))) {
			tb_tunnel_deactivate(tunnel);
			list_del(&tunnel->list);
			tb_tunnel_free(tunnel);
			return 0;
		}
	}

	return -ENODEV;
}

/**
 * tb_free_invalid_tunnels() - destroy tunnels of devices that have gone away
 */
static void tb_free_invalid_tunnels(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel;
	struct tb_tunnel *n;

	list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
		if (tb_tunnel_is_invalid(tunnel)) {
			tb_tunnel_deactivate(tunnel);
			list_del(&tunnel->list);
			tb_tunnel_free(tunnel);
		}
	}
}

/**
 * tb_free_unplugged_children() - traverse hierarchy and free unplugged switches
 */
static void tb_free_unplugged_children(struct tb_switch *sw)
{
	struct tb_port *port;

	tb_switch_for_each_port(sw, port) {
		if (!tb_port_has_remote(port))
			continue;

		if (port->remote->sw->is_unplugged) {
			tb_switch_remove(port->remote->sw);
			port->remote = NULL;
			if (port->dual_link_port)
				port->dual_link_port->remote = NULL;
		} else {
			tb_free_unplugged_children(port->remote->sw);
		}
	}
}

/**
 * tb_find_port() - return the first port of @type on @sw or NULL
 * @sw: Switch to find the port from
 * @type: Port type to look for
 */
static struct tb_port *tb_find_port(struct tb_switch *sw,
				    enum tb_port_type type)
{
	struct tb_port *port;

	tb_switch_for_each_port(sw, port) {
		if (port->config.type == type)
			return port;
	}

	return NULL;
}

/**
 * tb_find_unused_port() - return the first inactive port on @sw
 * @sw: Switch to find the port on
 * @type: Port type to look for
 */
static struct tb_port *tb_find_unused_port(struct tb_switch *sw,
					   enum tb_port_type type)
{
	struct tb_port *port;

	tb_switch_for_each_port(sw, port) {
		if (tb_is_upstream_port(port))
			continue;
		if (port->config.type != type)
			continue;
		if (!port->cap_adap)
			continue;
		if (tb_port_is_enabled(port))
			continue;
		return port;
	}
	return NULL;
}

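/**
 * tb_find_pcie_down() - find a PCIe downstream adapter to tunnel from
 *
 * On the root switch the adapter is picked from a fixed per-controller
 * mapping so that devices always land in the same PCIe hierarchy.
 * Otherwise the first unused PCIe downstream adapter on @sw is used.
 */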
static struct tb_port *tb_find_pcie_down(struct tb_switch *sw,
					 const struct tb_port *port)
{
	/*
	 * To keep plugging devices consistently in the same PCIe
	 * hierarchy, do mapping here for root switch downstream PCIe
	 * ports.
	 */
	if (!tb_route(sw)) {
		int phy_port = tb_phy_port_from_link(port->port);
		int index;

		/*
		 * Hard-coded Thunderbolt port to PCIe down port mapping
		 * per controller.
		 */
		if (tb_switch_is_cr(sw))
			index = !phy_port ? 6 : 7;
		else if (tb_switch_is_fr(sw))
			index = !phy_port ? 6 : 8;
		else
			goto out;

		/* Validate the hard-coding */
		if (WARN_ON(index > sw->config.max_port_number))
			goto out;
		if (WARN_ON(!tb_port_is_pcie_down(&sw->ports[index])))
			goto out;
		if (WARN_ON(tb_pci_port_is_enabled(&sw->ports[index])))
			goto out;

		return &sw->ports[index];
	}

out:
	return tb_find_unused_port(sw, TB_TYPE_PCIE_DOWN);
}

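/**
 * tb_tunnel_dp() - set up a DisplayPort tunnel ending at @out
 *
 * Walks up the switch chain looking for an unused DP IN adapter and, if
 * one is found, allocates and activates a DP tunnel to @out.
 */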
static int tb_tunnel_dp(struct tb *tb, struct tb_port *out)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_switch *sw = out->sw;
	struct tb_tunnel *tunnel;
	struct tb_port *in;

	if (tb_port_is_enabled(out))
		return 0;

	do {
		sw = tb_to_switch(sw->dev.parent);
		if (!sw)
			return 0;
		in = tb_find_unused_port(sw, TB_TYPE_DP_HDMI_IN);
	} while (!in);

	tunnel = tb_tunnel_alloc_dp(tb, in, out);
	if (!tunnel) {
		tb_port_dbg(out, "DP tunnel allocation failed\n");
		return -ENOMEM;
	}

	if (tb_tunnel_activate(tunnel)) {
		tb_port_info(out, "DP tunnel activation failed, aborting\n");
		tb_tunnel_free(tunnel);
		return -EIO;
	}

	list_add_tail(&tunnel->list, &tcm->tunnel_list);
	return 0;
}

static void tb_teardown_dp(struct tb *tb, struct tb_port *out)
{
	tb_free_tunnel(tb, TB_TUNNEL_DP, NULL, out);
}

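/**
 * tb_tunnel_pci() - set up a PCIe tunnel to the given switch
 *
 * Finds the PCIe upstream adapter of @sw and a matching downstream
 * adapter on its parent, then allocates and activates a PCIe tunnel
 * between them. Used as the ->approve_switch callback.
 */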
static int tb_tunnel_pci(struct tb *tb, struct tb_switch *sw)
{
	struct tb_port *up, *down, *port;
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_switch *parent_sw;
	struct tb_tunnel *tunnel;

	up = tb_find_port(sw, TB_TYPE_PCIE_UP);
	if (!up)
		return 0;

	/*
	 * Look up available down port. Since we are chaining it should
	 * be found right above this switch.
	 */
	parent_sw = tb_to_switch(sw->dev.parent);
	port = tb_port_at(tb_route(sw), parent_sw);
	down = tb_find_pcie_down(parent_sw, port);
	if (!down)
		return 0;

	tunnel = tb_tunnel_alloc_pci(tb, up, down);
	if (!tunnel)
		return -ENOMEM;

	if (tb_tunnel_activate(tunnel)) {
		tb_port_info(up,
			     "PCIe tunnel activation failed, aborting\n");
		tb_tunnel_free(tunnel);
		return -EIO;
	}

	list_add_tail(&tunnel->list, &tcm->tunnel_list);
	return 0;
}

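/**
 * tb_approve_xdomain_paths() - enable DMA paths to another domain
 *
 * Sets up a DMA tunnel between the NHI port of the root switch and the
 * port where @xd is connected, using the transmit/receive rings and
 * paths stored in @xd.
 */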
static int tb_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_port *nhi_port, *dst_port;
	struct tb_tunnel *tunnel;
	struct tb_switch *sw;

	sw = tb_to_switch(xd->dev.parent);
	dst_port = tb_port_at(xd->route, sw);
	nhi_port = tb_find_port(tb->root_switch, TB_TYPE_NHI);

	mutex_lock(&tb->lock);
	tunnel = tb_tunnel_alloc_dma(tb, nhi_port, dst_port, xd->transmit_ring,
				     xd->transmit_path, xd->receive_ring,
				     xd->receive_path);
	if (!tunnel) {
		mutex_unlock(&tb->lock);
		return -ENOMEM;
	}

	if (tb_tunnel_activate(tunnel)) {
		tb_port_info(nhi_port,
			     "DMA tunnel activation failed, aborting\n");
		tb_tunnel_free(tunnel);
		mutex_unlock(&tb->lock);
		return -EIO;
	}

	list_add_tail(&tunnel->list, &tcm->tunnel_list);
	mutex_unlock(&tb->lock);
	return 0;
}

static void __tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
{
	struct tb_port *dst_port;
	struct tb_switch *sw;

	sw = tb_to_switch(xd->dev.parent);
	dst_port = tb_port_at(xd->route, sw);

	/*
	 * It is possible that the tunnel was already torn down (in
	 * case of cable disconnect) so it is fine if we cannot find it
	 * here anymore.
	 */
	tb_free_tunnel(tb, TB_TUNNEL_DMA, NULL, dst_port);
}

static int tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
{
	if (!xd->is_unplugged) {
		mutex_lock(&tb->lock);
		__tb_disconnect_xdomain_paths(tb, xd);
		mutex_unlock(&tb->lock);
	}
	return 0;
}

/* hotplug handling */

/**
 * tb_handle_hotplug() - handle hotplug event
 *
 * Executes on tb->wq.
 */
static void tb_handle_hotplug(struct work_struct *work)
{
	struct tb_hotplug_event *ev = container_of(work, typeof(*ev), work);
	struct tb *tb = ev->tb;
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_switch *sw;
	struct tb_port *port;

	mutex_lock(&tb->lock);
	if (!tcm->hotplug_active)
		goto out; /* during init, suspend or shutdown */

	sw = tb_switch_find_by_route(tb, ev->route);
	if (!sw) {
		tb_warn(tb,
			"hotplug event from non existent switch %llx:%x (unplug: %d)\n",
			ev->route, ev->port, ev->unplug);
		goto out;
	}
	if (ev->port > sw->config.max_port_number) {
		tb_warn(tb,
			"hotplug event from non existent port %llx:%x (unplug: %d)\n",
			ev->route, ev->port, ev->unplug);
		goto put_sw;
	}
	port = &sw->ports[ev->port];
	if (tb_is_upstream_port(port)) {
		tb_dbg(tb, "hotplug event for upstream port %llx:%x (unplug: %d)\n",
		       ev->route, ev->port, ev->unplug);
		goto put_sw;
	}
	if (ev->unplug) {
		if (tb_port_has_remote(port)) {
			tb_port_dbg(port, "switch unplugged\n");
			tb_sw_set_unplugged(port->remote->sw);
			tb_free_invalid_tunnels(tb);
			tb_switch_remove(port->remote->sw);
			port->remote = NULL;
			if (port->dual_link_port)
				port->dual_link_port->remote = NULL;
		} else if (port->xdomain) {
			struct tb_xdomain *xd = tb_xdomain_get(port->xdomain);

			tb_port_dbg(port, "xdomain unplugged\n");
			/*
			 * Service drivers are unbound during
			 * tb_xdomain_remove() so setting XDomain as
			 * unplugged here prevents deadlock if they call
			 * tb_xdomain_disable_paths(). We will tear down
			 * the path below.
			 */
			xd->is_unplugged = true;
			tb_xdomain_remove(xd);
			port->xdomain = NULL;
			__tb_disconnect_xdomain_paths(tb, xd);
			tb_xdomain_put(xd);
		} else if (tb_port_is_dpout(port)) {
			tb_teardown_dp(tb, port);
		} else {
			tb_port_dbg(port,
				    "got unplug event for disconnected port, ignoring\n");
		}
	} else if (port->remote) {
		tb_port_dbg(port, "got plug event for connected port, ignoring\n");
	} else {
		if (tb_port_is_null(port)) {
			tb_port_dbg(port, "hotplug: scanning\n");
			tb_scan_port(port);
			if (!port->remote)
				tb_port_dbg(port, "hotplug: no switch found\n");
		} else if (tb_port_is_dpout(port)) {
			tb_tunnel_dp(tb, port);
		}
	}

put_sw:
	tb_switch_put(sw);
out:
	mutex_unlock(&tb->lock);
	kfree(ev);
}

/**
 * tb_handle_event() - callback function for the control channel
 *
 * Delegates to tb_handle_hotplug.
 */
static void tb_handle_event(struct tb *tb, enum tb_cfg_pkg_type type,
			    const void *buf, size_t size)
{
	const struct cfg_event_pkg *pkg = buf;
	u64 route;

	if (type != TB_CFG_PKG_EVENT) {
		tb_warn(tb, "unexpected event %#x, ignoring\n", type);
		return;
	}

	route = tb_cfg_get_route(&pkg->header);

	if (tb_cfg_error(tb->ctl, route, pkg->port,
			 TB_CFG_ERROR_ACK_PLUG_EVENT)) {
		tb_warn(tb, "could not ack plug event on %llx:%x\n", route,
			pkg->port);
	}

	tb_queue_hotplug(tb, route, pkg->port, pkg->unplug);
}

static void tb_stop(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel;
	struct tb_tunnel *n;

	/* tunnels are only present after everything has been initialized */
	list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
		/*
		 * DMA tunnels require the driver to be functional so we
		 * tear them down. Other protocol tunnels can be left
		 * intact.
		 */
		if (tb_tunnel_is_dma(tunnel))
			tb_tunnel_deactivate(tunnel);
		tb_tunnel_free(tunnel);
	}
	tb_switch_remove(tb->root_switch);
	tcm->hotplug_active = false; /* signal tb_handle_hotplug to quit */
}

static int tb_scan_finalize_switch(struct device *dev, void *data)
{
	if (tb_is_switch(dev)) {
		struct tb_switch *sw = tb_to_switch(dev);

		/*
		 * If we found that the switch was already set up by the
		 * boot firmware, mark it as authorized now before we
		 * send uevent to userspace.
		 */
		if (sw->boot)
			sw->authorized = 1;

		dev_set_uevent_suppress(dev, false);
		kobject_uevent(&dev->kobj, KOBJ_ADD);
		device_for_each_child(dev, NULL, tb_scan_finalize_switch);
	}

	return 0;
}

static int tb_start(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);
	int ret;

	tb->root_switch = tb_switch_alloc(tb, &tb->dev, 0);
	if (IS_ERR(tb->root_switch))
		return PTR_ERR(tb->root_switch);

	/*
	 * ICM firmware upgrade needs running firmware and in native
	 * mode that is not available so disable firmware upgrade of the
	 * root switch.
	 */
	tb->root_switch->no_nvm_upgrade = true;

	ret = tb_switch_configure(tb->root_switch);
	if (ret) {
		tb_switch_put(tb->root_switch);
		return ret;
	}

	/* Announce the switch to the world */
	ret = tb_switch_add(tb->root_switch);
	if (ret) {
		tb_switch_put(tb->root_switch);
		return ret;
	}

	/* Full scan to discover devices added before the driver was loaded. */
	tb_scan_switch(tb->root_switch);
	/* Find out tunnels created by the boot firmware */
	tb_discover_tunnels(tb->root_switch);
	/* Make the discovered switches available to the userspace */
	device_for_each_child(&tb->root_switch->dev, NULL,
			      tb_scan_finalize_switch);

	/* Allow tb_handle_hotplug to progress events */
	tcm->hotplug_active = true;
	return 0;
}

static int tb_suspend_noirq(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);

	tb_dbg(tb, "suspending...\n");
	tb_switch_suspend(tb->root_switch);
	tcm->hotplug_active = false; /* signal tb_handle_hotplug to quit */
	tb_dbg(tb, "suspend finished\n");

	return 0;
}

static int tb_resume_noirq(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel, *n;

	tb_dbg(tb, "resuming...\n");

	/* remove any pci devices the firmware might have set up */
	tb_switch_reset(tb, 0);

	tb_switch_resume(tb->root_switch);
	tb_free_invalid_tunnels(tb);
	tb_free_unplugged_children(tb->root_switch);
	list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list)
		tb_tunnel_restart(tunnel);
	if (!list_empty(&tcm->tunnel_list)) {
		/*
		 * the pcie links need some time to get going.
		 * 100ms works for me...
		 */
		tb_dbg(tb, "tunnels restarted, sleeping for 100ms\n");
		msleep(100);
	}
	/* Allow tb_handle_hotplug to progress events */
	tcm->hotplug_active = true;
	tb_dbg(tb, "resume finished\n");

	return 0;
}

static int tb_free_unplugged_xdomains(struct tb_switch *sw)
{
	struct tb_port *port;
	int ret = 0;

	tb_switch_for_each_port(sw, port) {
		if (tb_is_upstream_port(port))
			continue;
		if (port->xdomain && port->xdomain->is_unplugged) {
			tb_xdomain_remove(port->xdomain);
			port->xdomain = NULL;
			ret++;
		} else if (port->remote) {
			ret += tb_free_unplugged_xdomains(port->remote->sw);
		}
	}

	return ret;
}

static void tb_complete(struct tb *tb)
{
	/*
	 * Release any unplugged XDomains and if there is a case where
	 * another domain is swapped in place of unplugged XDomain we
	 * need to run another rescan.
	 */
	mutex_lock(&tb->lock);
	if (tb_free_unplugged_xdomains(tb->root_switch))
		tb_scan_switch(tb->root_switch);
	mutex_unlock(&tb->lock);
}

static const struct tb_cm_ops tb_cm_ops = {
	.start = tb_start,
	.stop = tb_stop,
	.suspend_noirq = tb_suspend_noirq,
	.resume_noirq = tb_resume_noirq,
	.complete = tb_complete,
	.handle_event = tb_handle_event,
	.approve_switch = tb_tunnel_pci,
	.approve_xdomain_paths = tb_approve_xdomain_paths,
	.disconnect_xdomain_paths = tb_disconnect_xdomain_paths,
};

struct tb *tb_probe(struct tb_nhi *nhi)
{
	struct tb_cm *tcm;
	struct tb *tb;

	if (!x86_apple_machine)
		return NULL;

	tb = tb_domain_alloc(nhi, sizeof(*tcm));
	if (!tb)
		return NULL;

	tb->security_level = TB_SECURITY_USER;
	tb->cm_ops = &tb_cm_ops;

	tcm = tb_priv(tb);
	INIT_LIST_HEAD(&tcm->tunnel_list);

	return tb;
}