blob: f2ce6adc1f481d3fdbf74127c173b9d5e0bedd62 [file] [log] [blame]
Greg Kroah-Hartmanb2441312017-11-01 15:07:57 +01001// SPDX-License-Identifier: GPL-2.0
Andreas Noeverd6cc51c2014-06-03 22:04:00 +02002/*
Mika Westerberg99cabbb2018-12-30 21:34:08 +02003 * Thunderbolt driver - bus logic (NHI independent)
Andreas Noeverd6cc51c2014-06-03 22:04:00 +02004 *
5 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
Mika Westerberg99cabbb2018-12-30 21:34:08 +02006 * Copyright (C) 2019, Intel Corporation
Andreas Noeverd6cc51c2014-06-03 22:04:00 +02007 */
8
9#include <linux/slab.h>
10#include <linux/errno.h>
11#include <linux/delay.h>
Lukas Wunner630b3af2017-08-01 14:10:41 +020012#include <linux/platform_data/x86/apple.h>
Andreas Noeverd6cc51c2014-06-03 22:04:00 +020013
14#include "tb.h"
Andreas Noever7adf6092014-06-03 22:04:01 +020015#include "tb_regs.h"
Mika Westerberg1752b9f2017-02-19 10:58:35 +020016#include "tunnel.h"
Andreas Noeverd6cc51c2014-06-03 22:04:00 +020017
/**
 * struct tb_cm - Simple Thunderbolt connection manager
 * @tunnel_list: List of active tunnels
 * @hotplug_active: tb_handle_hotplug will stop progressing plug
 *		    events and exit if this is not set (it needs to
 *		    acquire the lock one more time). Used to drain wq
 *		    after cfg has been paused.
 *
 * Stored as the domain private data, retrieved with tb_priv(tb).
 */
struct tb_cm {
	struct list_head tunnel_list;	/* entries are struct tb_tunnel.list */
	bool hotplug_active;		/* set in tb_start()/tb_resume_noirq() */
};
Andreas Noever9da672a2014-06-03 22:04:05 +020030
/**
 * struct tb_hotplug_event - deferred hotplug event
 * @work: Work item that runs tb_handle_hotplug() on tb->wq
 * @tb: Domain the event belongs to
 * @route: Route string of the switch the event originated from
 * @port: Port number on that switch
 * @unplug: %true for an unplug event, %false for a plug event
 *
 * Allocated by tb_queue_hotplug() and freed by tb_handle_hotplug()
 * after the event has been processed.
 */
struct tb_hotplug_event {
	struct work_struct work;
	struct tb *tb;
	u64 route;
	u8 port;
	bool unplug;
};
38
39static void tb_handle_hotplug(struct work_struct *work);
40
41static void tb_queue_hotplug(struct tb *tb, u64 route, u8 port, bool unplug)
42{
43 struct tb_hotplug_event *ev;
44
45 ev = kmalloc(sizeof(*ev), GFP_KERNEL);
46 if (!ev)
47 return;
48
49 ev->tb = tb;
50 ev->route = route;
51 ev->port = port;
52 ev->unplug = unplug;
53 INIT_WORK(&ev->work, tb_handle_hotplug);
54 queue_work(tb->wq, &ev->work);
55}
56
Andreas Noever9da672a2014-06-03 22:04:05 +020057/* enumeration & hot plug handling */
58
Mika Westerberg0414bec2017-02-19 23:43:26 +020059static void tb_discover_tunnels(struct tb_switch *sw)
60{
61 struct tb *tb = sw->tb;
62 struct tb_cm *tcm = tb_priv(tb);
63 struct tb_port *port;
Mika Westerberg0414bec2017-02-19 23:43:26 +020064
Mika Westerbergb433d012019-09-30 14:07:22 +030065 tb_switch_for_each_port(sw, port) {
Mika Westerberg0414bec2017-02-19 23:43:26 +020066 struct tb_tunnel *tunnel = NULL;
67
Mika Westerberg0414bec2017-02-19 23:43:26 +020068 switch (port->config.type) {
Mika Westerberg4f807e42018-09-17 16:30:49 +030069 case TB_TYPE_DP_HDMI_IN:
70 tunnel = tb_tunnel_discover_dp(tb, port);
71 break;
72
Mika Westerberg0414bec2017-02-19 23:43:26 +020073 case TB_TYPE_PCIE_DOWN:
74 tunnel = tb_tunnel_discover_pci(tb, port);
75 break;
76
77 default:
78 break;
79 }
80
Mika Westerberg4f807e42018-09-17 16:30:49 +030081 if (!tunnel)
82 continue;
83
84 if (tb_tunnel_is_pci(tunnel)) {
Mika Westerberg0414bec2017-02-19 23:43:26 +020085 struct tb_switch *parent = tunnel->dst_port->sw;
86
87 while (parent != tunnel->src_port->sw) {
88 parent->boot = true;
89 parent = tb_switch_parent(parent);
90 }
Mika Westerberg0414bec2017-02-19 23:43:26 +020091 }
Mika Westerberg4f807e42018-09-17 16:30:49 +030092
93 list_add_tail(&tunnel->list, &tcm->tunnel_list);
Mika Westerberg0414bec2017-02-19 23:43:26 +020094 }
95
Mika Westerbergb433d012019-09-30 14:07:22 +030096 tb_switch_for_each_port(sw, port) {
97 if (tb_port_has_remote(port))
98 tb_discover_tunnels(port->remote->sw);
Mika Westerberg0414bec2017-02-19 23:43:26 +020099 }
100}
Andreas Noever9da672a2014-06-03 22:04:05 +0200101
Mika Westerberg7ea4cd62018-09-28 16:41:01 +0300102static void tb_scan_xdomain(struct tb_port *port)
103{
104 struct tb_switch *sw = port->sw;
105 struct tb *tb = sw->tb;
106 struct tb_xdomain *xd;
107 u64 route;
108
109 route = tb_downstream_route(port);
110 xd = tb_xdomain_find_by_route(tb, route);
111 if (xd) {
112 tb_xdomain_put(xd);
113 return;
114 }
115
116 xd = tb_xdomain_alloc(tb, &sw->dev, route, tb->root_switch->uuid,
117 NULL);
118 if (xd) {
119 tb_port_at(route, sw)->xdomain = xd;
120 tb_xdomain_add(xd);
121 }
122}
123
Andreas Noever9da672a2014-06-03 22:04:05 +0200124static void tb_scan_port(struct tb_port *port);
125
126/**
127 * tb_scan_switch() - scan for and initialize downstream switches
128 */
129static void tb_scan_switch(struct tb_switch *sw)
130{
Mika Westerbergb433d012019-09-30 14:07:22 +0300131 struct tb_port *port;
132
133 tb_switch_for_each_port(sw, port)
134 tb_scan_port(port);
Andreas Noever9da672a2014-06-03 22:04:05 +0200135}
136
/**
 * tb_scan_port() - check for and initialize switches below port
 * @port: Port to scan
 *
 * If a new switch answers behind @port it is allocated, configured,
 * added to the device tree and linked to @port (including the dual-link
 * secondary port and lane bonding), and its own ports are then scanned
 * recursively. If the port instead faces another Thunderbolt domain an
 * XDomain is registered for it.
 */
static void tb_scan_port(struct tb_port *port)
{
	struct tb_cm *tcm = tb_priv(port->sw->tb);
	struct tb_port *upstream_port;
	struct tb_switch *sw;

	if (tb_is_upstream_port(port))
		return;

	/*
	 * A DP OUT adapter with HPD asserted but no tunnel yet means a
	 * monitor was connected before we were loaded — handle it
	 * through the normal hotplug path.
	 */
	if (tb_port_is_dpout(port) && tb_dp_port_hpd_is_active(port) == 1 &&
	    !tb_dp_port_is_enabled(port)) {
		tb_port_dbg(port, "DP adapter HPD set, queuing hotplug\n");
		tb_queue_hotplug(port->sw->tb, tb_route(port->sw), port->port,
				 false);
		return;
	}

	if (port->config.type != TB_TYPE_PORT)
		return;
	if (port->dual_link_port && port->link_nr)
		return; /*
			 * Downstream switch is reachable through two ports.
			 * Only scan on the primary port (link_nr == 0).
			 */
	if (tb_wait_for_port(port, false) <= 0)
		return;
	if (port->remote) {
		tb_port_dbg(port, "port already has a remote\n");
		return;
	}
	sw = tb_switch_alloc(port->sw->tb, &port->sw->dev,
			     tb_downstream_route(port));
	if (IS_ERR(sw)) {
		/*
		 * If there is an error accessing the connected switch
		 * it may be connected to another domain. Also we allow
		 * the other domain to be connected to a max depth switch.
		 */
		if (PTR_ERR(sw) == -EIO || PTR_ERR(sw) == -EADDRNOTAVAIL)
			tb_scan_xdomain(port);
		return;
	}

	if (tb_switch_configure(sw)) {
		tb_switch_put(sw);
		return;
	}

	/*
	 * If there was previously another domain connected remove it
	 * first.
	 */
	if (port->xdomain) {
		tb_xdomain_remove(port->xdomain);
		port->xdomain = NULL;
	}

	/*
	 * Do not send uevents until we have discovered all existing
	 * tunnels and know which switches were authorized already by
	 * the boot firmware.
	 */
	if (!tcm->hotplug_active)
		dev_set_uevent_suppress(&sw->dev, true);

	if (tb_switch_add(sw)) {
		tb_switch_put(sw);
		return;
	}

	/* Link the switches using both links if available */
	upstream_port = tb_upstream_port(sw);
	port->remote = upstream_port;
	upstream_port->remote = port;
	if (port->dual_link_port && upstream_port->dual_link_port) {
		port->dual_link_port->remote = upstream_port->dual_link_port;
		upstream_port->dual_link_port->remote = port->dual_link_port;
	}

	/* Enable lane bonding if supported */
	if (tb_switch_lane_bonding_enable(sw))
		tb_sw_warn(sw, "failed to enable lane bonding\n");

	/* Continue downstream from the switch we just added */
	tb_scan_switch(sw);
}
225
Mika Westerberg4f807e42018-09-17 16:30:49 +0300226static int tb_free_tunnel(struct tb *tb, enum tb_tunnel_type type,
227 struct tb_port *src_port, struct tb_port *dst_port)
228{
229 struct tb_cm *tcm = tb_priv(tb);
230 struct tb_tunnel *tunnel;
231
232 list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
233 if (tunnel->type == type &&
234 ((src_port && src_port == tunnel->src_port) ||
235 (dst_port && dst_port == tunnel->dst_port))) {
236 tb_tunnel_deactivate(tunnel);
237 list_del(&tunnel->list);
238 tb_tunnel_free(tunnel);
239 return 0;
240 }
241 }
242
243 return -ENODEV;
244}
245
Andreas Noever3364f0c2014-06-03 22:04:08 +0200246/**
247 * tb_free_invalid_tunnels() - destroy tunnels of devices that have gone away
248 */
249static void tb_free_invalid_tunnels(struct tb *tb)
250{
Mika Westerberg9d3cce02017-06-06 15:25:00 +0300251 struct tb_cm *tcm = tb_priv(tb);
Mika Westerberg93f36ad2017-02-19 13:48:29 +0200252 struct tb_tunnel *tunnel;
253 struct tb_tunnel *n;
Mika Westerberg9d3cce02017-06-06 15:25:00 +0300254
255 list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
Mika Westerberg93f36ad2017-02-19 13:48:29 +0200256 if (tb_tunnel_is_invalid(tunnel)) {
257 tb_tunnel_deactivate(tunnel);
Mika Westerberg9d3cce02017-06-06 15:25:00 +0300258 list_del(&tunnel->list);
Mika Westerberg93f36ad2017-02-19 13:48:29 +0200259 tb_tunnel_free(tunnel);
Andreas Noever3364f0c2014-06-03 22:04:08 +0200260 }
261 }
262}
263
264/**
Andreas Noever23dd5bb2014-06-03 22:04:12 +0200265 * tb_free_unplugged_children() - traverse hierarchy and free unplugged switches
266 */
267static void tb_free_unplugged_children(struct tb_switch *sw)
268{
Mika Westerbergb433d012019-09-30 14:07:22 +0300269 struct tb_port *port;
Mika Westerbergdfe40ca2019-03-07 15:26:45 +0200270
Mika Westerbergb433d012019-09-30 14:07:22 +0300271 tb_switch_for_each_port(sw, port) {
Mika Westerbergdfe40ca2019-03-07 15:26:45 +0200272 if (!tb_port_has_remote(port))
Andreas Noever23dd5bb2014-06-03 22:04:12 +0200273 continue;
Mika Westerbergdfe40ca2019-03-07 15:26:45 +0200274
Andreas Noever23dd5bb2014-06-03 22:04:12 +0200275 if (port->remote->sw->is_unplugged) {
Mika Westerberg91c0c122019-03-21 19:03:00 +0200276 tb_switch_lane_bonding_disable(port->remote->sw);
Mika Westerbergbfe778a2017-06-06 15:25:01 +0300277 tb_switch_remove(port->remote->sw);
Andreas Noever23dd5bb2014-06-03 22:04:12 +0200278 port->remote = NULL;
Mika Westerbergdfe40ca2019-03-07 15:26:45 +0200279 if (port->dual_link_port)
280 port->dual_link_port->remote = NULL;
Andreas Noever23dd5bb2014-06-03 22:04:12 +0200281 } else {
282 tb_free_unplugged_children(port->remote->sw);
283 }
284 }
285}
286
Andreas Noever23dd5bb2014-06-03 22:04:12 +0200287/**
Mika Westerberge78db6f2017-10-12 16:45:50 +0300288 * tb_find_port() - return the first port of @type on @sw or NULL
289 * @sw: Switch to find the port from
290 * @type: Port type to look for
Andreas Noever3364f0c2014-06-03 22:04:08 +0200291 */
Mika Westerberge78db6f2017-10-12 16:45:50 +0300292static struct tb_port *tb_find_port(struct tb_switch *sw,
293 enum tb_port_type type)
Andreas Noever3364f0c2014-06-03 22:04:08 +0200294{
Mika Westerbergb433d012019-09-30 14:07:22 +0300295 struct tb_port *port;
296
297 tb_switch_for_each_port(sw, port) {
298 if (port->config.type == type)
299 return port;
300 }
301
Andreas Noever3364f0c2014-06-03 22:04:08 +0200302 return NULL;
303}
304
305/**
Mika Westerberge78db6f2017-10-12 16:45:50 +0300306 * tb_find_unused_port() - return the first inactive port on @sw
307 * @sw: Switch to find the port on
308 * @type: Port type to look for
Andreas Noever3364f0c2014-06-03 22:04:08 +0200309 */
Mika Westerberge78db6f2017-10-12 16:45:50 +0300310static struct tb_port *tb_find_unused_port(struct tb_switch *sw,
311 enum tb_port_type type)
Andreas Noever3364f0c2014-06-03 22:04:08 +0200312{
Mika Westerbergb433d012019-09-30 14:07:22 +0300313 struct tb_port *port;
Mika Westerberge78db6f2017-10-12 16:45:50 +0300314
Mika Westerbergb433d012019-09-30 14:07:22 +0300315 tb_switch_for_each_port(sw, port) {
316 if (tb_is_upstream_port(port))
Andreas Noever3364f0c2014-06-03 22:04:08 +0200317 continue;
Mika Westerbergb433d012019-09-30 14:07:22 +0300318 if (port->config.type != type)
Andreas Noever3364f0c2014-06-03 22:04:08 +0200319 continue;
Mika Westerbergb433d012019-09-30 14:07:22 +0300320 if (port->cap_adap)
Andreas Noever3364f0c2014-06-03 22:04:08 +0200321 continue;
Mika Westerbergb433d012019-09-30 14:07:22 +0300322 if (tb_port_is_enabled(port))
Andreas Noever3364f0c2014-06-03 22:04:08 +0200323 continue;
Mika Westerbergb433d012019-09-30 14:07:22 +0300324 return port;
Andreas Noever3364f0c2014-06-03 22:04:08 +0200325 }
326 return NULL;
327}
328
/*
 * tb_find_pcie_down() - pick the PCIe downstream adapter to tunnel through
 * @sw: Switch that provides the downstream adapter
 * @port: Thunderbolt port the device is connected to
 *
 * On the root switch a fixed Thunderbolt-port-to-PCIe-down-port mapping
 * is used so a device always lands in the same PCIe hierarchy; on other
 * switches (and as fallback) any unused PCIe downstream adapter is
 * returned. May return NULL if no adapter is available.
 */
static struct tb_port *tb_find_pcie_down(struct tb_switch *sw,
					 const struct tb_port *port)
{
	/*
	 * To keep plugging devices consistently in the same PCIe
	 * hierarchy, do mapping here for root switch downstream PCIe
	 * ports.
	 */
	if (!tb_route(sw)) {
		int phy_port = tb_phy_port_from_link(port->port);
		int index;

		/*
		 * Hard-coded Thunderbolt port to PCIe down port mapping
		 * per controller.
		 */
		if (tb_switch_is_cr(sw))
			index = !phy_port ? 6 : 7;
		else if (tb_switch_is_fr(sw))
			index = !phy_port ? 6 : 8;
		else
			goto out;	/* unknown controller — use fallback */

		/* Validate the hard-coding */
		if (WARN_ON(index > sw->config.max_port_number))
			goto out;
		if (WARN_ON(!tb_port_is_pcie_down(&sw->ports[index])))
			goto out;
		if (WARN_ON(tb_pci_port_is_enabled(&sw->ports[index])))
			goto out;

		return &sw->ports[index];
	}

out:
	return tb_find_unused_port(sw, TB_TYPE_PCIE_DOWN);
}
366
Mika Westerberg4f807e42018-09-17 16:30:49 +0300367static int tb_tunnel_dp(struct tb *tb, struct tb_port *out)
368{
369 struct tb_cm *tcm = tb_priv(tb);
370 struct tb_switch *sw = out->sw;
371 struct tb_tunnel *tunnel;
372 struct tb_port *in;
373
374 if (tb_port_is_enabled(out))
375 return 0;
376
377 do {
378 sw = tb_to_switch(sw->dev.parent);
379 if (!sw)
380 return 0;
381 in = tb_find_unused_port(sw, TB_TYPE_DP_HDMI_IN);
382 } while (!in);
383
384 tunnel = tb_tunnel_alloc_dp(tb, in, out);
385 if (!tunnel) {
386 tb_port_dbg(out, "DP tunnel allocation failed\n");
387 return -ENOMEM;
388 }
389
390 if (tb_tunnel_activate(tunnel)) {
391 tb_port_info(out, "DP tunnel activation failed, aborting\n");
392 tb_tunnel_free(tunnel);
393 return -EIO;
394 }
395
396 list_add_tail(&tunnel->list, &tcm->tunnel_list);
397 return 0;
398}
399
400static void tb_teardown_dp(struct tb *tb, struct tb_port *out)
401{
402 tb_free_tunnel(tb, TB_TUNNEL_DP, NULL, out);
403}
404
Mika Westerberg99cabbb2018-12-30 21:34:08 +0200405static int tb_tunnel_pci(struct tb *tb, struct tb_switch *sw)
406{
407 struct tb_port *up, *down, *port;
408 struct tb_cm *tcm = tb_priv(tb);
409 struct tb_switch *parent_sw;
410 struct tb_tunnel *tunnel;
411
Mika Westerberge78db6f2017-10-12 16:45:50 +0300412 up = tb_find_port(sw, TB_TYPE_PCIE_UP);
Mika Westerberg99cabbb2018-12-30 21:34:08 +0200413 if (!up)
414 return 0;
415
416 /*
417 * Look up available down port. Since we are chaining it should
418 * be found right above this switch.
419 */
420 parent_sw = tb_to_switch(sw->dev.parent);
421 port = tb_port_at(tb_route(sw), parent_sw);
422 down = tb_find_pcie_down(parent_sw, port);
423 if (!down)
424 return 0;
425
426 tunnel = tb_tunnel_alloc_pci(tb, up, down);
427 if (!tunnel)
428 return -ENOMEM;
429
430 if (tb_tunnel_activate(tunnel)) {
431 tb_port_info(up,
432 "PCIe tunnel activation failed, aborting\n");
433 tb_tunnel_free(tunnel);
434 return -EIO;
435 }
436
437 list_add_tail(&tunnel->list, &tcm->tunnel_list);
438 return 0;
Andreas Noever3364f0c2014-06-03 22:04:08 +0200439}
Andreas Noever9da672a2014-06-03 22:04:05 +0200440
/*
 * tb_approve_xdomain_paths() - set up DMA paths towards another domain
 * @tb: Domain
 * @xd: XDomain describing the remote host
 *
 * Builds and activates a DMA tunnel between the host NHI adapter and
 * the port behind which @xd sits, using the transmit/receive rings and
 * paths negotiated by the two domains. Returns %0 on success, %-ENOMEM
 * or %-EIO on failure.
 */
static int tb_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_port *nhi_port, *dst_port;
	struct tb_tunnel *tunnel;
	struct tb_switch *sw;

	sw = tb_to_switch(xd->dev.parent);
	dst_port = tb_port_at(xd->route, sw);
	nhi_port = tb_find_port(tb->root_switch, TB_TYPE_NHI);

	/* tb->lock protects tunnel activation and the tunnel list */
	mutex_lock(&tb->lock);
	tunnel = tb_tunnel_alloc_dma(tb, nhi_port, dst_port, xd->transmit_ring,
				     xd->transmit_path, xd->receive_ring,
				     xd->receive_path);
	if (!tunnel) {
		mutex_unlock(&tb->lock);
		return -ENOMEM;
	}

	if (tb_tunnel_activate(tunnel)) {
		tb_port_info(nhi_port,
			     "DMA tunnel activation failed, aborting\n");
		tb_tunnel_free(tunnel);
		mutex_unlock(&tb->lock);
		return -EIO;
	}

	list_add_tail(&tunnel->list, &tcm->tunnel_list);
	mutex_unlock(&tb->lock);
	return 0;
}
473
474static void __tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
475{
476 struct tb_port *dst_port;
477 struct tb_switch *sw;
478
479 sw = tb_to_switch(xd->dev.parent);
480 dst_port = tb_port_at(xd->route, sw);
481
482 /*
483 * It is possible that the tunnel was already teared down (in
484 * case of cable disconnect) so it is fine if we cannot find it
485 * here anymore.
486 */
487 tb_free_tunnel(tb, TB_TUNNEL_DMA, NULL, dst_port);
488}
489
490static int tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
491{
492 if (!xd->is_unplugged) {
493 mutex_lock(&tb->lock);
494 __tb_disconnect_xdomain_paths(tb, xd);
495 mutex_unlock(&tb->lock);
496 }
497 return 0;
498}
499
Andreas Noeverd6cc51c2014-06-03 22:04:00 +0200500/* hotplug handling */
501
/**
 * tb_handle_hotplug() - handle hotplug event
 *
 * Executes on tb->wq. Takes tb->lock for the whole duration; bails out
 * early when hotplug handling has been paused (init/suspend/shutdown).
 * Frees the event allocated by tb_queue_hotplug() before returning.
 */
static void tb_handle_hotplug(struct work_struct *work)
{
	struct tb_hotplug_event *ev = container_of(work, typeof(*ev), work);
	struct tb *tb = ev->tb;
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_switch *sw;
	struct tb_port *port;
	mutex_lock(&tb->lock);
	if (!tcm->hotplug_active)
		goto out; /* during init, suspend or shutdown */

	/* Takes a reference that is released at put_sw below */
	sw = tb_switch_find_by_route(tb, ev->route);
	if (!sw) {
		tb_warn(tb,
			"hotplug event from non existent switch %llx:%x (unplug: %d)\n",
			ev->route, ev->port, ev->unplug);
		goto out;
	}
	if (ev->port > sw->config.max_port_number) {
		tb_warn(tb,
			"hotplug event from non existent port %llx:%x (unplug: %d)\n",
			ev->route, ev->port, ev->unplug);
		goto put_sw;
	}
	port = &sw->ports[ev->port];
	if (tb_is_upstream_port(port)) {
		tb_dbg(tb, "hotplug event for upstream port %llx:%x (unplug: %d)\n",
		       ev->route, ev->port, ev->unplug);
		goto put_sw;
	}
	if (ev->unplug) {
		if (tb_port_has_remote(port)) {
			/* A whole downstream switch went away */
			tb_port_dbg(port, "switch unplugged\n");
			tb_sw_set_unplugged(port->remote->sw);
			tb_free_invalid_tunnels(tb);
			tb_switch_lane_bonding_disable(port->remote->sw);
			tb_switch_remove(port->remote->sw);
			port->remote = NULL;
			if (port->dual_link_port)
				port->dual_link_port->remote = NULL;
		} else if (port->xdomain) {
			struct tb_xdomain *xd = tb_xdomain_get(port->xdomain);

			tb_port_dbg(port, "xdomain unplugged\n");
			/*
			 * Service drivers are unbound during
			 * tb_xdomain_remove() so setting XDomain as
			 * unplugged here prevents deadlock if they call
			 * tb_xdomain_disable_paths(). We will tear down
			 * the path below.
			 */
			xd->is_unplugged = true;
			tb_xdomain_remove(xd);
			port->xdomain = NULL;
			__tb_disconnect_xdomain_paths(tb, xd);
			tb_xdomain_put(xd);
		} else if (tb_port_is_dpout(port)) {
			/* Monitor disconnected from a DP OUT adapter */
			tb_teardown_dp(tb, port);
		} else {
			tb_port_dbg(port,
				    "got unplug event for disconnected port, ignoring\n");
		}
	} else if (port->remote) {
		tb_port_dbg(port, "got plug event for connected port, ignoring\n");
	} else {
		if (tb_port_is_null(port)) {
			/* New device behind a lane adapter */
			tb_port_dbg(port, "hotplug: scanning\n");
			tb_scan_port(port);
			if (!port->remote)
				tb_port_dbg(port, "hotplug: no switch found\n");
		} else if (tb_port_is_dpout(port)) {
			/* Monitor connected to a DP OUT adapter */
			tb_tunnel_dp(tb, port);
		}
	}

put_sw:
	tb_switch_put(sw);
out:
	mutex_unlock(&tb->lock);
	kfree(ev);
}
588
589/**
590 * tb_schedule_hotplug_handler() - callback function for the control channel
591 *
592 * Delegates to tb_handle_hotplug.
593 */
Mika Westerberg81a54b52017-06-06 15:25:09 +0300594static void tb_handle_event(struct tb *tb, enum tb_cfg_pkg_type type,
595 const void *buf, size_t size)
Andreas Noeverd6cc51c2014-06-03 22:04:00 +0200596{
Mika Westerberg81a54b52017-06-06 15:25:09 +0300597 const struct cfg_event_pkg *pkg = buf;
Mika Westerberg81a54b52017-06-06 15:25:09 +0300598 u64 route;
599
600 if (type != TB_CFG_PKG_EVENT) {
601 tb_warn(tb, "unexpected event %#x, ignoring\n", type);
602 return;
603 }
604
605 route = tb_cfg_get_route(&pkg->header);
606
607 if (tb_cfg_error(tb->ctl, route, pkg->port,
608 TB_CFG_ERROR_ACK_PLUG_EVENT)) {
609 tb_warn(tb, "could not ack plug event on %llx:%x\n", route,
610 pkg->port);
611 }
612
Mika Westerberg4f807e42018-09-17 16:30:49 +0300613 tb_queue_hotplug(tb, route, pkg->port, pkg->unplug);
Andreas Noeverd6cc51c2014-06-03 22:04:00 +0200614}
615
Mika Westerberg9d3cce02017-06-06 15:25:00 +0300616static void tb_stop(struct tb *tb)
Andreas Noeverd6cc51c2014-06-03 22:04:00 +0200617{
Mika Westerberg9d3cce02017-06-06 15:25:00 +0300618 struct tb_cm *tcm = tb_priv(tb);
Mika Westerberg93f36ad2017-02-19 13:48:29 +0200619 struct tb_tunnel *tunnel;
620 struct tb_tunnel *n;
Andreas Noever3364f0c2014-06-03 22:04:08 +0200621
Andreas Noever3364f0c2014-06-03 22:04:08 +0200622 /* tunnels are only present after everything has been initialized */
Mika Westerberg7ea4cd62018-09-28 16:41:01 +0300623 list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
624 /*
625 * DMA tunnels require the driver to be functional so we
626 * tear them down. Other protocol tunnels can be left
627 * intact.
628 */
629 if (tb_tunnel_is_dma(tunnel))
630 tb_tunnel_deactivate(tunnel);
Mika Westerberg93f36ad2017-02-19 13:48:29 +0200631 tb_tunnel_free(tunnel);
Mika Westerberg7ea4cd62018-09-28 16:41:01 +0300632 }
Mika Westerbergbfe778a2017-06-06 15:25:01 +0300633 tb_switch_remove(tb->root_switch);
Mika Westerberg9d3cce02017-06-06 15:25:00 +0300634 tcm->hotplug_active = false; /* signal tb_handle_hotplug to quit */
Andreas Noeverd6cc51c2014-06-03 22:04:00 +0200635}
636
Mika Westerberg99cabbb2018-12-30 21:34:08 +0200637static int tb_scan_finalize_switch(struct device *dev, void *data)
638{
639 if (tb_is_switch(dev)) {
640 struct tb_switch *sw = tb_to_switch(dev);
641
642 /*
643 * If we found that the switch was already setup by the
644 * boot firmware, mark it as authorized now before we
645 * send uevent to userspace.
646 */
647 if (sw->boot)
648 sw->authorized = 1;
649
650 dev_set_uevent_suppress(dev, false);
651 kobject_uevent(&dev->kobj, KOBJ_ADD);
652 device_for_each_child(dev, NULL, tb_scan_finalize_switch);
653 }
654
655 return 0;
656}
657
/*
 * tb_start() - ->start hook: bring up the domain
 * @tb: Domain to start
 *
 * Allocates, configures and registers the root switch, scans for
 * devices and firmware-created tunnels, exposes everything to
 * userspace, and finally enables hotplug event processing. Returns %0
 * on success, negative errno otherwise.
 */
static int tb_start(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);
	int ret;

	tb->root_switch = tb_switch_alloc(tb, &tb->dev, 0);
	if (IS_ERR(tb->root_switch))
		return PTR_ERR(tb->root_switch);

	/*
	 * ICM firmware upgrade needs running firmware and in native
	 * mode that is not available so disable firmware upgrade of the
	 * root switch.
	 */
	tb->root_switch->no_nvm_upgrade = true;

	ret = tb_switch_configure(tb->root_switch);
	if (ret) {
		tb_switch_put(tb->root_switch);
		return ret;
	}

	/* Announce the switch to the world */
	ret = tb_switch_add(tb->root_switch);
	if (ret) {
		tb_switch_put(tb->root_switch);
		return ret;
	}

	/* Full scan to discover devices added before the driver was loaded. */
	tb_scan_switch(tb->root_switch);
	/* Find out tunnels created by the boot firmware */
	tb_discover_tunnels(tb->root_switch);
	/* Make the discovered switches available to the userspace */
	device_for_each_child(&tb->root_switch->dev, NULL,
			      tb_scan_finalize_switch);

	/* Allow tb_handle_hotplug to progress events */
	tcm->hotplug_active = true;
	return 0;
}
699
Mika Westerberg9d3cce02017-06-06 15:25:00 +0300700static int tb_suspend_noirq(struct tb *tb)
Andreas Noever23dd5bb2014-06-03 22:04:12 +0200701{
Mika Westerberg9d3cce02017-06-06 15:25:00 +0300702 struct tb_cm *tcm = tb_priv(tb);
703
Mika Westerbergdaa51402018-10-01 12:31:19 +0300704 tb_dbg(tb, "suspending...\n");
Andreas Noever23dd5bb2014-06-03 22:04:12 +0200705 tb_switch_suspend(tb->root_switch);
Mika Westerberg9d3cce02017-06-06 15:25:00 +0300706 tcm->hotplug_active = false; /* signal tb_handle_hotplug to quit */
Mika Westerbergdaa51402018-10-01 12:31:19 +0300707 tb_dbg(tb, "suspend finished\n");
Mika Westerberg9d3cce02017-06-06 15:25:00 +0300708
709 return 0;
Andreas Noever23dd5bb2014-06-03 22:04:12 +0200710}
711
Mika Westerberg91c0c122019-03-21 19:03:00 +0200712static void tb_restore_children(struct tb_switch *sw)
713{
714 struct tb_port *port;
715
716 tb_switch_for_each_port(sw, port) {
717 if (!tb_port_has_remote(port))
718 continue;
719
720 if (tb_switch_lane_bonding_enable(port->remote->sw))
721 dev_warn(&sw->dev, "failed to restore lane bonding\n");
722
723 tb_restore_children(port->remote->sw);
724 }
725}
726
/*
 * tb_resume_noirq() - ->resume_noirq hook: bring the domain back up
 * @tb: Domain to resume
 *
 * Resets and resumes the switch tree, prunes devices and tunnels that
 * disappeared during sleep, restores lane bonding, restarts the
 * remaining tunnels and re-enables hotplug handling. Always returns %0.
 */
static int tb_resume_noirq(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel, *n;

	tb_dbg(tb, "resuming...\n");

	/* remove any pci devices the firmware might have setup */
	tb_switch_reset(tb, 0);

	tb_switch_resume(tb->root_switch);
	tb_free_invalid_tunnels(tb);
	tb_free_unplugged_children(tb->root_switch);
	tb_restore_children(tb->root_switch);
	list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list)
		tb_tunnel_restart(tunnel);
	if (!list_empty(&tcm->tunnel_list)) {
		/*
		 * the pcie links need some time to get going.
		 * 100ms works for me...
		 */
		tb_dbg(tb, "tunnels restarted, sleeping for 100ms\n");
		msleep(100);
	}
	/* Allow tb_handle_hotplug to progress events */
	tcm->hotplug_active = true;
	tb_dbg(tb, "resume finished\n");

	return 0;
}
757
Mika Westerberg7ea4cd62018-09-28 16:41:01 +0300758static int tb_free_unplugged_xdomains(struct tb_switch *sw)
759{
Mika Westerbergb433d012019-09-30 14:07:22 +0300760 struct tb_port *port;
761 int ret = 0;
Mika Westerberg7ea4cd62018-09-28 16:41:01 +0300762
Mika Westerbergb433d012019-09-30 14:07:22 +0300763 tb_switch_for_each_port(sw, port) {
Mika Westerberg7ea4cd62018-09-28 16:41:01 +0300764 if (tb_is_upstream_port(port))
765 continue;
766 if (port->xdomain && port->xdomain->is_unplugged) {
767 tb_xdomain_remove(port->xdomain);
768 port->xdomain = NULL;
769 ret++;
770 } else if (port->remote) {
771 ret += tb_free_unplugged_xdomains(port->remote->sw);
772 }
773 }
774
775 return ret;
776}
777
778static void tb_complete(struct tb *tb)
779{
780 /*
781 * Release any unplugged XDomains and if there is a case where
782 * another domain is swapped in place of unplugged XDomain we
783 * need to run another rescan.
784 */
785 mutex_lock(&tb->lock);
786 if (tb_free_unplugged_xdomains(tb->root_switch))
787 tb_scan_switch(tb->root_switch);
788 mutex_unlock(&tb->lock);
789}
790
/* Connection manager operations for the software (native) implementation */
static const struct tb_cm_ops tb_cm_ops = {
	.start = tb_start,
	.stop = tb_stop,
	.suspend_noirq = tb_suspend_noirq,
	.resume_noirq = tb_resume_noirq,
	.complete = tb_complete,
	.handle_event = tb_handle_event,
	.approve_switch = tb_tunnel_pci,	/* approving == PCIe tunneling */
	.approve_xdomain_paths = tb_approve_xdomain_paths,
	.disconnect_xdomain_paths = tb_disconnect_xdomain_paths,
};
802
803struct tb *tb_probe(struct tb_nhi *nhi)
804{
805 struct tb_cm *tcm;
806 struct tb *tb;
807
Lukas Wunner630b3af2017-08-01 14:10:41 +0200808 if (!x86_apple_machine)
Mika Westerbergf67cf492017-06-06 15:25:16 +0300809 return NULL;
810
Mika Westerberg9d3cce02017-06-06 15:25:00 +0300811 tb = tb_domain_alloc(nhi, sizeof(*tcm));
812 if (!tb)
813 return NULL;
814
Mika Westerberg99cabbb2018-12-30 21:34:08 +0200815 tb->security_level = TB_SECURITY_USER;
Mika Westerberg9d3cce02017-06-06 15:25:00 +0300816 tb->cm_ops = &tb_cm_ops;
817
818 tcm = tb_priv(tb);
819 INIT_LIST_HEAD(&tcm->tunnel_list);
820
821 return tb;
Andreas Noever23dd5bb2014-06-03 22:04:12 +0200822}