// SPDX-License-Identifier: GPL-2.0
/*
 * Thunderbolt driver - bus logic (NHI independent)
 *
 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
 * Copyright (C) 2019, Intel Corporation
 */

#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/platform_data/x86/apple.h>

#include "tb.h"
#include "tb_regs.h"
#include "tunnel.h"

/**
 * struct tb_cm - Simple Thunderbolt connection manager
 * @tunnel_list: List of active tunnels
 * @hotplug_active: tb_handle_hotplug will stop progressing plug
 *		    events and exit if this is not set (it needs to
 *		    acquire the lock one more time). Used to drain wq
 *		    after cfg has been paused.
 */
struct tb_cm {
	struct list_head tunnel_list;
	bool hotplug_active;
};

struct tb_hotplug_event {
	struct work_struct work;
	struct tb *tb;
	u64 route;
	u8 port;
	bool unplug;
};

static void tb_handle_hotplug(struct work_struct *work);

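/**
 * tb_queue_hotplug() - defer a plug event to the domain work queue
 *
 * Packs the event into a freshly allocated struct tb_hotplug_event and
 * queues tb_handle_hotplug() on @tb->wq. If the allocation fails the
 * event is silently dropped.
 */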
static void tb_queue_hotplug(struct tb *tb, u64 route, u8 port, bool unplug)
{
	struct tb_hotplug_event *ev;

	ev = kmalloc(sizeof(*ev), GFP_KERNEL);
	if (!ev)
		return;

	ev->tb = tb;
	ev->route = route;
	ev->port = port;
	ev->unplug = unplug;
	INIT_WORK(&ev->work, tb_handle_hotplug);
	queue_work(tb->wq, &ev->work);
}

/* enumeration & hot plug handling */

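/**
 * tb_discover_tunnels() - discover tunnels set up by the boot firmware
 *
 * Checks every DP IN and PCIe down adapter of @sw for an already active
 * tunnel and adds the ones found to the connection manager's tunnel
 * list. Switches along a discovered PCIe tunnel get ->boot set so that
 * they can be marked authorized later. Recurses into all switches
 * below @sw.
 */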
static void tb_discover_tunnels(struct tb_switch *sw)
{
	struct tb *tb = sw->tb;
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_port *port;
	int i;

	for (i = 1; i <= sw->config.max_port_number; i++) {
		struct tb_tunnel *tunnel = NULL;

		port = &sw->ports[i];
		switch (port->config.type) {
		case TB_TYPE_DP_HDMI_IN:
			tunnel = tb_tunnel_discover_dp(tb, port);
			break;

		case TB_TYPE_PCIE_DOWN:
			tunnel = tb_tunnel_discover_pci(tb, port);
			break;

		default:
			break;
		}

		if (!tunnel)
			continue;

		if (tb_tunnel_is_pci(tunnel)) {
			struct tb_switch *parent = tunnel->dst_port->sw;

			while (parent != tunnel->src_port->sw) {
				parent->boot = true;
				parent = tb_switch_parent(parent);
			}
		}

		list_add_tail(&tunnel->list, &tcm->tunnel_list);
	}

	for (i = 1; i <= sw->config.max_port_number; i++) {
		if (tb_port_has_remote(&sw->ports[i]))
			tb_discover_tunnels(sw->ports[i].remote->sw);
	}
}

static void tb_scan_port(struct tb_port *port);

/**
 * tb_scan_switch() - scan for and initialize downstream switches
 */
static void tb_scan_switch(struct tb_switch *sw)
{
	int i;
	for (i = 1; i <= sw->config.max_port_number; i++)
		tb_scan_port(&sw->ports[i]);
}

/**
 * tb_scan_port() - check for and initialize switches below port
 */
static void tb_scan_port(struct tb_port *port)
{
	struct tb_cm *tcm = tb_priv(port->sw->tb);
	struct tb_port *upstream_port;
	struct tb_switch *sw;

	if (tb_is_upstream_port(port))
		return;

	if (tb_port_is_dpout(port) && tb_dp_port_hpd_is_active(port) == 1 &&
	    !tb_dp_port_is_enabled(port)) {
		tb_port_dbg(port, "DP adapter HPD set, queuing hotplug\n");
		tb_queue_hotplug(port->sw->tb, tb_route(port->sw), port->port,
				 false);
		return;
	}

	if (port->config.type != TB_TYPE_PORT)
		return;
	if (port->dual_link_port && port->link_nr)
		return; /*
			 * Downstream switch is reachable through two ports.
			 * Only scan on the primary port (link_nr == 0).
			 */
	if (tb_wait_for_port(port, false) <= 0)
		return;
	if (port->remote) {
		tb_port_WARN(port, "port already has a remote!\n");
		return;
	}
	sw = tb_switch_alloc(port->sw->tb, &port->sw->dev,
			     tb_downstream_route(port));
	if (!sw)
		return;

	if (tb_switch_configure(sw)) {
		tb_switch_put(sw);
		return;
	}

	/*
	 * Do not send uevents until we have discovered all existing
	 * tunnels and know which switches were authorized already by
	 * the boot firmware.
	 */
	if (!tcm->hotplug_active)
		dev_set_uevent_suppress(&sw->dev, true);

	if (tb_switch_add(sw)) {
		tb_switch_put(sw);
		return;
	}

	/* Link the switches using both links if available */
	upstream_port = tb_upstream_port(sw);
	port->remote = upstream_port;
	upstream_port->remote = port;
	if (port->dual_link_port && upstream_port->dual_link_port) {
		port->dual_link_port->remote = upstream_port->dual_link_port;
		upstream_port->dual_link_port->remote = port->dual_link_port;
	}

	tb_scan_switch(sw);
}
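/**
 * tb_free_tunnel() - tear down the tunnel matching @type and an endpoint
 *
 * Finds the first tunnel of @type starting at @src_port or ending at
 * @dst_port (either may be NULL), deactivates it, unlinks it from the
 * tunnel list and frees it. Returns -ENODEV if no such tunnel exists.
 */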
static int tb_free_tunnel(struct tb *tb, enum tb_tunnel_type type,
			  struct tb_port *src_port, struct tb_port *dst_port)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel;

	list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
		if (tunnel->type == type &&
		    ((src_port && src_port == tunnel->src_port) ||
		     (dst_port && dst_port == tunnel->dst_port))) {
			tb_tunnel_deactivate(tunnel);
			list_del(&tunnel->list);
			tb_tunnel_free(tunnel);
			return 0;
		}
	}

	return -ENODEV;
}

/**
 * tb_free_invalid_tunnels() - destroy tunnels of devices that have gone away
 */
static void tb_free_invalid_tunnels(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel;
	struct tb_tunnel *n;

	list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
		if (tb_tunnel_is_invalid(tunnel)) {
			tb_tunnel_deactivate(tunnel);
			list_del(&tunnel->list);
			tb_tunnel_free(tunnel);
		}
	}
}

/**
 * tb_free_unplugged_children() - traverse hierarchy and free unplugged switches
 */
static void tb_free_unplugged_children(struct tb_switch *sw)
{
	int i;
	for (i = 1; i <= sw->config.max_port_number; i++) {
		struct tb_port *port = &sw->ports[i];

		if (!tb_port_has_remote(port))
			continue;

		if (port->remote->sw->is_unplugged) {
			tb_switch_remove(port->remote->sw);
			port->remote = NULL;
			if (port->dual_link_port)
				port->dual_link_port->remote = NULL;
		} else {
			tb_free_unplugged_children(port->remote->sw);
		}
	}
}

/**
 * tb_find_port() - return the first port of @type on @sw or NULL
 * @sw: Switch to find the port from
 * @type: Port type to look for
 */
static struct tb_port *tb_find_port(struct tb_switch *sw,
				    enum tb_port_type type)
{
	int i;
	for (i = 1; i <= sw->config.max_port_number; i++)
		if (sw->ports[i].config.type == type)
			return &sw->ports[i];
	return NULL;
}

/**
 * tb_find_unused_port() - return the first inactive port on @sw
 * @sw: Switch to find the port on
 * @type: Port type to look for
 */
static struct tb_port *tb_find_unused_port(struct tb_switch *sw,
					   enum tb_port_type type)
{
	int i;

	for (i = 1; i <= sw->config.max_port_number; i++) {
		if (tb_is_upstream_port(&sw->ports[i]))
			continue;
		if (sw->ports[i].config.type != type)
			continue;
		if (!sw->ports[i].cap_adap)
			continue;
		if (tb_port_is_enabled(&sw->ports[i]))
			continue;
		return &sw->ports[i];
	}
	return NULL;
}
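/**
 * tb_find_pcie_down() - find the PCIe down port for @port
 *
 * On the root switch the Thunderbolt port to PCIe down port mapping is
 * hard-coded per controller (tb_switch_is_cr()/tb_switch_is_fr(), i.e.
 * Cactus Ridge and Falcon Ridge) so that devices always end up in the
 * same PCIe hierarchy. Everywhere else the first unused PCIe down port
 * is used.
 */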
static struct tb_port *tb_find_pcie_down(struct tb_switch *sw,
					 const struct tb_port *port)
{
	/*
	 * To keep plugging devices consistently in the same PCIe
	 * hierarchy, do mapping here for root switch downstream PCIe
	 * ports.
	 */
	if (!tb_route(sw)) {
		int phy_port = tb_phy_port_from_link(port->port);
		int index;

		/*
		 * Hard-coded Thunderbolt port to PCIe down port mapping
		 * per controller.
		 */
		if (tb_switch_is_cr(sw))
			index = !phy_port ? 6 : 7;
		else if (tb_switch_is_fr(sw))
			index = !phy_port ? 6 : 8;
		else
			goto out;

		/* Validate the hard-coding */
		if (WARN_ON(index > sw->config.max_port_number))
			goto out;
		if (WARN_ON(!tb_port_is_pcie_down(&sw->ports[index])))
			goto out;
		if (WARN_ON(tb_pci_port_is_enabled(&sw->ports[index])))
			goto out;

		return &sw->ports[index];
	}

out:
	return tb_find_unused_port(sw, TB_TYPE_PCIE_DOWN);
}
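/**
 * tb_tunnel_dp() - tunnel DisplayPort to the given DP OUT port
 *
 * Walks up the switch chain from @out until a switch with an unused DP
 * IN adapter is found, then allocates and activates a DP tunnel
 * between that DP IN and @out. Does nothing if @out is already
 * enabled.
 */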
static int tb_tunnel_dp(struct tb *tb, struct tb_port *out)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_switch *sw = out->sw;
	struct tb_tunnel *tunnel;
	struct tb_port *in;

	if (tb_port_is_enabled(out))
		return 0;

	do {
		sw = tb_to_switch(sw->dev.parent);
		if (!sw)
			return 0;
		in = tb_find_unused_port(sw, TB_TYPE_DP_HDMI_IN);
	} while (!in);

	tunnel = tb_tunnel_alloc_dp(tb, in, out);
	if (!tunnel) {
		tb_port_dbg(out, "DP tunnel allocation failed\n");
		return -ENOMEM;
	}

	if (tb_tunnel_activate(tunnel)) {
		tb_port_info(out, "DP tunnel activation failed, aborting\n");
		tb_tunnel_free(tunnel);
		return -EIO;
	}

	list_add_tail(&tunnel->list, &tcm->tunnel_list);
	return 0;
}

static void tb_teardown_dp(struct tb *tb, struct tb_port *out)
{
	tb_free_tunnel(tb, TB_TUNNEL_DP, NULL, out);
}

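/**
 * tb_tunnel_pci() - tunnel PCIe to the given switch
 *
 * Establishes a PCIe tunnel between the PCIe upstream adapter of @sw
 * and a PCIe down port on the parent switch. Wired up as the
 * ->approve_switch callback, so it runs when a switch is authorized.
 * Also returns 0 when @sw has no PCIe upstream adapter or no down
 * port is available.
 */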
static int tb_tunnel_pci(struct tb *tb, struct tb_switch *sw)
{
	struct tb_port *up, *down, *port;
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_switch *parent_sw;
	struct tb_tunnel *tunnel;

	up = tb_find_port(sw, TB_TYPE_PCIE_UP);
	if (!up)
		return 0;

	/*
	 * Look up available down port. Since we are chaining it should
	 * be found right above this switch.
	 */
	parent_sw = tb_to_switch(sw->dev.parent);
	port = tb_port_at(tb_route(sw), parent_sw);
	down = tb_find_pcie_down(parent_sw, port);
	if (!down)
		return 0;

	tunnel = tb_tunnel_alloc_pci(tb, up, down);
	if (!tunnel)
		return -ENOMEM;

	if (tb_tunnel_activate(tunnel)) {
		tb_port_info(up,
			     "PCIe tunnel activation failed, aborting\n");
		tb_tunnel_free(tunnel);
		return -EIO;
	}

	list_add_tail(&tunnel->list, &tcm->tunnel_list);
	return 0;
}

/* hotplug handling */

/**
 * tb_handle_hotplug() - handle hotplug event
 *
 * Executes on tb->wq.
 */
static void tb_handle_hotplug(struct work_struct *work)
{
	struct tb_hotplug_event *ev = container_of(work, typeof(*ev), work);
	struct tb *tb = ev->tb;
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_switch *sw;
	struct tb_port *port;
	mutex_lock(&tb->lock);
	if (!tcm->hotplug_active)
		goto out; /* during init, suspend or shutdown */

	sw = tb_switch_find_by_route(tb, ev->route);
	if (!sw) {
		tb_warn(tb,
			"hotplug event from non existent switch %llx:%x (unplug: %d)\n",
			ev->route, ev->port, ev->unplug);
		goto out;
	}
	if (ev->port > sw->config.max_port_number) {
		tb_warn(tb,
			"hotplug event from non existent port %llx:%x (unplug: %d)\n",
			ev->route, ev->port, ev->unplug);
		goto put_sw;
	}
	port = &sw->ports[ev->port];
	if (tb_is_upstream_port(port)) {
		tb_dbg(tb, "hotplug event for upstream port %llx:%x (unplug: %d)\n",
		       ev->route, ev->port, ev->unplug);
		goto put_sw;
	}
	if (ev->unplug) {
		if (tb_port_has_remote(port)) {
			tb_port_info(port, "unplugged\n");
			tb_sw_set_unplugged(port->remote->sw);
			tb_free_invalid_tunnels(tb);
			tb_switch_remove(port->remote->sw);
			port->remote = NULL;
			if (port->dual_link_port)
				port->dual_link_port->remote = NULL;
		} else if (tb_port_is_dpout(port)) {
			tb_teardown_dp(tb, port);
		} else {
			tb_port_info(port,
				     "got unplug event for disconnected port, ignoring\n");
		}
	} else if (port->remote) {
		tb_port_info(port,
			     "got plug event for connected port, ignoring\n");
	} else {
		if (tb_port_is_null(port)) {
			tb_port_info(port, "hotplug: scanning\n");
			tb_scan_port(port);
			if (!port->remote)
				tb_port_info(port, "hotplug: no switch found\n");
		} else if (tb_port_is_dpout(port)) {
			tb_tunnel_dp(tb, port);
		}
	}

put_sw:
	tb_switch_put(sw);
out:
	mutex_unlock(&tb->lock);
	kfree(ev);
}

/**
 * tb_handle_event() - callback function for the control channel
 *
 * Acks the plug events and delegates them to tb_handle_hotplug() via
 * tb_queue_hotplug().
 */
static void tb_handle_event(struct tb *tb, enum tb_cfg_pkg_type type,
			    const void *buf, size_t size)
{
	const struct cfg_event_pkg *pkg = buf;
	u64 route;

	if (type != TB_CFG_PKG_EVENT) {
		tb_warn(tb, "unexpected event %#x, ignoring\n", type);
		return;
	}

	route = tb_cfg_get_route(&pkg->header);

	if (tb_cfg_error(tb->ctl, route, pkg->port,
			 TB_CFG_ERROR_ACK_PLUG_EVENT)) {
		tb_warn(tb, "could not ack plug event on %llx:%x\n", route,
			pkg->port);
	}

	tb_queue_hotplug(tb, route, pkg->port, pkg->unplug);
}
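/**
 * tb_stop() - stop the domain
 *
 * Deactivates and frees all tunnels, removes the switch tree and tells
 * tb_handle_hotplug() to quit by clearing hotplug_active.
 */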
static void tb_stop(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel;
	struct tb_tunnel *n;

	/* tunnels are only present after everything has been initialized */
	list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
		tb_tunnel_deactivate(tunnel);
		tb_tunnel_free(tunnel);
	}
	tb_switch_remove(tb->root_switch);
	tcm->hotplug_active = false; /* signal tb_handle_hotplug to quit */
}
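/**
 * tb_scan_finalize_switch() - announce a discovered switch to userspace
 *
 * device_for_each_child() callback: marks switches that the boot
 * firmware already set up (sw->boot) as authorized, lifts uevent
 * suppression, sends KOBJ_ADD and recurses into the children.
 */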
static int tb_scan_finalize_switch(struct device *dev, void *data)
{
	if (tb_is_switch(dev)) {
		struct tb_switch *sw = tb_to_switch(dev);

		/*
		 * If we found that the switch was already setup by the
		 * boot firmware, mark it as authorized now before we
		 * send uevent to userspace.
		 */
		if (sw->boot)
			sw->authorized = 1;

		dev_set_uevent_suppress(dev, false);
		kobject_uevent(&dev->kobj, KOBJ_ADD);
		device_for_each_child(dev, NULL, tb_scan_finalize_switch);
	}

	return 0;
}
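/**
 * tb_start() - start the connection manager
 *
 * Allocates, configures and announces the root switch, scans for
 * devices and tunnels that the boot firmware created before the driver
 * loaded, makes the discovered switches visible to userspace and
 * finally enables hotplug event handling.
 */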
static int tb_start(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);
	int ret;

	tb->root_switch = tb_switch_alloc(tb, &tb->dev, 0);
	if (!tb->root_switch)
		return -ENOMEM;

	/*
	 * ICM firmware upgrade needs running firmware and in native
	 * mode that is not available so disable firmware upgrade of the
	 * root switch.
	 */
	tb->root_switch->no_nvm_upgrade = true;

	ret = tb_switch_configure(tb->root_switch);
	if (ret) {
		tb_switch_put(tb->root_switch);
		return ret;
	}

	/* Announce the switch to the world */
	ret = tb_switch_add(tb->root_switch);
	if (ret) {
		tb_switch_put(tb->root_switch);
		return ret;
	}

	/* Full scan to discover devices added before the driver was loaded. */
	tb_scan_switch(tb->root_switch);
	/* Find out tunnels created by the boot firmware */
	tb_discover_tunnels(tb->root_switch);
	/* Make the discovered switches available to the userspace */
	device_for_each_child(&tb->root_switch->dev, NULL,
			      tb_scan_finalize_switch);

	/* Allow tb_handle_hotplug to progress events */
	tcm->hotplug_active = true;
	return 0;
}
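/**
 * tb_suspend_noirq() - suspend the domain
 *
 * Suspends the switch tree and clears hotplug_active so that queued
 * hotplug events are drained instead of handled.
 */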
static int tb_suspend_noirq(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);

	tb_dbg(tb, "suspending...\n");
	tb_switch_suspend(tb->root_switch);
	tcm->hotplug_active = false; /* signal tb_handle_hotplug to quit */
	tb_dbg(tb, "suspend finished\n");

	return 0;
}
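/**
 * tb_resume_noirq() - resume the domain
 *
 * Resets the switches to get rid of any PCIe devices the firmware
 * might have set up, resumes the switch tree, frees whatever went away
 * during suspend, restarts the remaining tunnels and re-enables
 * hotplug handling.
 */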
static int tb_resume_noirq(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel, *n;

	tb_dbg(tb, "resuming...\n");

	/* remove any pci devices the firmware might have setup */
	tb_switch_reset(tb, 0);

	tb_switch_resume(tb->root_switch);
	tb_free_invalid_tunnels(tb);
	tb_free_unplugged_children(tb->root_switch);
	list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list)
		tb_tunnel_restart(tunnel);
	if (!list_empty(&tcm->tunnel_list)) {
		/*
		 * the pcie links need some time to get going.
		 * 100ms works for me...
		 */
		tb_dbg(tb, "tunnels restarted, sleeping for 100ms\n");
		msleep(100);
	}
	/* Allow tb_handle_hotplug to progress events */
	tcm->hotplug_active = true;
	tb_dbg(tb, "resume finished\n");

	return 0;
}

static const struct tb_cm_ops tb_cm_ops = {
	.start = tb_start,
	.stop = tb_stop,
	.suspend_noirq = tb_suspend_noirq,
	.resume_noirq = tb_resume_noirq,
	.handle_event = tb_handle_event,
	.approve_switch = tb_tunnel_pci,
};

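/**
 * tb_probe() - set up this native connection manager for @nhi
 *
 * Only runs on Apple hardware (x86_apple_machine); other systems are
 * handled by the firmware based connection manager. The security level
 * defaults to TB_SECURITY_USER, so each switch must be authorized
 * before its PCIe tunnel is established.
 */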
struct tb *tb_probe(struct tb_nhi *nhi)
{
	struct tb_cm *tcm;
	struct tb *tb;

	if (!x86_apple_machine)
		return NULL;

	tb = tb_domain_alloc(nhi, sizeof(*tcm));
	if (!tb)
		return NULL;

	tb->security_level = TB_SECURITY_USER;
	tb->cm_ops = &tb_cm_ops;

	tcm = tb_priv(tb);
	INIT_LIST_HEAD(&tcm->tunnel_list);

	return tb;
}