// SPDX-License-Identifier: GPL-2.0
/*
 * Thunderbolt driver - bus logic (NHI independent)
 *
 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
 * Copyright (C) 2019, Intel Corporation
 */

#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/platform_data/x86/apple.h>

#include "tb.h"
#include "tb_regs.h"
#include "tunnel.h"

/**
 * struct tb_cm - Simple Thunderbolt connection manager
 * @tunnel_list: List of active tunnels
 * @dp_resources: List of available DP resources for DP tunneling
 * @hotplug_active: tb_handle_hotplug will stop progressing plug
 *		    events and exit if this is not set (it needs to
 *		    acquire the lock one more time). Used to drain wq
 *		    after cfg has been paused.
 */
struct tb_cm {
	struct list_head tunnel_list;
	struct list_head dp_resources;
	bool hotplug_active;
};

struct tb_hotplug_event {
	struct work_struct work;
	struct tb *tb;
	u64 route;
	u8 port;
	bool unplug;
};

static void tb_handle_hotplug(struct work_struct *work);

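/*
 * tb_queue_hotplug() - defer a hotplug event to the domain workqueue
 *
 * Allocates a tb_hotplug_event for the given route/port and schedules
 * tb_handle_hotplug() on tb->wq. If the allocation fails the event is
 * silently dropped.
 */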
static void tb_queue_hotplug(struct tb *tb, u64 route, u8 port, bool unplug)
{
	struct tb_hotplug_event *ev;

	ev = kmalloc(sizeof(*ev), GFP_KERNEL);
	if (!ev)
		return;

	ev->tb = tb;
	ev->route = route;
	ev->port = port;
	ev->unplug = unplug;
	INIT_WORK(&ev->work, tb_handle_hotplug);
	queue_work(tb->wq, &ev->work);
}

/* enumeration & hot plug handling */

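/*
 * tb_add_dp_resources() - make DP IN adapters of @sw available for tunneling
 *
 * Walks all ports of @sw and adds every DP IN adapter whose DP resource
 * the switch reports as available to the connection manager's
 * dp_resources list.
 */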
static void tb_add_dp_resources(struct tb_switch *sw)
{
	struct tb_cm *tcm = tb_priv(sw->tb);
	struct tb_port *port;

	tb_switch_for_each_port(sw, port) {
		if (!tb_port_is_dpin(port))
			continue;

		if (!tb_switch_query_dp_resource(sw, port))
			continue;

		list_add_tail(&port->list, &tcm->dp_resources);
		tb_port_dbg(port, "DP IN resource available\n");
	}
}

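/*
 * tb_remove_dp_resources() - drop DP resources of @sw and its children
 *
 * Recurses into the child switches first and then removes every entry
 * on the dp_resources list that belongs to @sw.
 */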
static void tb_remove_dp_resources(struct tb_switch *sw)
{
	struct tb_cm *tcm = tb_priv(sw->tb);
	struct tb_port *port, *tmp;

	/* Clear children resources first */
	tb_switch_for_each_port(sw, port) {
		if (tb_port_has_remote(port))
			tb_remove_dp_resources(port->remote->sw);
	}

	list_for_each_entry_safe(port, tmp, &tcm->dp_resources, list) {
		if (port->sw == sw) {
			tb_port_dbg(port, "DP OUT resource unavailable\n");
			list_del_init(&port->list);
		}
	}
}

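/*
 * tb_discover_tunnels() - pick up tunnels created by the boot firmware
 *
 * Walks the switch tree starting at @sw, discovers existing DP and PCIe
 * tunnels and adds them to the tunnel list. Switches along a discovered
 * PCIe tunnel get their boot flag set so they can be marked authorized
 * later without user interaction.
 */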
static void tb_discover_tunnels(struct tb_switch *sw)
{
	struct tb *tb = sw->tb;
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_port *port;

	tb_switch_for_each_port(sw, port) {
		struct tb_tunnel *tunnel = NULL;

		switch (port->config.type) {
		case TB_TYPE_DP_HDMI_IN:
			tunnel = tb_tunnel_discover_dp(tb, port);
			break;

		case TB_TYPE_PCIE_DOWN:
			tunnel = tb_tunnel_discover_pci(tb, port);
			break;

		default:
			break;
		}

		if (!tunnel)
			continue;

		if (tb_tunnel_is_pci(tunnel)) {
			struct tb_switch *parent = tunnel->dst_port->sw;

			while (parent != tunnel->src_port->sw) {
				parent->boot = true;
				parent = tb_switch_parent(parent);
			}
		}

		list_add_tail(&tunnel->list, &tcm->tunnel_list);
	}

	tb_switch_for_each_port(sw, port) {
		if (tb_port_has_remote(port))
			tb_discover_tunnels(port->remote->sw);
	}
}

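/*
 * tb_scan_xdomain() - check if the port leads to another Thunderbolt domain
 *
 * If there is no XDomain registered for the downstream route yet,
 * allocate one and add it so that connections to other Thunderbolt
 * hosts are handled.
 */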
static void tb_scan_xdomain(struct tb_port *port)
{
	struct tb_switch *sw = port->sw;
	struct tb *tb = sw->tb;
	struct tb_xdomain *xd;
	u64 route;

	route = tb_downstream_route(port);
	xd = tb_xdomain_find_by_route(tb, route);
	if (xd) {
		tb_xdomain_put(xd);
		return;
	}

	xd = tb_xdomain_alloc(tb, &sw->dev, route, tb->root_switch->uuid,
			      NULL);
	if (xd) {
		tb_port_at(route, sw)->xdomain = xd;
		tb_xdomain_add(xd);
	}
}

static void tb_scan_port(struct tb_port *port);

/**
 * tb_scan_switch() - scan for and initialize downstream switches
 */
static void tb_scan_switch(struct tb_switch *sw)
{
	struct tb_port *port;

	tb_switch_for_each_port(sw, port)
		tb_scan_port(port);
}

/**
 * tb_scan_port() - check for and initialize switches below port
 */
static void tb_scan_port(struct tb_port *port)
{
	struct tb_cm *tcm = tb_priv(port->sw->tb);
	struct tb_port *upstream_port;
	struct tb_switch *sw;

	if (tb_is_upstream_port(port))
		return;

	if (tb_port_is_dpout(port) && tb_dp_port_hpd_is_active(port) == 1 &&
	    !tb_dp_port_is_enabled(port)) {
		tb_port_dbg(port, "DP adapter HPD set, queuing hotplug\n");
		tb_queue_hotplug(port->sw->tb, tb_route(port->sw), port->port,
				 false);
		return;
	}

	if (port->config.type != TB_TYPE_PORT)
		return;
	if (port->dual_link_port && port->link_nr)
		return; /*
			 * Downstream switch is reachable through two ports.
			 * Only scan on the primary port (link_nr == 0).
			 */
	if (tb_wait_for_port(port, false) <= 0)
		return;
	if (port->remote) {
		tb_port_dbg(port, "port already has a remote\n");
		return;
	}
	sw = tb_switch_alloc(port->sw->tb, &port->sw->dev,
			     tb_downstream_route(port));
	if (IS_ERR(sw)) {
		/*
		 * If there is an error accessing the connected switch
		 * it may be connected to another domain. Also we allow
		 * the other domain to be connected to a max depth switch.
		 */
		if (PTR_ERR(sw) == -EIO || PTR_ERR(sw) == -EADDRNOTAVAIL)
			tb_scan_xdomain(port);
		return;
	}

	if (tb_switch_configure(sw)) {
		tb_switch_put(sw);
		return;
	}

	/*
	 * If there was previously another domain connected remove it
	 * first.
	 */
	if (port->xdomain) {
		tb_xdomain_remove(port->xdomain);
		port->xdomain = NULL;
	}

	/*
	 * Do not send uevents until we have discovered all existing
	 * tunnels and know which switches were authorized already by
	 * the boot firmware.
	 */
	if (!tcm->hotplug_active)
		dev_set_uevent_suppress(&sw->dev, true);

	if (tb_switch_add(sw)) {
		tb_switch_put(sw);
		return;
	}

	/* Link the switches using both links if available */
	upstream_port = tb_upstream_port(sw);
	port->remote = upstream_port;
	upstream_port->remote = port;
	if (port->dual_link_port && upstream_port->dual_link_port) {
		port->dual_link_port->remote = upstream_port->dual_link_port;
		upstream_port->dual_link_port->remote = port->dual_link_port;
	}

	/* Enable lane bonding if supported */
	if (tb_switch_lane_bonding_enable(sw))
		tb_sw_warn(sw, "failed to enable lane bonding\n");

	tb_scan_switch(sw);
}

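/*
 * tb_find_tunnel() - find an active tunnel of @type whose source or
 * destination port matches the given ports. Either @src_port or
 * @dst_port may be NULL, in which case it is ignored in the match.
 * Returns NULL if no matching tunnel exists.
 */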
static struct tb_tunnel *tb_find_tunnel(struct tb *tb, enum tb_tunnel_type type,
					struct tb_port *src_port,
					struct tb_port *dst_port)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel;

	list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
		if (tunnel->type == type &&
		    ((src_port && src_port == tunnel->src_port) ||
		     (dst_port && dst_port == tunnel->dst_port))) {
			return tunnel;
		}
	}

	return NULL;
}

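/*
 * tb_deactivate_and_free_tunnel() - tear down a tunnel and release it
 *
 * Deactivates the paths, drops the tunnel from the tunnel list and, for
 * DP tunnels, releases the DP IN resource before freeing the tunnel.
 * Passing NULL is allowed and does nothing.
 */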
static void tb_deactivate_and_free_tunnel(struct tb_tunnel *tunnel)
{
	if (!tunnel)
		return;

	tb_tunnel_deactivate(tunnel);
	list_del(&tunnel->list);

	/*
	 * In case of DP tunnel make sure the DP IN resource is deallocated
	 * properly.
	 */
	if (tb_tunnel_is_dp(tunnel)) {
		struct tb_port *in = tunnel->src_port;

		tb_switch_dealloc_dp_resource(in->sw, in);
	}

	tb_tunnel_free(tunnel);
}

/**
 * tb_free_invalid_tunnels() - destroy tunnels of devices that have gone away
 */
static void tb_free_invalid_tunnels(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel;
	struct tb_tunnel *n;

	list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
		if (tb_tunnel_is_invalid(tunnel))
			tb_deactivate_and_free_tunnel(tunnel);
	}
}

/**
 * tb_free_unplugged_children() - traverse hierarchy and free unplugged switches
 */
static void tb_free_unplugged_children(struct tb_switch *sw)
{
	struct tb_port *port;

	tb_switch_for_each_port(sw, port) {
		if (!tb_port_has_remote(port))
			continue;

		if (port->remote->sw->is_unplugged) {
			tb_remove_dp_resources(port->remote->sw);
			tb_switch_lane_bonding_disable(port->remote->sw);
			tb_switch_remove(port->remote->sw);
			port->remote = NULL;
			if (port->dual_link_port)
				port->dual_link_port->remote = NULL;
		} else {
			tb_free_unplugged_children(port->remote->sw);
		}
	}
}

/**
 * tb_find_port() - return the first port of @type on @sw or NULL
 * @sw: Switch to find the port from
 * @type: Port type to look for
 */
static struct tb_port *tb_find_port(struct tb_switch *sw,
				    enum tb_port_type type)
{
	struct tb_port *port;

	tb_switch_for_each_port(sw, port) {
		if (port->config.type == type)
			return port;
	}

	return NULL;
}

/**
 * tb_find_unused_port() - return the first inactive port on @sw
 * @sw: Switch to find the port on
 * @type: Port type to look for
 */
static struct tb_port *tb_find_unused_port(struct tb_switch *sw,
					   enum tb_port_type type)
{
	struct tb_port *port;

	tb_switch_for_each_port(sw, port) {
		if (tb_is_upstream_port(port))
			continue;
		if (port->config.type != type)
			continue;
		if (port->cap_adap)
			continue;
		if (tb_port_is_enabled(port))
			continue;
		return port;
	}
	return NULL;
}

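/*
 * tb_find_pcie_down() - pick the PCIe downstream adapter to tunnel through
 *
 * For the root switch the physical Thunderbolt port to PCIe downstream
 * adapter mapping is hard-coded per controller so that devices always
 * end up in the same PCIe hierarchy. For other switches the first
 * unused PCIe downstream adapter is used.
 */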
static struct tb_port *tb_find_pcie_down(struct tb_switch *sw,
					 const struct tb_port *port)
{
	/*
	 * To keep plugging devices consistently in the same PCIe
	 * hierarchy, do mapping here for root switch downstream PCIe
	 * ports.
	 */
	if (!tb_route(sw)) {
		int phy_port = tb_phy_port_from_link(port->port);
		int index;

		/*
		 * Hard-coded Thunderbolt port to PCIe down port mapping
		 * per controller.
		 */
		if (tb_switch_is_cactus_ridge(sw) ||
		    tb_switch_is_alpine_ridge(sw))
			index = !phy_port ? 6 : 7;
		else if (tb_switch_is_falcon_ridge(sw))
			index = !phy_port ? 6 : 8;
		else if (tb_switch_is_titan_ridge(sw))
			index = !phy_port ? 8 : 9;
		else
			goto out;

		/* Validate the hard-coding */
		if (WARN_ON(index > sw->config.max_port_number))
			goto out;
		if (WARN_ON(!tb_port_is_pcie_down(&sw->ports[index])))
			goto out;
		if (WARN_ON(tb_pci_port_is_enabled(&sw->ports[index])))
			goto out;

		return &sw->ports[index];
	}

out:
	return tb_find_unused_port(sw, TB_TYPE_PCIE_DOWN);
}

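/*
 * tb_tunnel_dp() - try to establish a DP tunnel
 *
 * Picks the first inactive DP IN and DP OUT adapter pair from the
 * dp_resources list, allocates the DP IN resource and activates a DP
 * tunnel between them.
 */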
static void tb_tunnel_dp(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_port *port, *in, *out;
	struct tb_tunnel *tunnel;

	/*
	 * Find pair of inactive DP IN and DP OUT adapters and then
	 * establish a DP tunnel between them.
	 */
	tb_dbg(tb, "looking for DP IN <-> DP OUT pairs:\n");

	in = NULL;
	out = NULL;
	list_for_each_entry(port, &tcm->dp_resources, list) {
		if (tb_port_is_enabled(port)) {
			tb_port_dbg(port, "in use\n");
			continue;
		}

		tb_port_dbg(port, "available\n");

		if (!in && tb_port_is_dpin(port))
			in = port;
		else if (!out && tb_port_is_dpout(port))
			out = port;
	}

	if (!in) {
		tb_dbg(tb, "no suitable DP IN adapter available, not tunneling\n");
		return;
	}
	if (!out) {
		tb_dbg(tb, "no suitable DP OUT adapter available, not tunneling\n");
		return;
	}

	if (tb_switch_alloc_dp_resource(in->sw, in)) {
		tb_port_dbg(in, "no resource available for DP IN, not tunneling\n");
		return;
	}

	tunnel = tb_tunnel_alloc_dp(tb, in, out);
	if (!tunnel) {
		tb_port_dbg(out, "could not allocate DP tunnel\n");
		goto dealloc_dp;
	}

	if (tb_tunnel_activate(tunnel)) {
		tb_port_info(out, "DP tunnel activation failed, aborting\n");
		tb_tunnel_free(tunnel);
		goto dealloc_dp;
	}

	list_add_tail(&tunnel->list, &tcm->tunnel_list);
	return;

dealloc_dp:
	tb_switch_dealloc_dp_resource(in->sw, in);
}

static void tb_dp_resource_unavailable(struct tb *tb, struct tb_port *port)
{
	struct tb_port *in, *out;
	struct tb_tunnel *tunnel;

	if (tb_port_is_dpin(port)) {
		tb_port_dbg(port, "DP IN resource unavailable\n");
		in = port;
		out = NULL;
	} else {
		tb_port_dbg(port, "DP OUT resource unavailable\n");
		in = NULL;
		out = port;
	}

	tunnel = tb_find_tunnel(tb, TB_TUNNEL_DP, in, out);
	tb_deactivate_and_free_tunnel(tunnel);
	list_del_init(&port->list);

	/*
	 * See if there is another DP OUT port that can be used to
	 * create another tunnel.
	 */
	tb_tunnel_dp(tb);
}

static void tb_dp_resource_available(struct tb *tb, struct tb_port *port)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_port *p;

	if (tb_port_is_enabled(port))
		return;

	list_for_each_entry(p, &tcm->dp_resources, list) {
		if (p == port)
			return;
	}

	tb_port_dbg(port, "DP %s resource available\n",
		    tb_port_is_dpin(port) ? "IN" : "OUT");
	list_add_tail(&port->list, &tcm->dp_resources);

	/* Look for suitable DP IN <-> DP OUT pairs now */
	tb_tunnel_dp(tb);
}

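/*
 * tb_tunnel_pci() - establish a PCIe tunnel when a switch is approved
 *
 * Called through tb_cm_ops.approve_switch. Finds the PCIe upstream
 * adapter of @sw and a matching downstream adapter on the parent
 * switch, then activates a PCIe tunnel between the two.
 */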
static int tb_tunnel_pci(struct tb *tb, struct tb_switch *sw)
{
	struct tb_port *up, *down, *port;
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_switch *parent_sw;
	struct tb_tunnel *tunnel;

	up = tb_find_port(sw, TB_TYPE_PCIE_UP);
	if (!up)
		return 0;

	/*
	 * Look up available down port. Since we are chaining it should
	 * be found right above this switch.
	 */
	parent_sw = tb_to_switch(sw->dev.parent);
	port = tb_port_at(tb_route(sw), parent_sw);
	down = tb_find_pcie_down(parent_sw, port);
	if (!down)
		return 0;

	tunnel = tb_tunnel_alloc_pci(tb, up, down);
	if (!tunnel)
		return -ENOMEM;

	if (tb_tunnel_activate(tunnel)) {
		tb_port_info(up,
			     "PCIe tunnel activation failed, aborting\n");
		tb_tunnel_free(tunnel);
		return -EIO;
	}

	list_add_tail(&tunnel->list, &tcm->tunnel_list);
	return 0;
}

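/*
 * tb_approve_xdomain_paths() - set up DMA paths to a remote host
 *
 * Creates and activates a DMA tunnel between the NHI port of the root
 * switch and the port behind which the XDomain is connected, using the
 * transmit and receive rings/paths negotiated in @xd.
 */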
static int tb_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_port *nhi_port, *dst_port;
	struct tb_tunnel *tunnel;
	struct tb_switch *sw;

	sw = tb_to_switch(xd->dev.parent);
	dst_port = tb_port_at(xd->route, sw);
	nhi_port = tb_find_port(tb->root_switch, TB_TYPE_NHI);

	mutex_lock(&tb->lock);
	tunnel = tb_tunnel_alloc_dma(tb, nhi_port, dst_port, xd->transmit_ring,
				     xd->transmit_path, xd->receive_ring,
				     xd->receive_path);
	if (!tunnel) {
		mutex_unlock(&tb->lock);
		return -ENOMEM;
	}

	if (tb_tunnel_activate(tunnel)) {
		tb_port_info(nhi_port,
			     "DMA tunnel activation failed, aborting\n");
		tb_tunnel_free(tunnel);
		mutex_unlock(&tb->lock);
		return -EIO;
	}

	list_add_tail(&tunnel->list, &tcm->tunnel_list);
	mutex_unlock(&tb->lock);
	return 0;
}

static void __tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
{
	struct tb_port *dst_port;
	struct tb_tunnel *tunnel;
	struct tb_switch *sw;

	sw = tb_to_switch(xd->dev.parent);
	dst_port = tb_port_at(xd->route, sw);

	/*
	 * It is possible that the tunnel was already torn down (in
	 * case of cable disconnect) so it is fine if we cannot find it
	 * here anymore.
	 */
	tunnel = tb_find_tunnel(tb, TB_TUNNEL_DMA, NULL, dst_port);
	tb_deactivate_and_free_tunnel(tunnel);
}

static int tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
{
	if (!xd->is_unplugged) {
		mutex_lock(&tb->lock);
		__tb_disconnect_xdomain_paths(tb, xd);
		mutex_unlock(&tb->lock);
	}
	return 0;
}

/* hotplug handling */

/**
 * tb_handle_hotplug() - handle hotplug event
 *
 * Executes on tb->wq.
 */
static void tb_handle_hotplug(struct work_struct *work)
{
	struct tb_hotplug_event *ev = container_of(work, typeof(*ev), work);
	struct tb *tb = ev->tb;
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_switch *sw;
	struct tb_port *port;
	mutex_lock(&tb->lock);
	if (!tcm->hotplug_active)
		goto out; /* during init, suspend or shutdown */

	sw = tb_switch_find_by_route(tb, ev->route);
	if (!sw) {
		tb_warn(tb,
			"hotplug event from non existent switch %llx:%x (unplug: %d)\n",
			ev->route, ev->port, ev->unplug);
		goto out;
	}
	if (ev->port > sw->config.max_port_number) {
		tb_warn(tb,
			"hotplug event from non existent port %llx:%x (unplug: %d)\n",
			ev->route, ev->port, ev->unplug);
		goto put_sw;
	}
	port = &sw->ports[ev->port];
	if (tb_is_upstream_port(port)) {
		tb_dbg(tb, "hotplug event for upstream port %llx:%x (unplug: %d)\n",
		       ev->route, ev->port, ev->unplug);
		goto put_sw;
	}
	if (ev->unplug) {
		if (tb_port_has_remote(port)) {
			tb_port_dbg(port, "switch unplugged\n");
			tb_sw_set_unplugged(port->remote->sw);
			tb_free_invalid_tunnels(tb);
			tb_remove_dp_resources(port->remote->sw);
			tb_switch_lane_bonding_disable(port->remote->sw);
			tb_switch_remove(port->remote->sw);
			port->remote = NULL;
			if (port->dual_link_port)
				port->dual_link_port->remote = NULL;
			/* Maybe we can create another DP tunnel */
			tb_tunnel_dp(tb);
		} else if (port->xdomain) {
			struct tb_xdomain *xd = tb_xdomain_get(port->xdomain);

			tb_port_dbg(port, "xdomain unplugged\n");
			/*
			 * Service drivers are unbound during
			 * tb_xdomain_remove() so setting XDomain as
			 * unplugged here prevents deadlock if they call
			 * tb_xdomain_disable_paths(). We will tear down
			 * the path below.
			 */
			xd->is_unplugged = true;
			tb_xdomain_remove(xd);
			port->xdomain = NULL;
			__tb_disconnect_xdomain_paths(tb, xd);
			tb_xdomain_put(xd);
		} else if (tb_port_is_dpout(port) || tb_port_is_dpin(port)) {
			tb_dp_resource_unavailable(tb, port);
		} else {
			tb_port_dbg(port,
				    "got unplug event for disconnected port, ignoring\n");
		}
	} else if (port->remote) {
		tb_port_dbg(port, "got plug event for connected port, ignoring\n");
	} else {
		if (tb_port_is_null(port)) {
			tb_port_dbg(port, "hotplug: scanning\n");
			tb_scan_port(port);
			if (!port->remote)
				tb_port_dbg(port, "hotplug: no switch found\n");
		} else if (tb_port_is_dpout(port) || tb_port_is_dpin(port)) {
			tb_dp_resource_available(tb, port);
		}
	}

put_sw:
	tb_switch_put(sw);
out:
	mutex_unlock(&tb->lock);
	kfree(ev);
}

/**
 * tb_handle_event() - callback function for the control channel
 *
 * Delegates to tb_handle_hotplug.
 */
static void tb_handle_event(struct tb *tb, enum tb_cfg_pkg_type type,
			    const void *buf, size_t size)
{
	const struct cfg_event_pkg *pkg = buf;
	u64 route;

	if (type != TB_CFG_PKG_EVENT) {
		tb_warn(tb, "unexpected event %#x, ignoring\n", type);
		return;
	}

	route = tb_cfg_get_route(&pkg->header);

	if (tb_cfg_error(tb->ctl, route, pkg->port,
			 TB_CFG_ERROR_ACK_PLUG_EVENT)) {
		tb_warn(tb, "could not ack plug event on %llx:%x\n", route,
			pkg->port);
	}

	tb_queue_hotplug(tb, route, pkg->port, pkg->unplug);
}

static void tb_stop(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel;
	struct tb_tunnel *n;

	/* tunnels are only present after everything has been initialized */
	list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
		/*
		 * DMA tunnels require the driver to be functional so we
		 * tear them down. Other protocol tunnels can be left
		 * intact.
		 */
		if (tb_tunnel_is_dma(tunnel))
			tb_tunnel_deactivate(tunnel);
		tb_tunnel_free(tunnel);
	}
	tb_switch_remove(tb->root_switch);
	tcm->hotplug_active = false; /* signal tb_handle_hotplug to quit */
}

static int tb_scan_finalize_switch(struct device *dev, void *data)
{
	if (tb_is_switch(dev)) {
		struct tb_switch *sw = tb_to_switch(dev);

		/*
		 * If we found that the switch was already setup by the
		 * boot firmware, mark it as authorized now before we
		 * send uevent to userspace.
		 */
		if (sw->boot)
			sw->authorized = 1;

		dev_set_uevent_suppress(dev, false);
		kobject_uevent(&dev->kobj, KOBJ_ADD);
		device_for_each_child(dev, NULL, tb_scan_finalize_switch);
	}

	return 0;
}

static int tb_start(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);
	int ret;

	tb->root_switch = tb_switch_alloc(tb, &tb->dev, 0);
	if (IS_ERR(tb->root_switch))
		return PTR_ERR(tb->root_switch);

	/*
	 * ICM firmware upgrade needs running firmware and in native
	 * mode that is not available so disable firmware upgrade of the
	 * root switch.
	 */
	tb->root_switch->no_nvm_upgrade = true;

	ret = tb_switch_configure(tb->root_switch);
	if (ret) {
		tb_switch_put(tb->root_switch);
		return ret;
	}

	/* Announce the switch to the world */
	ret = tb_switch_add(tb->root_switch);
	if (ret) {
		tb_switch_put(tb->root_switch);
		return ret;
	}

	/* Full scan to discover devices added before the driver was loaded. */
	tb_scan_switch(tb->root_switch);
	/* Find out tunnels created by the boot firmware */
	tb_discover_tunnels(tb->root_switch);
	/* Add DP IN resources for the root switch */
	tb_add_dp_resources(tb->root_switch);
	/* Make the discovered switches available to the userspace */
	device_for_each_child(&tb->root_switch->dev, NULL,
			      tb_scan_finalize_switch);

	/* Allow tb_handle_hotplug to progress events */
	tcm->hotplug_active = true;
	return 0;
}

static int tb_suspend_noirq(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);

	tb_dbg(tb, "suspending...\n");
	tb_switch_suspend(tb->root_switch);
	tcm->hotplug_active = false; /* signal tb_handle_hotplug to quit */
	tb_dbg(tb, "suspend finished\n");

	return 0;
}

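/*
 * tb_restore_children() - restore lane bonding after resume
 *
 * Walks the topology and re-enables lane bonding for each connected
 * child switch. Called from tb_resume_noirq() after the switches
 * themselves have been resumed.
 */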
static void tb_restore_children(struct tb_switch *sw)
{
	struct tb_port *port;

	tb_switch_for_each_port(sw, port) {
		if (!tb_port_has_remote(port))
			continue;

		if (tb_switch_lane_bonding_enable(port->remote->sw))
			dev_warn(&sw->dev, "failed to restore lane bonding\n");

		tb_restore_children(port->remote->sw);
	}
}

static int tb_resume_noirq(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel, *n;

	tb_dbg(tb, "resuming...\n");

	/* remove any pci devices the firmware might have setup */
	tb_switch_reset(tb, 0);

	tb_switch_resume(tb->root_switch);
	tb_free_invalid_tunnels(tb);
	tb_free_unplugged_children(tb->root_switch);
	tb_restore_children(tb->root_switch);
	list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list)
		tb_tunnel_restart(tunnel);
	if (!list_empty(&tcm->tunnel_list)) {
		/*
		 * the pcie links need some time to get going.
		 * 100ms works for me...
		 */
		tb_dbg(tb, "tunnels restarted, sleeping for 100ms\n");
		msleep(100);
	}
	/* Allow tb_handle_hotplug to progress events */
	tcm->hotplug_active = true;
	tb_dbg(tb, "resume finished\n");

	return 0;
}

static int tb_free_unplugged_xdomains(struct tb_switch *sw)
{
	struct tb_port *port;
	int ret = 0;

	tb_switch_for_each_port(sw, port) {
		if (tb_is_upstream_port(port))
			continue;
		if (port->xdomain && port->xdomain->is_unplugged) {
			tb_xdomain_remove(port->xdomain);
			port->xdomain = NULL;
			ret++;
		} else if (port->remote) {
			ret += tb_free_unplugged_xdomains(port->remote->sw);
		}
	}

	return ret;
}

static void tb_complete(struct tb *tb)
{
	/*
	 * Release any unplugged XDomains. If another domain was plugged
	 * in place of an unplugged XDomain we need to run another
	 * rescan to pick it up.
	 */
	mutex_lock(&tb->lock);
	if (tb_free_unplugged_xdomains(tb->root_switch))
		tb_scan_switch(tb->root_switch);
	mutex_unlock(&tb->lock);
}

static const struct tb_cm_ops tb_cm_ops = {
	.start = tb_start,
	.stop = tb_stop,
	.suspend_noirq = tb_suspend_noirq,
	.resume_noirq = tb_resume_noirq,
	.complete = tb_complete,
	.handle_event = tb_handle_event,
	.approve_switch = tb_tunnel_pci,
	.approve_xdomain_paths = tb_approve_xdomain_paths,
	.disconnect_xdomain_paths = tb_disconnect_xdomain_paths,
};

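/*
 * tb_probe() - instantiate the software connection manager for @nhi
 *
 * Returns NULL on non-Apple systems so that another connection manager
 * implementation can be used instead. Otherwise allocates the domain,
 * sets the user security level and installs tb_cm_ops.
 */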
struct tb *tb_probe(struct tb_nhi *nhi)
{
	struct tb_cm *tcm;
	struct tb *tb;

	if (!x86_apple_machine)
		return NULL;

	tb = tb_domain_alloc(nhi, sizeof(*tcm));
	if (!tb)
		return NULL;

	tb->security_level = TB_SECURITY_USER;
	tb->cm_ops = &tb_cm_ops;

	tcm = tb_priv(tb);
	INIT_LIST_HEAD(&tcm->tunnel_list);
	INIT_LIST_HEAD(&tcm->dp_resources);

	return tb;
}