// SPDX-License-Identifier: GPL-2.0
/*
 * Thunderbolt driver - bus logic (NHI independent)
 *
 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
 * Copyright (C) 2019, Intel Corporation
 */

#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/pm_runtime.h>
#include <linux/platform_data/x86/apple.h>

#include "tb.h"
#include "tb_regs.h"
#include "tunnel.h"

#define TB_TIMEOUT	100 /* ms */

/**
 * struct tb_cm - Simple Thunderbolt connection manager
 * @tunnel_list: List of active tunnels
 * @dp_resources: List of available DP resources for DP tunneling
 * @hotplug_active: tb_handle_hotplug will stop processing plug
 *		    events and exit if this is not set (it needs to
 *		    acquire the lock one more time). Used to drain wq
 *		    after cfg has been paused.
 * @remove_work: Work used to remove any unplugged routers after
 *		 runtime resume
 */
struct tb_cm {
	struct list_head tunnel_list;
	struct list_head dp_resources;
	bool hotplug_active;
	struct delayed_work remove_work;
};

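/*
 * The connection manager private area is allocated directly after
 * struct tb by tb_domain_alloc() (it is what tb_priv() returns), which
 * is why the containing domain can be recovered here with plain
 * pointer arithmetic.
 */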
static inline struct tb *tcm_to_tb(struct tb_cm *tcm)
{
	return ((void *)tcm - sizeof(struct tb));
}

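/*
 * Deferred hotplug event: @route and @port identify the router and
 * adapter that generated the event and @unplug tells whether the
 * device was plugged in or unplugged. Events are handled
 * asynchronously on the domain workqueue (tb->wq).
 */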
struct tb_hotplug_event {
	struct work_struct work;
	struct tb *tb;
	u64 route;
	u8 port;
	bool unplug;
};

static void tb_handle_hotplug(struct work_struct *work);

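/*
 * Queue a hotplug event for deferred handling on the domain
 * workqueue. If the allocation fails the event is silently dropped.
 */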
static void tb_queue_hotplug(struct tb *tb, u64 route, u8 port, bool unplug)
{
	struct tb_hotplug_event *ev;

	ev = kmalloc(sizeof(*ev), GFP_KERNEL);
	if (!ev)
		return;

	ev->tb = tb;
	ev->route = route;
	ev->port = port;
	ev->unplug = unplug;
	INIT_WORK(&ev->work, tb_handle_hotplug);
	queue_work(tb->wq, &ev->work);
}

/* enumeration & hot plug handling */

static void tb_add_dp_resources(struct tb_switch *sw)
{
	struct tb_cm *tcm = tb_priv(sw->tb);
	struct tb_port *port;

	tb_switch_for_each_port(sw, port) {
		if (!tb_port_is_dpin(port))
			continue;

		if (!tb_switch_query_dp_resource(sw, port))
			continue;

		list_add_tail(&port->list, &tcm->dp_resources);
		tb_port_dbg(port, "DP IN resource available\n");
	}
}

static void tb_remove_dp_resources(struct tb_switch *sw)
{
	struct tb_cm *tcm = tb_priv(sw->tb);
	struct tb_port *port, *tmp;

	/* Clear children resources first */
	tb_switch_for_each_port(sw, port) {
		if (tb_port_has_remote(port))
			tb_remove_dp_resources(port->remote->sw);
	}

	list_for_each_entry_safe(port, tmp, &tcm->dp_resources, list) {
		if (port->sw == sw) {
			tb_port_dbg(port, "DP OUT resource unavailable\n");
			list_del_init(&port->list);
		}
	}
}

static void tb_switch_discover_tunnels(struct tb_switch *sw,
				       struct list_head *list,
				       bool alloc_hopids)
{
	struct tb *tb = sw->tb;
	struct tb_port *port;

	tb_switch_for_each_port(sw, port) {
		struct tb_tunnel *tunnel = NULL;

		switch (port->config.type) {
		case TB_TYPE_DP_HDMI_IN:
			tunnel = tb_tunnel_discover_dp(tb, port, alloc_hopids);
			break;

		case TB_TYPE_PCIE_DOWN:
			tunnel = tb_tunnel_discover_pci(tb, port, alloc_hopids);
			break;

		case TB_TYPE_USB3_DOWN:
			tunnel = tb_tunnel_discover_usb3(tb, port, alloc_hopids);
			break;

		default:
			break;
		}

		if (tunnel)
			list_add_tail(&tunnel->list, list);
	}

	tb_switch_for_each_port(sw, port) {
		if (tb_port_has_remote(port)) {
			tb_switch_discover_tunnels(port->remote->sw, list,
						   alloc_hopids);
		}
	}
}

static void tb_discover_tunnels(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel;

	tb_switch_discover_tunnels(tb->root_switch, &tcm->tunnel_list, true);

	list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
		if (tb_tunnel_is_pci(tunnel)) {
			struct tb_switch *parent = tunnel->dst_port->sw;

			while (parent != tunnel->src_port->sw) {
				parent->boot = true;
				parent = tb_switch_parent(parent);
			}
		} else if (tb_tunnel_is_dp(tunnel)) {
			/* Keep the domain from powering down */
			pm_runtime_get_sync(&tunnel->src_port->sw->dev);
			pm_runtime_get_sync(&tunnel->dst_port->sw->dev);
		}
	}
}

static int tb_port_configure_xdomain(struct tb_port *port)
{
	/*
	 * XDomain paths currently only support a single lane, so we
	 * must disable the other lane according to the USB4 spec.
	 */
	tb_port_disable(port->dual_link_port);

	if (tb_switch_is_usb4(port->sw))
		return usb4_port_configure_xdomain(port);
	return tb_lc_configure_xdomain(port);
}

static void tb_port_unconfigure_xdomain(struct tb_port *port)
{
	if (tb_switch_is_usb4(port->sw))
		usb4_port_unconfigure_xdomain(port);
	else
		tb_lc_unconfigure_xdomain(port);

	tb_port_enable(port->dual_link_port);
}

static void tb_scan_xdomain(struct tb_port *port)
{
	struct tb_switch *sw = port->sw;
	struct tb *tb = sw->tb;
	struct tb_xdomain *xd;
	u64 route;

	if (!tb_is_xdomain_enabled())
		return;

	route = tb_downstream_route(port);
	xd = tb_xdomain_find_by_route(tb, route);
	if (xd) {
		tb_xdomain_put(xd);
		return;
	}

	xd = tb_xdomain_alloc(tb, &sw->dev, route, tb->root_switch->uuid,
			      NULL);
	if (xd) {
		tb_port_at(route, sw)->xdomain = xd;
		tb_port_configure_xdomain(port);
		tb_xdomain_add(xd);
	}
}

static int tb_enable_tmu(struct tb_switch *sw)
{
	int ret;

	/* If it is already enabled in correct mode, don't touch it */
	if (tb_switch_tmu_hifi_is_enabled(sw, sw->tmu.unidirectional_request))
		return 0;

	ret = tb_switch_tmu_disable(sw);
	if (ret)
		return ret;

	ret = tb_switch_tmu_post_time(sw);
	if (ret)
		return ret;

	return tb_switch_tmu_enable(sw);
}

/**
 * tb_find_unused_port() - return the first inactive port on @sw
 * @sw: Switch to find the port on
 * @type: Port type to look for
 */
static struct tb_port *tb_find_unused_port(struct tb_switch *sw,
					   enum tb_port_type type)
{
	struct tb_port *port;

	tb_switch_for_each_port(sw, port) {
		if (tb_is_upstream_port(port))
			continue;
		if (port->config.type != type)
			continue;
		if (!port->cap_adap)
			continue;
		if (tb_port_is_enabled(port))
			continue;
		return port;
	}
	return NULL;
}

static struct tb_port *tb_find_usb3_down(struct tb_switch *sw,
					 const struct tb_port *port)
{
	struct tb_port *down;

	down = usb4_switch_map_usb3_down(sw, port);
	if (down && !tb_usb3_port_is_enabled(down))
		return down;
	return NULL;
}

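/*
 * Passing NULL for @src_port or @dst_port makes that end of the tunnel
 * a wildcard: the first tunnel of @type matching the other end is
 * returned.
 */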
static struct tb_tunnel *tb_find_tunnel(struct tb *tb, enum tb_tunnel_type type,
					struct tb_port *src_port,
					struct tb_port *dst_port)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel;

	list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
		if (tunnel->type == type &&
		    ((src_port && src_port == tunnel->src_port) ||
		     (dst_port && dst_port == tunnel->dst_port))) {
			return tunnel;
		}
	}

	return NULL;
}

static struct tb_tunnel *tb_find_first_usb3_tunnel(struct tb *tb,
						   struct tb_port *src_port,
						   struct tb_port *dst_port)
{
	struct tb_port *port, *usb3_down;
	struct tb_switch *sw;

	/* Pick the router that is deepest in the topology */
	if (dst_port->sw->config.depth > src_port->sw->config.depth)
		sw = dst_port->sw;
	else
		sw = src_port->sw;

	/* Can't be the host router */
	if (sw == tb->root_switch)
		return NULL;

	/* Find the downstream USB4 port that leads to this router */
	port = tb_port_at(tb_route(sw), tb->root_switch);
	/* Find the corresponding host router USB3 downstream port */
	usb3_down = usb4_switch_map_usb3_down(tb->root_switch, port);
	if (!usb3_down)
		return NULL;

	return tb_find_tunnel(tb, TB_TUNNEL_USB3, usb3_down, NULL);
}

static int tb_available_bandwidth(struct tb *tb, struct tb_port *src_port,
				  struct tb_port *dst_port, int *available_up,
				  int *available_down)
{
	int usb3_consumed_up, usb3_consumed_down, ret;
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel;
	struct tb_port *port;

	tb_port_dbg(dst_port, "calculating available bandwidth\n");

	tunnel = tb_find_first_usb3_tunnel(tb, src_port, dst_port);
	if (tunnel) {
		ret = tb_tunnel_consumed_bandwidth(tunnel, &usb3_consumed_up,
						   &usb3_consumed_down);
		if (ret)
			return ret;
	} else {
		usb3_consumed_up = 0;
		usb3_consumed_down = 0;
	}

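	/* Start from 40 Gb/s, the maximum a single link can carry */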
	*available_up = *available_down = 40000;

	/* Find the minimum available bandwidth over all links */
	tb_for_each_port_on_path(src_port, dst_port, port) {
		int link_speed, link_width, up_bw, down_bw;

		if (!tb_port_is_null(port))
			continue;

		if (tb_is_upstream_port(port)) {
			link_speed = port->sw->link_speed;
		} else {
			link_speed = tb_port_get_link_speed(port);
			if (link_speed < 0)
				return link_speed;
		}

		link_width = port->bonded ? 2 : 1;

		up_bw = link_speed * link_width * 1000; /* Mb/s */
		/* Leave 10% guard band */
		up_bw -= up_bw / 10;
		down_bw = up_bw;
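		/*
		 * For example, a bonded 20 Gb/s link yields
		 * 20 * 2 * 1000 = 40000 Mb/s, and 36000 Mb/s remain
		 * after the 10% guard band is subtracted.
		 */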

		tb_port_dbg(port, "link total bandwidth %d Mb/s\n", up_bw);

		/*
		 * Find all DP tunnels that cross the port and reduce
		 * their consumed bandwidth from the available.
		 */
		list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
			int dp_consumed_up, dp_consumed_down;

			if (!tb_tunnel_is_dp(tunnel))
				continue;

			if (!tb_tunnel_port_on_path(tunnel, port))
				continue;

			ret = tb_tunnel_consumed_bandwidth(tunnel,
							   &dp_consumed_up,
							   &dp_consumed_down);
			if (ret)
				return ret;

			up_bw -= dp_consumed_up;
			down_bw -= dp_consumed_down;
		}

		/*
		 * If USB3 is tunneled from the host router down to the
		 * branch leading to port we need to take USB3 consumed
		 * bandwidth into account regardless of whether it
		 * actually crosses the port.
		 */
		up_bw -= usb3_consumed_up;
		down_bw -= usb3_consumed_down;

		if (up_bw < *available_up)
			*available_up = up_bw;
		if (down_bw < *available_down)
			*available_down = down_bw;
	}

	if (*available_up < 0)
		*available_up = 0;
	if (*available_down < 0)
		*available_down = 0;

	return 0;
}

static int tb_release_unused_usb3_bandwidth(struct tb *tb,
					    struct tb_port *src_port,
					    struct tb_port *dst_port)
{
	struct tb_tunnel *tunnel;

	tunnel = tb_find_first_usb3_tunnel(tb, src_port, dst_port);
	return tunnel ? tb_tunnel_release_unused_bandwidth(tunnel) : 0;
}

static void tb_reclaim_usb3_bandwidth(struct tb *tb, struct tb_port *src_port,
				      struct tb_port *dst_port)
{
	int ret, available_up, available_down;
	struct tb_tunnel *tunnel;

	tunnel = tb_find_first_usb3_tunnel(tb, src_port, dst_port);
	if (!tunnel)
		return;

	tb_dbg(tb, "reclaiming unused bandwidth for USB3\n");

	/*
	 * Calculate available bandwidth for the first hop USB3 tunnel.
	 * That determines the whole USB3 bandwidth for this branch.
	 */
	ret = tb_available_bandwidth(tb, tunnel->src_port, tunnel->dst_port,
				     &available_up, &available_down);
	if (ret) {
		tb_warn(tb, "failed to calculate available bandwidth\n");
		return;
	}

	tb_dbg(tb, "available bandwidth for USB3 %d/%d Mb/s\n",
	       available_up, available_down);

	tb_tunnel_reclaim_available_bandwidth(tunnel, &available_up, &available_down);
}

static int tb_tunnel_usb3(struct tb *tb, struct tb_switch *sw)
{
	struct tb_switch *parent = tb_switch_parent(sw);
	int ret, available_up, available_down;
	struct tb_port *up, *down, *port;
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel;

	if (!tb_acpi_may_tunnel_usb3()) {
		tb_dbg(tb, "USB3 tunneling disabled, not creating tunnel\n");
		return 0;
	}

	up = tb_switch_find_port(sw, TB_TYPE_USB3_UP);
	if (!up)
		return 0;

	if (!sw->link_usb4)
		return 0;

	/*
	 * Look up available down port. Since we are chaining it should
	 * be found right above this switch.
	 */
	port = tb_port_at(tb_route(sw), parent);
	down = tb_find_usb3_down(parent, port);
	if (!down)
		return 0;

	if (tb_route(parent)) {
		struct tb_port *parent_up;
		/*
		 * Check first that the parent switch has its upstream USB3
		 * port enabled. Otherwise the chain is not complete and
		 * there is no point setting up a new tunnel.
		 */
		parent_up = tb_switch_find_port(parent, TB_TYPE_USB3_UP);
		if (!parent_up || !tb_port_is_enabled(parent_up))
			return 0;

		/* Make all unused bandwidth available for the new tunnel */
		ret = tb_release_unused_usb3_bandwidth(tb, down, up);
		if (ret)
			return ret;
	}

	ret = tb_available_bandwidth(tb, down, up, &available_up,
				     &available_down);
	if (ret)
		goto err_reclaim;

	tb_port_dbg(up, "available bandwidth for new USB3 tunnel %d/%d Mb/s\n",
		    available_up, available_down);

	tunnel = tb_tunnel_alloc_usb3(tb, up, down, available_up,
				      available_down);
	if (!tunnel) {
		ret = -ENOMEM;
		goto err_reclaim;
	}

	if (tb_tunnel_activate(tunnel)) {
		tb_port_info(up,
			     "USB3 tunnel activation failed, aborting\n");
		ret = -EIO;
		goto err_free;
	}

	list_add_tail(&tunnel->list, &tcm->tunnel_list);
	if (tb_route(parent))
		tb_reclaim_usb3_bandwidth(tb, down, up);

	return 0;

err_free:
	tb_tunnel_free(tunnel);
err_reclaim:
	if (tb_route(parent))
		tb_reclaim_usb3_bandwidth(tb, down, up);

	return ret;
}

static int tb_create_usb3_tunnels(struct tb_switch *sw)
{
	struct tb_port *port;
	int ret;

	if (!tb_acpi_may_tunnel_usb3())
		return 0;

	if (tb_route(sw)) {
		ret = tb_tunnel_usb3(sw->tb, sw);
		if (ret)
			return ret;
	}

	tb_switch_for_each_port(sw, port) {
		if (!tb_port_has_remote(port))
			continue;
		ret = tb_create_usb3_tunnels(port->remote->sw);
		if (ret)
			return ret;
	}

	return 0;
}

static void tb_scan_port(struct tb_port *port);

/*
 * tb_scan_switch() - scan for and initialize downstream switches
 */
static void tb_scan_switch(struct tb_switch *sw)
{
	struct tb_port *port;

	pm_runtime_get_sync(&sw->dev);

	tb_switch_for_each_port(sw, port)
		tb_scan_port(port);

	pm_runtime_mark_last_busy(&sw->dev);
	pm_runtime_put_autosuspend(&sw->dev);
}

/*
 * tb_scan_port() - check for and initialize switches below port
 */
static void tb_scan_port(struct tb_port *port)
{
	struct tb_cm *tcm = tb_priv(port->sw->tb);
	struct tb_port *upstream_port;
	struct tb_switch *sw;

	if (tb_is_upstream_port(port))
		return;

	if (tb_port_is_dpout(port) && tb_dp_port_hpd_is_active(port) == 1 &&
	    !tb_dp_port_is_enabled(port)) {
		tb_port_dbg(port, "DP adapter HPD set, queuing hotplug\n");
		tb_queue_hotplug(port->sw->tb, tb_route(port->sw), port->port,
				 false);
		return;
	}

	if (port->config.type != TB_TYPE_PORT)
		return;
	if (port->dual_link_port && port->link_nr)
		return; /*
			 * Downstream switch is reachable through two ports.
			 * Only scan on the primary port (link_nr == 0).
			 */
	if (tb_wait_for_port(port, false) <= 0)
		return;
	if (port->remote) {
		tb_port_dbg(port, "port already has a remote\n");
		return;
	}

	tb_retimer_scan(port, true);

	sw = tb_switch_alloc(port->sw->tb, &port->sw->dev,
			     tb_downstream_route(port));
	if (IS_ERR(sw)) {
		/*
		 * If there is an error accessing the connected switch
		 * it may be connected to another domain. Also we allow
		 * the other domain to be connected to a max depth switch.
		 */
		if (PTR_ERR(sw) == -EIO || PTR_ERR(sw) == -EADDRNOTAVAIL)
			tb_scan_xdomain(port);
		return;
	}

	if (tb_switch_configure(sw)) {
		tb_switch_put(sw);
		return;
	}

	/*
	 * If there was previously another domain connected remove it
	 * first.
	 */
	if (port->xdomain) {
		tb_xdomain_remove(port->xdomain);
		tb_port_unconfigure_xdomain(port);
		port->xdomain = NULL;
	}

	/*
	 * Do not send uevents until we have discovered all existing
	 * tunnels and know which switches were authorized already by
	 * the boot firmware.
	 */
	if (!tcm->hotplug_active)
		dev_set_uevent_suppress(&sw->dev, true);

	/*
	 * At the moment runtime PM is supported only for Thunderbolt 2
	 * and beyond (devices with LC).
	 */
	sw->rpm = sw->generation > 1;

	if (tb_switch_add(sw)) {
		tb_switch_put(sw);
		return;
	}

	/* Link the switches using both links if available */
	upstream_port = tb_upstream_port(sw);
	port->remote = upstream_port;
	upstream_port->remote = port;
	if (port->dual_link_port && upstream_port->dual_link_port) {
		port->dual_link_port->remote = upstream_port->dual_link_port;
		upstream_port->dual_link_port->remote = port->dual_link_port;
	}

	/* Enable lane bonding if supported */
	tb_switch_lane_bonding_enable(sw);
	/* Set the link configured */
	tb_switch_configure_link(sw);
	if (tb_switch_enable_clx(sw, TB_CL0S))
		tb_sw_warn(sw, "failed to enable CLx on upstream port\n");

	tb_switch_tmu_configure(sw, TB_SWITCH_TMU_RATE_HIFI,
				tb_switch_is_clx_enabled(sw));

	if (tb_enable_tmu(sw))
		tb_sw_warn(sw, "failed to enable TMU\n");

	/* Scan upstream retimers */
	tb_retimer_scan(upstream_port, true);

	/*
	 * Create USB 3.x tunnels only when the switch is plugged to the
	 * domain. This is because we scan the domain also during discovery
	 * and want to discover existing USB 3.x tunnels before we create
	 * any new ones.
	 */
	if (tcm->hotplug_active && tb_tunnel_usb3(sw->tb, sw))
		tb_sw_warn(sw, "USB3 tunnel creation failed\n");

	tb_add_dp_resources(sw);
	tb_scan_switch(sw);
}

static void tb_deactivate_and_free_tunnel(struct tb_tunnel *tunnel)
{
	struct tb_port *src_port, *dst_port;
	struct tb *tb;

	if (!tunnel)
		return;

	tb_tunnel_deactivate(tunnel);
	list_del(&tunnel->list);

	tb = tunnel->tb;
	src_port = tunnel->src_port;
	dst_port = tunnel->dst_port;

	switch (tunnel->type) {
	case TB_TUNNEL_DP:
		/*
		 * In case of DP tunnel make sure the DP IN resource is
		 * deallocated properly.
		 */
		tb_switch_dealloc_dp_resource(src_port->sw, src_port);
		/* Now we can allow the domain to runtime suspend again */
		pm_runtime_mark_last_busy(&dst_port->sw->dev);
		pm_runtime_put_autosuspend(&dst_port->sw->dev);
		pm_runtime_mark_last_busy(&src_port->sw->dev);
		pm_runtime_put_autosuspend(&src_port->sw->dev);
		fallthrough;

	case TB_TUNNEL_USB3:
		tb_reclaim_usb3_bandwidth(tb, src_port, dst_port);
		break;

	default:
		/*
		 * PCIe and DMA tunnels do not consume guaranteed
		 * bandwidth.
		 */
		break;
	}

	tb_tunnel_free(tunnel);
}

/*
 * tb_free_invalid_tunnels() - destroy tunnels of devices that have gone away
 */
static void tb_free_invalid_tunnels(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel;
	struct tb_tunnel *n;

	list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
		if (tb_tunnel_is_invalid(tunnel))
			tb_deactivate_and_free_tunnel(tunnel);
	}
}

/*
 * tb_free_unplugged_children() - traverse hierarchy and free unplugged switches
 */
static void tb_free_unplugged_children(struct tb_switch *sw)
{
	struct tb_port *port;

	tb_switch_for_each_port(sw, port) {
		if (!tb_port_has_remote(port))
			continue;

		if (port->remote->sw->is_unplugged) {
			tb_retimer_remove_all(port);
			tb_remove_dp_resources(port->remote->sw);
			tb_switch_unconfigure_link(port->remote->sw);
			tb_switch_lane_bonding_disable(port->remote->sw);
			tb_switch_remove(port->remote->sw);
			port->remote = NULL;
			if (port->dual_link_port)
				port->dual_link_port->remote = NULL;
		} else {
			tb_free_unplugged_children(port->remote->sw);
		}
	}
}

static struct tb_port *tb_find_pcie_down(struct tb_switch *sw,
					 const struct tb_port *port)
{
	struct tb_port *down = NULL;

	/*
	 * To keep plugging devices consistently in the same PCIe
	 * hierarchy, do mapping here for switch downstream PCIe ports.
	 */
	if (tb_switch_is_usb4(sw)) {
		down = usb4_switch_map_pcie_down(sw, port);
	} else if (!tb_route(sw)) {
		int phy_port = tb_phy_port_from_link(port->port);
		int index;

		/*
		 * Hard-coded Thunderbolt port to PCIe down port mapping
		 * per controller.
		 */
		if (tb_switch_is_cactus_ridge(sw) ||
		    tb_switch_is_alpine_ridge(sw))
			index = !phy_port ? 6 : 7;
		else if (tb_switch_is_falcon_ridge(sw))
			index = !phy_port ? 6 : 8;
		else if (tb_switch_is_titan_ridge(sw))
			index = !phy_port ? 8 : 9;
		else
			goto out;

		/* Validate the hard-coding */
		if (WARN_ON(index > sw->config.max_port_number))
			goto out;

		down = &sw->ports[index];
	}

	if (down) {
		if (WARN_ON(!tb_port_is_pcie_down(down)))
			goto out;
		if (tb_pci_port_is_enabled(down))
			goto out;

		return down;
	}

out:
	return tb_find_unused_port(sw, TB_TYPE_PCIE_DOWN);
}

static struct tb_port *tb_find_dp_out(struct tb *tb, struct tb_port *in)
{
	struct tb_port *host_port, *port;
	struct tb_cm *tcm = tb_priv(tb);

	host_port = tb_route(in->sw) ?
		tb_port_at(tb_route(in->sw), tb->root_switch) : NULL;

	list_for_each_entry(port, &tcm->dp_resources, list) {
		if (!tb_port_is_dpout(port))
			continue;

		if (tb_port_is_enabled(port)) {
			tb_port_dbg(port, "in use\n");
			continue;
		}

		tb_port_dbg(port, "DP OUT available\n");

		/*
		 * Keep the DP tunnel under the topology starting from
		 * the same host router downstream port.
		 */
		if (host_port && tb_route(port->sw)) {
			struct tb_port *p;

			p = tb_port_at(tb_route(port->sw), tb->root_switch);
			if (p != host_port)
				continue;
		}

		return port;
	}

	return NULL;
}

static void tb_tunnel_dp(struct tb *tb)
{
	int available_up, available_down, ret;
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_port *port, *in, *out;
	struct tb_tunnel *tunnel;

	if (!tb_acpi_may_tunnel_dp()) {
		tb_dbg(tb, "DP tunneling disabled, not creating tunnel\n");
		return;
	}

	/*
	 * Find pair of inactive DP IN and DP OUT adapters and then
	 * establish a DP tunnel between them.
	 */
	tb_dbg(tb, "looking for DP IN <-> DP OUT pairs:\n");

	in = NULL;
	out = NULL;
	list_for_each_entry(port, &tcm->dp_resources, list) {
		if (!tb_port_is_dpin(port))
			continue;

		if (tb_port_is_enabled(port)) {
			tb_port_dbg(port, "in use\n");
			continue;
		}

		tb_port_dbg(port, "DP IN available\n");

		out = tb_find_dp_out(tb, port);
		if (out) {
			in = port;
			break;
		}
	}

	if (!in) {
		tb_dbg(tb, "no suitable DP IN adapter available, not tunneling\n");
		return;
	}
	if (!out) {
		tb_dbg(tb, "no suitable DP OUT adapter available, not tunneling\n");
		return;
	}

	/*
	 * DP stream needs the domain to be active so runtime resume
	 * both ends of the tunnel.
	 *
	 * This should bring the routers in the middle active as well
	 * and keeps the domain from runtime suspending while the DP
	 * tunnel is active.
	 */
	pm_runtime_get_sync(&in->sw->dev);
	pm_runtime_get_sync(&out->sw->dev);

	if (tb_switch_alloc_dp_resource(in->sw, in)) {
		tb_port_dbg(in, "no resource available for DP IN, not tunneling\n");
		goto err_rpm_put;
	}

	/* Make all unused USB3 bandwidth available for the new DP tunnel */
	ret = tb_release_unused_usb3_bandwidth(tb, in, out);
	if (ret) {
		tb_warn(tb, "failed to release unused bandwidth\n");
		goto err_dealloc_dp;
	}

	ret = tb_available_bandwidth(tb, in, out, &available_up,
				     &available_down);
	if (ret)
		goto err_reclaim;

	tb_dbg(tb, "available bandwidth for new DP tunnel %u/%u Mb/s\n",
	       available_up, available_down);

	tunnel = tb_tunnel_alloc_dp(tb, in, out, available_up, available_down);
	if (!tunnel) {
		tb_port_dbg(out, "could not allocate DP tunnel\n");
		goto err_reclaim;
	}

	if (tb_tunnel_activate(tunnel)) {
		tb_port_info(out, "DP tunnel activation failed, aborting\n");
		goto err_free;
	}

	list_add_tail(&tunnel->list, &tcm->tunnel_list);
	tb_reclaim_usb3_bandwidth(tb, in, out);
	return;

err_free:
	tb_tunnel_free(tunnel);
err_reclaim:
	tb_reclaim_usb3_bandwidth(tb, in, out);
err_dealloc_dp:
	tb_switch_dealloc_dp_resource(in->sw, in);
err_rpm_put:
	pm_runtime_mark_last_busy(&out->sw->dev);
	pm_runtime_put_autosuspend(&out->sw->dev);
	pm_runtime_mark_last_busy(&in->sw->dev);
	pm_runtime_put_autosuspend(&in->sw->dev);
}

static void tb_dp_resource_unavailable(struct tb *tb, struct tb_port *port)
{
	struct tb_port *in, *out;
	struct tb_tunnel *tunnel;

	if (tb_port_is_dpin(port)) {
		tb_port_dbg(port, "DP IN resource unavailable\n");
		in = port;
		out = NULL;
	} else {
		tb_port_dbg(port, "DP OUT resource unavailable\n");
		in = NULL;
		out = port;
	}

	tunnel = tb_find_tunnel(tb, TB_TUNNEL_DP, in, out);
	tb_deactivate_and_free_tunnel(tunnel);
	list_del_init(&port->list);

	/*
	 * See if there is another DP OUT port that can be used to
	 * create another tunnel.
	 */
| 997 | tb_tunnel_dp(tb); |
| 998 | } |
| 999 | |
| 1000 | static void tb_dp_resource_available(struct tb *tb, struct tb_port *port) |
| 1001 | { |
| 1002 | struct tb_cm *tcm = tb_priv(tb); |
| 1003 | struct tb_port *p; |
| 1004 | |
| 1005 | if (tb_port_is_enabled(port)) |
| 1006 | return; |
| 1007 | |
| 1008 | list_for_each_entry(p, &tcm->dp_resources, list) { |
| 1009 | if (p == port) |
| 1010 | return; |
| 1011 | } |
| 1012 | |
| 1013 | tb_port_dbg(port, "DP %s resource available\n", |
| 1014 | tb_port_is_dpin(port) ? "IN" : "OUT"); |
| 1015 | list_add_tail(&port->list, &tcm->dp_resources); |
| 1016 | |
| 1017 | /* Look for suitable DP IN <-> DP OUT pairs now */ |
| 1018 | tb_tunnel_dp(tb); |
Mika Westerberg | 4f807e4 | 2018-09-17 16:30:49 +0300 | [diff] [blame] | 1019 | } |
| 1020 | |
Mika Westerberg | 81a2e3e | 2020-05-16 16:20:39 +0300 | [diff] [blame] | 1021 | static void tb_disconnect_and_release_dp(struct tb *tb) |
| 1022 | { |
| 1023 | struct tb_cm *tcm = tb_priv(tb); |
| 1024 | struct tb_tunnel *tunnel, *n; |
| 1025 | |
| 1026 | /* |
| 1027 | * Tear down all DP tunnels and release their resources. They |
| 1028 | * will be re-established after resume based on plug events. |
| 1029 | */ |
| 1030 | list_for_each_entry_safe_reverse(tunnel, n, &tcm->tunnel_list, list) { |
| 1031 | if (tb_tunnel_is_dp(tunnel)) |
| 1032 | tb_deactivate_and_free_tunnel(tunnel); |
| 1033 | } |
| 1034 | |
| 1035 | while (!list_empty(&tcm->dp_resources)) { |
| 1036 | struct tb_port *port; |
| 1037 | |
| 1038 | port = list_first_entry(&tcm->dp_resources, |
| 1039 | struct tb_port, list); |
| 1040 | list_del_init(&port->list); |
| 1041 | } |
| 1042 | } |
| 1043 | |
Mika Westerberg | 3da88be | 2020-11-10 11:47:14 +0300 | [diff] [blame] | 1044 | static int tb_disconnect_pci(struct tb *tb, struct tb_switch *sw) |
| 1045 | { |
| 1046 | struct tb_tunnel *tunnel; |
| 1047 | struct tb_port *up; |
| 1048 | |
| 1049 | up = tb_switch_find_port(sw, TB_TYPE_PCIE_UP); |
| 1050 | if (WARN_ON(!up)) |
| 1051 | return -ENODEV; |
| 1052 | |
| 1053 | tunnel = tb_find_tunnel(tb, TB_TUNNEL_PCI, NULL, up); |
| 1054 | if (WARN_ON(!tunnel)) |
| 1055 | return -ENODEV; |
| 1056 | |
| 1057 | tb_tunnel_deactivate(tunnel); |
| 1058 | list_del(&tunnel->list); |
| 1059 | tb_tunnel_free(tunnel); |
| 1060 | return 0; |
| 1061 | } |
| 1062 | |
Mika Westerberg | 99cabbb | 2018-12-30 21:34:08 +0200 | [diff] [blame] | 1063 | static int tb_tunnel_pci(struct tb *tb, struct tb_switch *sw) |
| 1064 | { |
| 1065 | struct tb_port *up, *down, *port; |
| 1066 | struct tb_cm *tcm = tb_priv(tb); |
| 1067 | struct tb_switch *parent_sw; |
| 1068 | struct tb_tunnel *tunnel; |
| 1069 | |
Mika Westerberg | 386e5e2 | 2019-12-17 15:33:37 +0300 | [diff] [blame] | 1070 | up = tb_switch_find_port(sw, TB_TYPE_PCIE_UP); |
Mika Westerberg | 99cabbb | 2018-12-30 21:34:08 +0200 | [diff] [blame] | 1071 | if (!up) |
| 1072 | return 0; |
| 1073 | |
| 1074 | /* |
| 1075 | * Look up available down port. Since we are chaining it should |
| 1076 | * be found right above this switch. |
| 1077 | */ |
| 1078 | parent_sw = tb_to_switch(sw->dev.parent); |
| 1079 | port = tb_port_at(tb_route(sw), parent_sw); |
| 1080 | down = tb_find_pcie_down(parent_sw, port); |
| 1081 | if (!down) |
| 1082 | return 0; |
| 1083 | |
| 1084 | tunnel = tb_tunnel_alloc_pci(tb, up, down); |
| 1085 | if (!tunnel) |
| 1086 | return -ENOMEM; |
| 1087 | |
| 1088 | if (tb_tunnel_activate(tunnel)) { |
| 1089 | tb_port_info(up, |
| 1090 | "PCIe tunnel activation failed, aborting\n"); |
| 1091 | tb_tunnel_free(tunnel); |
| 1092 | return -EIO; |
| 1093 | } |
| 1094 | |
Gil Fine | 43f977b | 2021-12-17 03:16:43 +0200 | [diff] [blame] | 1095 | /* |
| 1096 | * PCIe L1 is needed to enable CL0s for Titan Ridge so enable it |
| 1097 | * here. |
| 1098 | */ |
| 1099 | if (tb_switch_pcie_l1_enable(sw)) |
| 1100 | tb_sw_warn(sw, "failed to enable PCIe L1 for Titan Ridge\n"); |
| 1101 | |
Mika Westerberg | 99cabbb | 2018-12-30 21:34:08 +0200 | [diff] [blame] | 1102 | list_add_tail(&tunnel->list, &tcm->tunnel_list); |
| 1103 | return 0; |
Andreas Noever | 3364f0c | 2014-06-03 22:04:08 +0200 | [diff] [blame] | 1104 | } |
Andreas Noever | 9da672a | 2014-06-03 22:04:05 +0200 | [diff] [blame] | 1105 | |
Mika Westerberg | 180b068 | 2021-01-08 16:25:39 +0200 | [diff] [blame] | 1106 | static int tb_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd, |
| 1107 | int transmit_path, int transmit_ring, |
| 1108 | int receive_path, int receive_ring) |
Mika Westerberg | 7ea4cd6 | 2018-09-28 16:41:01 +0300 | [diff] [blame] | 1109 | { |
| 1110 | struct tb_cm *tcm = tb_priv(tb); |
| 1111 | struct tb_port *nhi_port, *dst_port; |
| 1112 | struct tb_tunnel *tunnel; |
| 1113 | struct tb_switch *sw; |
| 1114 | |
| 1115 | sw = tb_to_switch(xd->dev.parent); |
| 1116 | dst_port = tb_port_at(xd->route, sw); |
Mika Westerberg | 386e5e2 | 2019-12-17 15:33:37 +0300 | [diff] [blame] | 1117 | nhi_port = tb_switch_find_port(tb->root_switch, TB_TYPE_NHI); |
Mika Westerberg | 7ea4cd6 | 2018-09-28 16:41:01 +0300 | [diff] [blame] | 1118 | |
| 1119 | mutex_lock(&tb->lock); |
Mika Westerberg | 180b068 | 2021-01-08 16:25:39 +0200 | [diff] [blame] | 1120 | tunnel = tb_tunnel_alloc_dma(tb, nhi_port, dst_port, transmit_path, |
| 1121 | transmit_ring, receive_path, receive_ring); |
Mika Westerberg | 7ea4cd6 | 2018-09-28 16:41:01 +0300 | [diff] [blame] | 1122 | if (!tunnel) { |
| 1123 | mutex_unlock(&tb->lock); |
| 1124 | return -ENOMEM; |
| 1125 | } |
| 1126 | |
| 1127 | if (tb_tunnel_activate(tunnel)) { |
| 1128 | tb_port_info(nhi_port, |
| 1129 | "DMA tunnel activation failed, aborting\n"); |
| 1130 | tb_tunnel_free(tunnel); |
| 1131 | mutex_unlock(&tb->lock); |
| 1132 | return -EIO; |
| 1133 | } |
| 1134 | |
| 1135 | list_add_tail(&tunnel->list, &tcm->tunnel_list); |
| 1136 | mutex_unlock(&tb->lock); |
| 1137 | return 0; |
| 1138 | } |
| 1139 | |
static void __tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
                                          int transmit_path, int transmit_ring,
                                          int receive_path, int receive_ring)
{
        struct tb_cm *tcm = tb_priv(tb);
        struct tb_port *nhi_port, *dst_port;
        struct tb_tunnel *tunnel, *n;
        struct tb_switch *sw;

        sw = tb_to_switch(xd->dev.parent);
        dst_port = tb_port_at(xd->route, sw);
        nhi_port = tb_switch_find_port(tb->root_switch, TB_TYPE_NHI);

        list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
                if (!tb_tunnel_is_dma(tunnel))
                        continue;
                if (tunnel->src_port != nhi_port || tunnel->dst_port != dst_port)
                        continue;

                if (tb_tunnel_match_dma(tunnel, transmit_path, transmit_ring,
                                        receive_path, receive_ring))
                        tb_deactivate_and_free_tunnel(tunnel);
        }
}

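/*
 * Locked wrapper around __tb_disconnect_xdomain_paths(). If the
 * XDomain is already unplugged the tunnels have been (or will be) torn
 * down by the hotplug handler instead.
 */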
static int tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
                                       int transmit_path, int transmit_ring,
                                       int receive_path, int receive_ring)
{
        if (!xd->is_unplugged) {
                mutex_lock(&tb->lock);
                __tb_disconnect_xdomain_paths(tb, xd, transmit_path,
                                              transmit_ring, receive_path,
                                              receive_ring);
                mutex_unlock(&tb->lock);
        }
        return 0;
}

/* hotplug handling */

/*
 * tb_handle_hotplug() - handle hotplug event
 *
 * Executes on tb->wq.
 */
static void tb_handle_hotplug(struct work_struct *work)
{
        struct tb_hotplug_event *ev = container_of(work, typeof(*ev), work);
        struct tb *tb = ev->tb;
        struct tb_cm *tcm = tb_priv(tb);
        struct tb_switch *sw;
        struct tb_port *port;

        /* Bring the domain back from sleep if it was suspended */
        pm_runtime_get_sync(&tb->dev);

        mutex_lock(&tb->lock);
        if (!tcm->hotplug_active)
                goto out; /* during init, suspend or shutdown */

        sw = tb_switch_find_by_route(tb, ev->route);
        if (!sw) {
                tb_warn(tb,
                        "hotplug event from non-existent switch %llx:%x (unplug: %d)\n",
                        ev->route, ev->port, ev->unplug);
                goto out;
        }
        if (ev->port > sw->config.max_port_number) {
                tb_warn(tb,
                        "hotplug event from non-existent port %llx:%x (unplug: %d)\n",
                        ev->route, ev->port, ev->unplug);
                goto put_sw;
        }
        port = &sw->ports[ev->port];
        if (tb_is_upstream_port(port)) {
                tb_dbg(tb, "hotplug event for upstream port %llx:%x (unplug: %d)\n",
                       ev->route, ev->port, ev->unplug);
                goto put_sw;
        }

        pm_runtime_get_sync(&sw->dev);

        if (ev->unplug) {
                tb_retimer_remove_all(port);

                if (tb_port_has_remote(port)) {
                        tb_port_dbg(port, "switch unplugged\n");
                        tb_sw_set_unplugged(port->remote->sw);
                        tb_free_invalid_tunnels(tb);
                        tb_remove_dp_resources(port->remote->sw);
                        tb_switch_tmu_disable(port->remote->sw);
                        tb_switch_unconfigure_link(port->remote->sw);
                        tb_switch_lane_bonding_disable(port->remote->sw);
                        tb_switch_remove(port->remote->sw);
                        port->remote = NULL;
                        if (port->dual_link_port)
                                port->dual_link_port->remote = NULL;
                        /* Maybe we can create another DP tunnel */
                        tb_tunnel_dp(tb);
                } else if (port->xdomain) {
                        struct tb_xdomain *xd = tb_xdomain_get(port->xdomain);

                        tb_port_dbg(port, "xdomain unplugged\n");
                        /*
                         * Service drivers are unbound during
                         * tb_xdomain_remove() so setting XDomain as
                         * unplugged here prevents deadlock if they call
                         * tb_xdomain_disable_paths(). We will tear down
                         * all the tunnels below.
                         */
                        xd->is_unplugged = true;
                        tb_xdomain_remove(xd);
                        port->xdomain = NULL;
                        __tb_disconnect_xdomain_paths(tb, xd, -1, -1, -1, -1);
                        tb_xdomain_put(xd);
                        tb_port_unconfigure_xdomain(port);
                } else if (tb_port_is_dpout(port) || tb_port_is_dpin(port)) {
                        tb_dp_resource_unavailable(tb, port);
                } else {
                        tb_port_dbg(port,
                                    "got unplug event for disconnected port, ignoring\n");
                }
        } else if (port->remote) {
                tb_port_dbg(port, "got plug event for connected port, ignoring\n");
        } else {
                if (tb_port_is_null(port)) {
                        tb_port_dbg(port, "hotplug: scanning\n");
                        tb_scan_port(port);
                        if (!port->remote)
                                tb_port_dbg(port, "hotplug: no switch found\n");
                } else if (tb_port_is_dpout(port) || tb_port_is_dpin(port)) {
                        tb_dp_resource_available(tb, port);
                }
        }

        pm_runtime_mark_last_busy(&sw->dev);
        pm_runtime_put_autosuspend(&sw->dev);

put_sw:
        tb_switch_put(sw);
out:
        mutex_unlock(&tb->lock);

        pm_runtime_mark_last_busy(&tb->dev);
        pm_runtime_put_autosuspend(&tb->dev);

        kfree(ev);
}

/*
 * tb_handle_event() - callback function for the control channel
 *
 * Delegates to tb_handle_hotplug().
 */
static void tb_handle_event(struct tb *tb, enum tb_cfg_pkg_type type,
                            const void *buf, size_t size)
{
        const struct cfg_event_pkg *pkg = buf;
        u64 route;

        if (type != TB_CFG_PKG_EVENT) {
                tb_warn(tb, "unexpected event %#x, ignoring\n", type);
                return;
        }

        route = tb_cfg_get_route(&pkg->header);

        if (tb_cfg_ack_plug(tb->ctl, route, pkg->port, pkg->unplug)) {
                tb_warn(tb, "could not ack plug event on %llx:%x\n", route,
                        pkg->port);
        }

        tb_queue_hotplug(tb, route, pkg->port, pkg->unplug);
}

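/*
 * tb_stop() - tear down the connection manager state
 *
 * DMA tunnels need a functional driver so they are deactivated here;
 * other protocol tunnels are only freed and keep running in hardware.
 */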
static void tb_stop(struct tb *tb)
{
        struct tb_cm *tcm = tb_priv(tb);
        struct tb_tunnel *tunnel;
        struct tb_tunnel *n;

        cancel_delayed_work(&tcm->remove_work);
        /* tunnels are only present after everything has been initialized */
        list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
                /*
                 * DMA tunnels require the driver to be functional so we
                 * tear them down. Other protocol tunnels can be left
                 * intact.
                 */
                if (tb_tunnel_is_dma(tunnel))
                        tb_tunnel_deactivate(tunnel);
                tb_tunnel_free(tunnel);
        }
        tb_switch_remove(tb->root_switch);
        tcm->hotplug_active = false; /* signal tb_handle_hotplug to quit */
}

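/*
 * Called for each child device of the root switch: un-suppresses
 * uevents and announces the (possibly firmware pre-authorized) switch
 * to userspace, then recurses to its children.
 */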
static int tb_scan_finalize_switch(struct device *dev, void *data)
{
        if (tb_is_switch(dev)) {
                struct tb_switch *sw = tb_to_switch(dev);

                /*
                 * If we found that the switch was already setup by the
                 * boot firmware, mark it as authorized now before we
                 * send uevent to userspace.
                 */
                if (sw->boot)
                        sw->authorized = 1;

                dev_set_uevent_suppress(dev, false);
                kobject_uevent(&dev->kobj, KOBJ_ADD);
                device_for_each_child(dev, NULL, tb_scan_finalize_switch);
        }

        return 0;
}

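/*
 * tb_start() - bring up the root switch and discover the topology
 *
 * Configures and announces the root switch, enables its TMU, scans for
 * devices and tunnels created by the boot firmware and finally allows
 * hotplug event processing.
 */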
static int tb_start(struct tb *tb)
{
        struct tb_cm *tcm = tb_priv(tb);
        int ret;

        tb->root_switch = tb_switch_alloc(tb, &tb->dev, 0);
        if (IS_ERR(tb->root_switch))
                return PTR_ERR(tb->root_switch);

        /*
         * ICM firmware upgrade needs running firmware, and in native
         * mode that is not available, so disable firmware upgrade of
         * the root switch.
         */
        tb->root_switch->no_nvm_upgrade = true;
        /* All USB4 routers support runtime PM */
        tb->root_switch->rpm = tb_switch_is_usb4(tb->root_switch);

        ret = tb_switch_configure(tb->root_switch);
        if (ret) {
                tb_switch_put(tb->root_switch);
                return ret;
        }

        /* Announce the switch to the world */
        ret = tb_switch_add(tb->root_switch);
        if (ret) {
                tb_switch_put(tb->root_switch);
                return ret;
        }

        tb_switch_tmu_configure(tb->root_switch, TB_SWITCH_TMU_RATE_HIFI, false);
        /* Enable TMU if it is off */
        tb_switch_tmu_enable(tb->root_switch);
        /* Full scan to discover devices added before the driver was loaded. */
        tb_scan_switch(tb->root_switch);
        /* Find out tunnels created by the boot firmware */
        tb_discover_tunnels(tb);
        /*
         * If the boot firmware did not create USB 3.x tunnels, create
         * them now for the whole topology.
         */
        tb_create_usb3_tunnels(tb->root_switch);
        /* Add DP IN resources for the root switch */
        tb_add_dp_resources(tb->root_switch);
        /* Make the discovered switches available to the userspace */
        device_for_each_child(&tb->root_switch->dev, NULL,
                              tb_scan_finalize_switch);

        /* Allow tb_handle_hotplug to progress events */
        tcm->hotplug_active = true;
        return 0;
}

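/*
 * tb_suspend_noirq() - prepare the domain for system sleep
 *
 * Tears down DP tunnels and releases their resources, puts all routers
 * to sleep and pauses hotplug processing.
 */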
static int tb_suspend_noirq(struct tb *tb)
{
        struct tb_cm *tcm = tb_priv(tb);

        tb_dbg(tb, "suspending...\n");
        tb_disconnect_and_release_dp(tb);
        tb_switch_suspend(tb->root_switch, false);
        tcm->hotplug_active = false; /* signal tb_handle_hotplug to quit */
        tb_dbg(tb, "suspend finished\n");

        return 0;
}

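/*
 * Re-enables CLx, TMU, lane bonding and link/XDomain configuration for
 * every still-plugged child router after resume, recursing down the
 * topology.
 */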
static void tb_restore_children(struct tb_switch *sw)
{
        struct tb_port *port;

        /* No need to restore if the router is already unplugged */
        if (sw->is_unplugged)
                return;

        if (tb_switch_enable_clx(sw, TB_CL0S))
                tb_sw_warn(sw, "failed to re-enable CLx on upstream port\n");

        /*
         * tb_switch_tmu_configure() was already called when the switch
         * was added before entering system sleep or runtime suspend,
         * so no need to call it again before enabling TMU.
         */
        if (tb_enable_tmu(sw))
                tb_sw_warn(sw, "failed to restore TMU configuration\n");

        tb_switch_for_each_port(sw, port) {
                if (!tb_port_has_remote(port) && !port->xdomain)
                        continue;

                if (port->remote) {
                        tb_switch_lane_bonding_enable(port->remote->sw);
                        tb_switch_configure_link(port->remote->sw);

                        tb_restore_children(port->remote->sw);
                } else if (port->xdomain) {
                        tb_port_configure_xdomain(port);
                }
        }
}

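/*
 * tb_resume_noirq() - bring the domain back after system sleep
 *
 * Resets the root switch to drop any PCIe configuration done by the
 * boot firmware, resumes all routers, tears down tunnels created while
 * we were asleep and restarts our own.
 */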
static int tb_resume_noirq(struct tb *tb)
{
        struct tb_cm *tcm = tb_priv(tb);
        struct tb_tunnel *tunnel, *n;
        unsigned int usb3_delay = 0;
        LIST_HEAD(tunnels);

        tb_dbg(tb, "resuming...\n");

        /* remove any pci devices the firmware might have setup */
        tb_switch_reset(tb->root_switch);

        tb_switch_resume(tb->root_switch);
        tb_free_invalid_tunnels(tb);
        tb_free_unplugged_children(tb->root_switch);
        tb_restore_children(tb->root_switch);

        /*
         * If we get here from suspend to disk the boot firmware or the
         * restore kernel might have created tunnels of its own. Since
         * we cannot be sure they are usable for us we find and tear
         * them down.
         */
        tb_switch_discover_tunnels(tb->root_switch, &tunnels, false);
        list_for_each_entry_safe_reverse(tunnel, n, &tunnels, list) {
                if (tb_tunnel_is_usb3(tunnel))
                        usb3_delay = 500;
                tb_tunnel_deactivate(tunnel);
                tb_tunnel_free(tunnel);
        }

        /* Re-create our tunnels now */
        list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
                /* USB3 requires delay before it can be re-activated */
                if (tb_tunnel_is_usb3(tunnel)) {
                        msleep(usb3_delay);
                        /* Only need to do it once */
                        usb3_delay = 0;
                }
                tb_tunnel_restart(tunnel);
        }
        if (!list_empty(&tcm->tunnel_list)) {
                /*
                 * The PCIe links need some time to come up again.
                 * 100 ms has proven to be enough in practice.
                 */
                tb_dbg(tb, "tunnels restarted, sleeping for 100ms\n");
                msleep(100);
        }
        /* Allow tb_handle_hotplug to progress events */
        tcm->hotplug_active = true;
        tb_dbg(tb, "resume finished\n");

        return 0;
}

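/*
 * Recursively removes XDomain connections that were unplugged while we
 * were asleep. Returns the number of XDomains removed so the caller
 * can decide whether a rescan is needed.
 */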
static int tb_free_unplugged_xdomains(struct tb_switch *sw)
{
        struct tb_port *port;
        int ret = 0;

        tb_switch_for_each_port(sw, port) {
                if (tb_is_upstream_port(port))
                        continue;
                if (port->xdomain && port->xdomain->is_unplugged) {
                        tb_retimer_remove_all(port);
                        tb_xdomain_remove(port->xdomain);
                        tb_port_unconfigure_xdomain(port);
                        port->xdomain = NULL;
                        ret++;
                } else if (port->remote) {
                        ret += tb_free_unplugged_xdomains(port->remote->sw);
                }
        }

        return ret;
}

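/*
 * Freeze and thaw only gate hotplug event processing so that no
 * topology changes are acted upon while the hibernation image is being
 * created.
 */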
static int tb_freeze_noirq(struct tb *tb)
{
        struct tb_cm *tcm = tb_priv(tb);

        tcm->hotplug_active = false;
        return 0;
}

static int tb_thaw_noirq(struct tb *tb)
{
        struct tb_cm *tcm = tb_priv(tb);

        tcm->hotplug_active = true;
        return 0;
}

static void tb_complete(struct tb *tb)
{
        /*
         * Release any unplugged XDomains and, if another domain was
         * swapped in place of an unplugged XDomain, run another rescan
         * to pick it up.
         */
        mutex_lock(&tb->lock);
        if (tb_free_unplugged_xdomains(tb->root_switch))
                tb_scan_switch(tb->root_switch);
        mutex_unlock(&tb->lock);
}

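/*
 * Runtime PM counterpart of tb_suspend_noirq(): puts the routers to
 * sleep and pauses hotplug processing under the domain lock.
 */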
static int tb_runtime_suspend(struct tb *tb)
{
        struct tb_cm *tcm = tb_priv(tb);

        mutex_lock(&tb->lock);
        tb_switch_suspend(tb->root_switch, true);
        tcm->hotplug_active = false;
        mutex_unlock(&tb->lock);

        return 0;
}

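/*
 * Delayed work scheduled from tb_runtime_resume() to clean up routers
 * and XDomains that were unplugged while the domain was runtime
 * suspended.
 */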
static void tb_remove_work(struct work_struct *work)
{
        struct tb_cm *tcm = container_of(work, struct tb_cm, remove_work.work);
        struct tb *tb = tcm_to_tb(tcm);

        mutex_lock(&tb->lock);
        if (tb->root_switch) {
                tb_free_unplugged_children(tb->root_switch);
                tb_free_unplugged_xdomains(tb->root_switch);
        }
        mutex_unlock(&tb->lock);
}

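/*
 * Runtime PM counterpart of tb_resume_noirq(): resumes the routers,
 * restarts the tunnels and re-enables hotplug processing, deferring
 * removal of unplugged devices to tb_remove_work().
 */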
static int tb_runtime_resume(struct tb *tb)
{
        struct tb_cm *tcm = tb_priv(tb);
        struct tb_tunnel *tunnel, *n;

        mutex_lock(&tb->lock);
        tb_switch_resume(tb->root_switch);
        tb_free_invalid_tunnels(tb);
        tb_restore_children(tb->root_switch);
        list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list)
                tb_tunnel_restart(tunnel);
        tcm->hotplug_active = true;
        mutex_unlock(&tb->lock);

        /*
         * Schedule cleanup of any unplugged devices. Run this in a
         * separate thread to avoid possible deadlock if the device
         * removal runtime resumes the unplugged device.
         */
        queue_delayed_work(tb->wq, &tcm->remove_work, msecs_to_jiffies(50));
        return 0;
}

static const struct tb_cm_ops tb_cm_ops = {
        .start = tb_start,
        .stop = tb_stop,
        .suspend_noirq = tb_suspend_noirq,
        .resume_noirq = tb_resume_noirq,
        .freeze_noirq = tb_freeze_noirq,
        .thaw_noirq = tb_thaw_noirq,
        .complete = tb_complete,
        .runtime_suspend = tb_runtime_suspend,
        .runtime_resume = tb_runtime_resume,
        .handle_event = tb_handle_event,
        .disapprove_switch = tb_disconnect_pci,
        .approve_switch = tb_tunnel_pci,
        .approve_xdomain_paths = tb_approve_xdomain_paths,
        .disconnect_xdomain_paths = tb_disconnect_xdomain_paths,
};

/*
 * During suspend the Thunderbolt controller is reset and all PCIe
 * tunnels are lost. The NHI driver will try to reestablish all tunnels
 * during resume. This adds device links between the tunneled PCIe
 * downstream ports and the NHI so that the device core makes sure the
 * NHI is resumed before the rest.
 */
static void tb_apple_add_links(struct tb_nhi *nhi)
{
        struct pci_dev *upstream, *pdev;

        if (!x86_apple_machine)
                return;

        switch (nhi->pdev->device) {
        case PCI_DEVICE_ID_INTEL_LIGHT_RIDGE:
        case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C:
        case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_NHI:
        case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_NHI:
                break;
        default:
                return;
        }

        upstream = pci_upstream_bridge(nhi->pdev);
        while (upstream) {
                if (!pci_is_pcie(upstream))
                        return;
                if (pci_pcie_type(upstream) == PCI_EXP_TYPE_UPSTREAM)
                        break;
                upstream = pci_upstream_bridge(upstream);
        }

        if (!upstream)
                return;

        /*
         * For each hotplug downstream port, add a device link back to
         * the NHI so that PCIe tunnels can be re-established after
         * sleep.
         */
        for_each_pci_bridge(pdev, upstream->subordinate) {
                const struct device_link *link;

                if (!pci_is_pcie(pdev))
                        continue;
                if (pci_pcie_type(pdev) != PCI_EXP_TYPE_DOWNSTREAM ||
                    !pdev->is_hotplug_bridge)
                        continue;

                link = device_link_add(&pdev->dev, &nhi->pdev->dev,
                                       DL_FLAG_AUTOREMOVE_SUPPLIER |
                                       DL_FLAG_PM_RUNTIME);
                if (link) {
                        dev_dbg(&nhi->pdev->dev, "created link from %s\n",
                                dev_name(&pdev->dev));
                } else {
                        dev_warn(&nhi->pdev->dev, "device link creation from %s failed\n",
                                 dev_name(&pdev->dev));
                }
        }
}

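/*
 * tb_probe() - set up the software connection manager for an NHI
 *
 * Allocates the domain, picks the security level based on whether ACPI
 * allows PCIe tunneling and adds the device links needed on Apple
 * systems, before the domain is started.
 */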
struct tb *tb_probe(struct tb_nhi *nhi)
{
        struct tb_cm *tcm;
        struct tb *tb;

        tb = tb_domain_alloc(nhi, TB_TIMEOUT, sizeof(*tcm));
        if (!tb)
                return NULL;

        if (tb_acpi_may_tunnel_pcie())
                tb->security_level = TB_SECURITY_USER;
        else
                tb->security_level = TB_SECURITY_NOPCIE;

        tb->cm_ops = &tb_cm_ops;

        tcm = tb_priv(tb);
        INIT_LIST_HEAD(&tcm->tunnel_list);
        INIT_LIST_HEAD(&tcm->dp_resources);
        INIT_DELAYED_WORK(&tcm->remove_work, tb_remove_work);

        tb_dbg(tb, "using software connection manager\n");

        tb_apple_add_links(nhi);
        tb_acpi_add_links(nhi);

        return tb;
}