// SPDX-License-Identifier: GPL-2.0
/*
 * Thunderbolt driver - bus logic (NHI independent)
 *
 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
 * Copyright (C) 2019, Intel Corporation
 */

#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/platform_data/x86/apple.h>

#include "tb.h"
#include "tb_regs.h"
#include "tunnel.h"

/**
 * struct tb_cm - Simple Thunderbolt connection manager
 * @tunnel_list: List of active tunnels
 * @hotplug_active: tb_handle_hotplug will stop progressing plug
 *		    events and exit if this is not set (it needs to
 *		    acquire the lock one more time). Used to drain wq
 *		    after cfg has been paused.
 */
struct tb_cm {
	struct list_head tunnel_list;
	bool hotplug_active;
};

struct tb_hotplug_event {
	struct work_struct work;
	struct tb *tb;
	u64 route;
	u8 port;
	bool unplug;
};

static void tb_handle_hotplug(struct work_struct *work);

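/*
 * Allocate a hotplug event and queue it to the domain workqueue for
 * tb_handle_hotplug() to process. The event is silently dropped if the
 * allocation fails.
 */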
static void tb_queue_hotplug(struct tb *tb, u64 route, u8 port, bool unplug)
{
	struct tb_hotplug_event *ev;

	ev = kmalloc(sizeof(*ev), GFP_KERNEL);
	if (!ev)
		return;

	ev->tb = tb;
	ev->route = route;
	ev->port = port;
	ev->unplug = unplug;
	INIT_WORK(&ev->work, tb_handle_hotplug);
	queue_work(tb->wq, &ev->work);
}

/* enumeration & hot plug handling */

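/*
 * Walk all ports of @sw, reconstruct the DP and PCIe tunnels created
 * by the boot firmware and add them to the connection manager list,
 * then recurse into the connected child switches. Switches along a
 * discovered PCIe tunnel get their boot flag set so that they can be
 * marked authorized later.
 */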
static void tb_discover_tunnels(struct tb_switch *sw)
{
	struct tb *tb = sw->tb;
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_port *port;

	tb_switch_for_each_port(sw, port) {
		struct tb_tunnel *tunnel = NULL;

		switch (port->config.type) {
		case TB_TYPE_DP_HDMI_IN:
			tunnel = tb_tunnel_discover_dp(tb, port);
			break;

		case TB_TYPE_PCIE_DOWN:
			tunnel = tb_tunnel_discover_pci(tb, port);
			break;

		default:
			break;
		}

		if (!tunnel)
			continue;

		if (tb_tunnel_is_pci(tunnel)) {
			struct tb_switch *parent = tunnel->dst_port->sw;

			while (parent != tunnel->src_port->sw) {
				parent->boot = true;
				parent = tb_switch_parent(parent);
			}
		}

		list_add_tail(&tunnel->list, &tcm->tunnel_list);
	}

	tb_switch_for_each_port(sw, port) {
		if (tb_port_has_remote(port))
			tb_discover_tunnels(port->remote->sw);
	}
}

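/*
 * Check whether the link downstream of @port leads to another host
 * (XDomain) and if so, allocate and register the XDomain, unless one
 * for the same route already exists.
 */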
static void tb_scan_xdomain(struct tb_port *port)
{
	struct tb_switch *sw = port->sw;
	struct tb *tb = sw->tb;
	struct tb_xdomain *xd;
	u64 route;

	route = tb_downstream_route(port);
	xd = tb_xdomain_find_by_route(tb, route);
	if (xd) {
		tb_xdomain_put(xd);
		return;
	}

	xd = tb_xdomain_alloc(tb, &sw->dev, route, tb->root_switch->uuid,
			      NULL);
	if (xd) {
		tb_port_at(route, sw)->xdomain = xd;
		tb_xdomain_add(xd);
	}
}

static void tb_scan_port(struct tb_port *port);

/**
 * tb_scan_switch() - scan for and initialize downstream switches
 * @sw: Switch to scan
 */
static void tb_scan_switch(struct tb_switch *sw)
{
	struct tb_port *port;

	tb_switch_for_each_port(sw, port)
		tb_scan_port(port);
}

/**
 * tb_scan_port() - check for and initialize switches below port
 * @port: Port to scan
 */
static void tb_scan_port(struct tb_port *port)
{
	struct tb_cm *tcm = tb_priv(port->sw->tb);
	struct tb_port *upstream_port;
	struct tb_switch *sw;

	if (tb_is_upstream_port(port))
		return;

	if (tb_port_is_dpout(port) && tb_dp_port_hpd_is_active(port) == 1 &&
	    !tb_dp_port_is_enabled(port)) {
		tb_port_dbg(port, "DP adapter HPD set, queuing hotplug\n");
		tb_queue_hotplug(port->sw->tb, tb_route(port->sw), port->port,
				 false);
		return;
	}

	if (port->config.type != TB_TYPE_PORT)
		return;
	if (port->dual_link_port && port->link_nr)
		return; /*
			 * Downstream switch is reachable through two ports.
			 * Only scan on the primary port (link_nr == 0).
			 */
	if (tb_wait_for_port(port, false) <= 0)
		return;
	if (port->remote) {
		tb_port_dbg(port, "port already has a remote\n");
		return;
	}
	sw = tb_switch_alloc(port->sw->tb, &port->sw->dev,
			     tb_downstream_route(port));
	if (IS_ERR(sw)) {
		/*
		 * If there is an error accessing the connected switch
		 * it may be connected to another domain. We also allow
		 * the other domain to be connected to a switch at max
		 * depth.
		 */
		if (PTR_ERR(sw) == -EIO || PTR_ERR(sw) == -EADDRNOTAVAIL)
			tb_scan_xdomain(port);
		return;
	}

	if (tb_switch_configure(sw)) {
		tb_switch_put(sw);
		return;
	}

	/*
	 * If there was previously another domain connected, remove it
	 * first.
	 */
	if (port->xdomain) {
		tb_xdomain_remove(port->xdomain);
		port->xdomain = NULL;
	}

	/*
	 * Do not send uevents until we have discovered all existing
	 * tunnels and know which switches were authorized already by
	 * the boot firmware.
	 */
	if (!tcm->hotplug_active)
		dev_set_uevent_suppress(&sw->dev, true);

	if (tb_switch_add(sw)) {
		tb_switch_put(sw);
		return;
	}

	/* Link the switches using both links if available */
	upstream_port = tb_upstream_port(sw);
	port->remote = upstream_port;
	upstream_port->remote = port;
	if (port->dual_link_port && upstream_port->dual_link_port) {
		port->dual_link_port->remote = upstream_port->dual_link_port;
		upstream_port->dual_link_port->remote = port->dual_link_port;
	}

	tb_scan_switch(sw);
}

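/*
 * Find the first tunnel of @type that starts at @src_port or ends at
 * @dst_port (a NULL port is ignored in the match), deactivate it and
 * free it. Returns -ENODEV if no matching tunnel exists.
 */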
static int tb_free_tunnel(struct tb *tb, enum tb_tunnel_type type,
			  struct tb_port *src_port, struct tb_port *dst_port)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel;

	list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
		if (tunnel->type == type &&
		    ((src_port && src_port == tunnel->src_port) ||
		     (dst_port && dst_port == tunnel->dst_port))) {
			tb_tunnel_deactivate(tunnel);
			list_del(&tunnel->list);
			tb_tunnel_free(tunnel);
			return 0;
		}
	}

	return -ENODEV;
}

/**
 * tb_free_invalid_tunnels() - destroy tunnels of devices that have gone away
 */
static void tb_free_invalid_tunnels(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel;
	struct tb_tunnel *n;

	list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
		if (tb_tunnel_is_invalid(tunnel)) {
			tb_tunnel_deactivate(tunnel);
			list_del(&tunnel->list);
			tb_tunnel_free(tunnel);
		}
	}
}

/**
 * tb_free_unplugged_children() - traverse hierarchy and free unplugged switches
 */
static void tb_free_unplugged_children(struct tb_switch *sw)
{
	struct tb_port *port;

	tb_switch_for_each_port(sw, port) {
		if (!tb_port_has_remote(port))
			continue;

		if (port->remote->sw->is_unplugged) {
			tb_switch_remove(port->remote->sw);
			port->remote = NULL;
			if (port->dual_link_port)
				port->dual_link_port->remote = NULL;
		} else {
			tb_free_unplugged_children(port->remote->sw);
		}
	}
}

/**
 * tb_find_port() - return the first port of @type on @sw or NULL
 * @sw: Switch to find the port from
 * @type: Port type to look for
 */
static struct tb_port *tb_find_port(struct tb_switch *sw,
				    enum tb_port_type type)
{
	struct tb_port *port;

	tb_switch_for_each_port(sw, port) {
		if (port->config.type == type)
			return port;
	}

	return NULL;
}

/**
 * tb_find_unused_port() - return the first inactive port on @sw
 * @sw: Switch to find the port on
 * @type: Port type to look for
 */
static struct tb_port *tb_find_unused_port(struct tb_switch *sw,
					   enum tb_port_type type)
{
	struct tb_port *port;

	tb_switch_for_each_port(sw, port) {
		if (tb_is_upstream_port(port))
			continue;
		if (port->config.type != type)
			continue;
		if (port->cap_adap)
			continue;
		if (tb_port_is_enabled(port))
			continue;
		return port;
	}
	return NULL;
}

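/*
 * Pick the PCIe down adapter to use for @port. On the root switch the
 * Thunderbolt port to PCIe down adapter mapping is hard-coded per
 * controller so that devices always end up in the same PCIe hierarchy;
 * everywhere else any unused PCIe down adapter on @sw will do.
 */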
static struct tb_port *tb_find_pcie_down(struct tb_switch *sw,
					 const struct tb_port *port)
{
	/*
	 * To keep plugging devices consistently in the same PCIe
	 * hierarchy, do mapping here for root switch downstream PCIe
	 * ports.
	 */
	if (!tb_route(sw)) {
		int phy_port = tb_phy_port_from_link(port->port);
		int index;

		/*
		 * Hard-coded Thunderbolt port to PCIe down port mapping
		 * per controller.
		 */
		if (tb_switch_is_cr(sw))
			index = !phy_port ? 6 : 7;
		else if (tb_switch_is_fr(sw))
			index = !phy_port ? 6 : 8;
		else
			goto out;

		/* Validate the hard-coding */
		if (WARN_ON(index > sw->config.max_port_number))
			goto out;
		if (WARN_ON(!tb_port_is_pcie_down(&sw->ports[index])))
			goto out;
		if (WARN_ON(tb_pci_port_is_enabled(&sw->ports[index])))
			goto out;

		return &sw->ports[index];
	}

out:
	return tb_find_unused_port(sw, TB_TYPE_PCIE_DOWN);
}

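/*
 * Establish a DP tunnel to the DP OUT adapter @out. The matching DP IN
 * adapter is taken from the closest ancestor switch that has one
 * unused. Returns 0 without doing anything if @out is already enabled
 * or no DP IN adapter is found.
 */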
static int tb_tunnel_dp(struct tb *tb, struct tb_port *out)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_switch *sw = out->sw;
	struct tb_tunnel *tunnel;
	struct tb_port *in;

	if (tb_port_is_enabled(out))
		return 0;

	do {
		sw = tb_to_switch(sw->dev.parent);
		if (!sw)
			return 0;
		in = tb_find_unused_port(sw, TB_TYPE_DP_HDMI_IN);
	} while (!in);

	tunnel = tb_tunnel_alloc_dp(tb, in, out);
	if (!tunnel) {
		tb_port_dbg(out, "DP tunnel allocation failed\n");
		return -ENOMEM;
	}

	if (tb_tunnel_activate(tunnel)) {
		tb_port_info(out, "DP tunnel activation failed, aborting\n");
		tb_tunnel_free(tunnel);
		return -EIO;
	}

	list_add_tail(&tunnel->list, &tcm->tunnel_list);
	return 0;
}

static void tb_teardown_dp(struct tb *tb, struct tb_port *out)
{
	tb_free_tunnel(tb, TB_TUNNEL_DP, NULL, out);
}

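/*
 * Establish a PCIe tunnel between the PCIe up adapter of @sw and a
 * PCIe down adapter on the parent switch. Used as the ->approve_switch
 * hook of this connection manager. Returns 0 without doing anything if
 * @sw has no PCIe up adapter or no down adapter is available.
 */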
static int tb_tunnel_pci(struct tb *tb, struct tb_switch *sw)
{
	struct tb_port *up, *down, *port;
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_switch *parent_sw;
	struct tb_tunnel *tunnel;

	up = tb_find_port(sw, TB_TYPE_PCIE_UP);
	if (!up)
		return 0;

	/*
	 * Look up an available down port. Since we are chaining, it
	 * should be found right above this switch.
	 */
	parent_sw = tb_to_switch(sw->dev.parent);
	port = tb_port_at(tb_route(sw), parent_sw);
	down = tb_find_pcie_down(parent_sw, port);
	if (!down)
		return 0;

	tunnel = tb_tunnel_alloc_pci(tb, up, down);
	if (!tunnel)
		return -ENOMEM;

	if (tb_tunnel_activate(tunnel)) {
		tb_port_info(up,
			     "PCIe tunnel activation failed, aborting\n");
		tb_tunnel_free(tunnel);
		return -EIO;
	}

	list_add_tail(&tunnel->list, &tcm->tunnel_list);
	return 0;
}

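/*
 * Set up a DMA tunnel for XDomain networking: connects the NHI port of
 * the root switch to the port behind which the remote host @xd sits,
 * using the transmit/receive rings and paths stored in @xd.
 */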
static int tb_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_port *nhi_port, *dst_port;
	struct tb_tunnel *tunnel;
	struct tb_switch *sw;

	sw = tb_to_switch(xd->dev.parent);
	dst_port = tb_port_at(xd->route, sw);
	nhi_port = tb_find_port(tb->root_switch, TB_TYPE_NHI);

	mutex_lock(&tb->lock);
	tunnel = tb_tunnel_alloc_dma(tb, nhi_port, dst_port, xd->transmit_ring,
				     xd->transmit_path, xd->receive_ring,
				     xd->receive_path);
	if (!tunnel) {
		mutex_unlock(&tb->lock);
		return -ENOMEM;
	}

	if (tb_tunnel_activate(tunnel)) {
		tb_port_info(nhi_port,
			     "DMA tunnel activation failed, aborting\n");
		tb_tunnel_free(tunnel);
		mutex_unlock(&tb->lock);
		return -EIO;
	}

	list_add_tail(&tunnel->list, &tcm->tunnel_list);
	mutex_unlock(&tb->lock);
	return 0;
}

static void __tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
{
	struct tb_port *dst_port;
	struct tb_switch *sw;

	sw = tb_to_switch(xd->dev.parent);
	dst_port = tb_port_at(xd->route, sw);

	/*
	 * It is possible that the tunnel was already torn down (in
	 * case of cable disconnect) so it is fine if we cannot find it
	 * here anymore.
	 */
	tb_free_tunnel(tb, TB_TUNNEL_DMA, NULL, dst_port);
}

static int tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
{
	if (!xd->is_unplugged) {
		mutex_lock(&tb->lock);
		__tb_disconnect_xdomain_paths(tb, xd);
		mutex_unlock(&tb->lock);
	}
	return 0;
}

/* hotplug handling */

/**
 * tb_handle_hotplug() - handle hotplug event
 *
 * Executes on tb->wq.
 */
static void tb_handle_hotplug(struct work_struct *work)
{
	struct tb_hotplug_event *ev = container_of(work, typeof(*ev), work);
	struct tb *tb = ev->tb;
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_switch *sw;
	struct tb_port *port;

	mutex_lock(&tb->lock);
	if (!tcm->hotplug_active)
		goto out; /* during init, suspend or shutdown */

	sw = tb_switch_find_by_route(tb, ev->route);
	if (!sw) {
		tb_warn(tb,
			"hotplug event from non-existent switch %llx:%x (unplug: %d)\n",
			ev->route, ev->port, ev->unplug);
		goto out;
	}
	if (ev->port > sw->config.max_port_number) {
		tb_warn(tb,
			"hotplug event from non-existent port %llx:%x (unplug: %d)\n",
			ev->route, ev->port, ev->unplug);
		goto put_sw;
	}
	port = &sw->ports[ev->port];
	if (tb_is_upstream_port(port)) {
		tb_dbg(tb, "hotplug event for upstream port %llx:%x (unplug: %d)\n",
		       ev->route, ev->port, ev->unplug);
		goto put_sw;
	}
	if (ev->unplug) {
		if (tb_port_has_remote(port)) {
			tb_port_dbg(port, "switch unplugged\n");
			tb_sw_set_unplugged(port->remote->sw);
			tb_free_invalid_tunnels(tb);
			tb_switch_remove(port->remote->sw);
			port->remote = NULL;
			if (port->dual_link_port)
				port->dual_link_port->remote = NULL;
		} else if (port->xdomain) {
			struct tb_xdomain *xd = tb_xdomain_get(port->xdomain);

			tb_port_dbg(port, "xdomain unplugged\n");
			/*
			 * Service drivers are unbound during
			 * tb_xdomain_remove() so setting XDomain as
			 * unplugged here prevents deadlock if they call
			 * tb_xdomain_disable_paths(). We will tear down
			 * the path below.
			 */
			xd->is_unplugged = true;
			tb_xdomain_remove(xd);
			port->xdomain = NULL;
			__tb_disconnect_xdomain_paths(tb, xd);
			tb_xdomain_put(xd);
		} else if (tb_port_is_dpout(port)) {
			tb_teardown_dp(tb, port);
		} else {
			tb_port_dbg(port,
				    "got unplug event for disconnected port, ignoring\n");
		}
	} else if (port->remote) {
		tb_port_dbg(port, "got plug event for connected port, ignoring\n");
	} else {
		if (tb_port_is_null(port)) {
			tb_port_dbg(port, "hotplug: scanning\n");
			tb_scan_port(port);
			if (!port->remote)
				tb_port_dbg(port, "hotplug: no switch found\n");
		} else if (tb_port_is_dpout(port)) {
			tb_tunnel_dp(tb, port);
		}
	}

put_sw:
	tb_switch_put(sw);
out:
	mutex_unlock(&tb->lock);
	kfree(ev);
}

/**
 * tb_handle_event() - callback function for the control channel
 *
 * Delegates to tb_handle_hotplug.
 */
static void tb_handle_event(struct tb *tb, enum tb_cfg_pkg_type type,
			    const void *buf, size_t size)
{
	const struct cfg_event_pkg *pkg = buf;
	u64 route;

	if (type != TB_CFG_PKG_EVENT) {
		tb_warn(tb, "unexpected event %#x, ignoring\n", type);
		return;
	}

	route = tb_cfg_get_route(&pkg->header);

	if (tb_cfg_error(tb->ctl, route, pkg->port,
			 TB_CFG_ERROR_ACK_PLUG_EVENT)) {
		tb_warn(tb, "could not ack plug event on %llx:%x\n", route,
			pkg->port);
	}

	tb_queue_hotplug(tb, route, pkg->port, pkg->unplug);
}

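/*
 * Tear down the domain: free all tunnels (deactivating only the DMA
 * ones), remove the switch hierarchy and stop hotplug processing.
 */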
static void tb_stop(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel;
	struct tb_tunnel *n;

	/* tunnels are only present after everything has been initialized */
	list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
		/*
		 * DMA tunnels require the driver to be functional so we
		 * tear them down. Other protocol tunnels can be left
		 * intact.
		 */
		if (tb_tunnel_is_dma(tunnel))
			tb_tunnel_deactivate(tunnel);
		tb_tunnel_free(tunnel);
	}
	tb_switch_remove(tb->root_switch);
	tcm->hotplug_active = false; /* signal tb_handle_hotplug to quit */
}

static int tb_scan_finalize_switch(struct device *dev, void *data)
{
	if (tb_is_switch(dev)) {
		struct tb_switch *sw = tb_to_switch(dev);

		/*
		 * If we found that the switch was already set up by the
		 * boot firmware, mark it as authorized now before we
		 * send uevent to userspace.
		 */
		if (sw->boot)
			sw->authorized = 1;

		dev_set_uevent_suppress(dev, false);
		kobject_uevent(&dev->kobj, KOBJ_ADD);
		device_for_each_child(dev, NULL, tb_scan_finalize_switch);
	}

	return 0;
}

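/*
 * Bring up the domain: allocate and announce the root switch, scan for
 * devices connected before the driver was loaded, discover tunnels
 * created by the boot firmware, expose the results to userspace and
 * finally enable hotplug processing.
 */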
static int tb_start(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);
	int ret;

	tb->root_switch = tb_switch_alloc(tb, &tb->dev, 0);
	if (IS_ERR(tb->root_switch))
		return PTR_ERR(tb->root_switch);

	/*
	 * ICM firmware upgrade needs running firmware and in native
	 * mode that is not available so disable firmware upgrade of the
	 * root switch.
	 */
	tb->root_switch->no_nvm_upgrade = true;

	ret = tb_switch_configure(tb->root_switch);
	if (ret) {
		tb_switch_put(tb->root_switch);
		return ret;
	}

	/* Announce the switch to the world */
	ret = tb_switch_add(tb->root_switch);
	if (ret) {
		tb_switch_put(tb->root_switch);
		return ret;
	}

	/* Full scan to discover devices added before the driver was loaded. */
	tb_scan_switch(tb->root_switch);
	/* Find out tunnels created by the boot firmware */
	tb_discover_tunnels(tb->root_switch);
	/* Make the discovered switches available to the userspace */
	device_for_each_child(&tb->root_switch->dev, NULL,
			      tb_scan_finalize_switch);

	/* Allow tb_handle_hotplug to progress events */
	tcm->hotplug_active = true;
	return 0;
}

static int tb_suspend_noirq(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);

	tb_dbg(tb, "suspending...\n");
	tb_switch_suspend(tb->root_switch);
	tcm->hotplug_active = false; /* signal tb_handle_hotplug to quit */
	tb_dbg(tb, "suspend finished\n");

	return 0;
}

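/*
 * Resume the domain: reset and resume the switches, drop tunnels and
 * children that went away during suspend and restart the tunnels that
 * are still valid.
 */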
static int tb_resume_noirq(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel, *n;

	tb_dbg(tb, "resuming...\n");

	/* remove any pci devices the firmware might have set up */
	tb_switch_reset(tb, 0);

	tb_switch_resume(tb->root_switch);
	tb_free_invalid_tunnels(tb);
	tb_free_unplugged_children(tb->root_switch);
	list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list)
		tb_tunnel_restart(tunnel);
	if (!list_empty(&tcm->tunnel_list)) {
		/*
		 * The PCIe links need some time to get going.
		 * 100ms works for me...
		 */
		tb_dbg(tb, "tunnels restarted, sleeping for 100ms\n");
		msleep(100);
	}
	/* Allow tb_handle_hotplug to progress events */
	tcm->hotplug_active = true;
	tb_dbg(tb, "resume finished\n");

	return 0;
}

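/*
 * Remove XDomain connections that were unplugged while suspended.
 * Returns the number of XDomains removed so the caller can decide
 * whether a rescan is needed.
 */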
static int tb_free_unplugged_xdomains(struct tb_switch *sw)
{
	struct tb_port *port;
	int ret = 0;

	tb_switch_for_each_port(sw, port) {
		if (tb_is_upstream_port(port))
			continue;
		if (port->xdomain && port->xdomain->is_unplugged) {
			tb_xdomain_remove(port->xdomain);
			port->xdomain = NULL;
			ret++;
		} else if (port->remote) {
			ret += tb_free_unplugged_xdomains(port->remote->sw);
		}
	}

	return ret;
}

static void tb_complete(struct tb *tb)
{
	/*
	 * Release any unplugged XDomains; if another domain was swapped
	 * in place of an unplugged XDomain we need to run another
	 * rescan to pick it up.
	 */
	mutex_lock(&tb->lock);
	if (tb_free_unplugged_xdomains(tb->root_switch))
		tb_scan_switch(tb->root_switch);
	mutex_unlock(&tb->lock);
}

static const struct tb_cm_ops tb_cm_ops = {
	.start = tb_start,
	.stop = tb_stop,
	.suspend_noirq = tb_suspend_noirq,
	.resume_noirq = tb_resume_noirq,
	.complete = tb_complete,
	.handle_event = tb_handle_event,
	.approve_switch = tb_tunnel_pci,
	.approve_xdomain_paths = tb_approve_xdomain_paths,
	.disconnect_xdomain_paths = tb_disconnect_xdomain_paths,
};

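/*
 * Probe entry point for this connection manager. Returns NULL unless
 * running on Apple hardware (x86_apple_machine), since only those
 * systems use this software connection manager. The returned domain
 * has struct tb_cm as its private data.
 */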
struct tb *tb_probe(struct tb_nhi *nhi)
{
	struct tb_cm *tcm;
	struct tb *tb;

	if (!x86_apple_machine)
		return NULL;

	tb = tb_domain_alloc(nhi, sizeof(*tcm));
	if (!tb)
		return NULL;

	tb->security_level = TB_SECURITY_USER;
	tb->cm_ops = &tb_cm_ops;

	tcm = tb_priv(tb);
	INIT_LIST_HEAD(&tcm->tunnel_list);

	return tb;
}