// SPDX-License-Identifier: GPL-2.0
/*
 * Thunderbolt driver - Tunneling support
 *
 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
 * Copyright (C) 2019, Intel Corporation
 */

#include <linux/slab.h>
#include <linux/list.h>

#include "tunnel.h"
#include "tb.h"

/* PCIe adapters always use HopID 8 for both directions */
#define TB_PCI_HOPID		8

#define TB_PCI_PATH_DOWN	0
#define TB_PCI_PATH_UP		1

#define __TB_TUNNEL_PRINT(level, tunnel, fmt, arg...)                   \
	do {                                                            \
		struct tb_tunnel *__tunnel = (tunnel);                  \
		level(__tunnel->tb, "%llx:%x <-> %llx:%x (PCI): " fmt,  \
		      tb_route(__tunnel->src_port->sw),                 \
		      __tunnel->src_port->port,                         \
		      tb_route(__tunnel->dst_port->sw),                 \
		      __tunnel->dst_port->port,                         \
		      ## arg);                                          \
	} while (0)

#define tb_tunnel_WARN(tunnel, fmt, arg...) \
	__TB_TUNNEL_PRINT(tb_WARN, tunnel, fmt, ##arg)
#define tb_tunnel_warn(tunnel, fmt, arg...) \
	__TB_TUNNEL_PRINT(tb_warn, tunnel, fmt, ##arg)
#define tb_tunnel_info(tunnel, fmt, arg...) \
	__TB_TUNNEL_PRINT(tb_info, tunnel, fmt, ##arg)
#define tb_tunnel_dbg(tunnel, fmt, arg...) \
	__TB_TUNNEL_PRINT(tb_dbg, tunnel, fmt, ##arg)
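
/*
 * For illustration only (not an actual log excerpt): tb_tunnel_dbg(tunnel,
 * "discovered\n") emits a message along the lines of
 * "0:3 <-> 301:1 (PCI): discovered", where each endpoint is printed as
 * <route>:<port> of the corresponding adapter.
 */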

static struct tb_tunnel *tb_tunnel_alloc(struct tb *tb, size_t npaths)
{
	struct tb_tunnel *tunnel;

	tunnel = kzalloc(sizeof(*tunnel), GFP_KERNEL);
	if (!tunnel)
		return NULL;

	tunnel->paths = kcalloc(npaths, sizeof(tunnel->paths[0]), GFP_KERNEL);
	if (!tunnel->paths) {
		tb_tunnel_free(tunnel);
		return NULL;
	}

	INIT_LIST_HEAD(&tunnel->list);
	tunnel->tb = tb;
	tunnel->npaths = npaths;

	return tunnel;
}

static int tb_pci_activate(struct tb_tunnel *tunnel, bool activate)
{
	int res;

	res = tb_pci_port_enable(tunnel->src_port, activate);
	if (res)
		return res;

	if (tb_port_is_pcie_up(tunnel->dst_port))
		return tb_pci_port_enable(tunnel->dst_port, activate);

	return 0;
}

static void tb_pci_init_path(struct tb_path *path)
{
	path->egress_fc_enable = TB_PATH_SOURCE | TB_PATH_INTERNAL;
	path->egress_shared_buffer = TB_PATH_NONE;
	path->ingress_fc_enable = TB_PATH_ALL;
	path->ingress_shared_buffer = TB_PATH_NONE;
	path->priority = 3;
	path->weight = 1;
	path->drop_packages = 0;
	path->nfc_credits = 0;
	path->hops[0].initial_credits = 7;
	path->hops[1].initial_credits = 16;
}

/**
 * tb_tunnel_discover_pci() - Discover existing PCIe tunnels
 * @tb: Pointer to the domain structure
 * @down: PCIe downstream adapter
 *
 * If the @down adapter is active, follows the tunnel to the PCIe upstream
 * adapter and back. Returns the discovered tunnel or %NULL if there was
 * no tunnel.
 */
struct tb_tunnel *tb_tunnel_discover_pci(struct tb *tb, struct tb_port *down)
{
	struct tb_tunnel *tunnel;
	struct tb_path *path;

	if (!tb_pci_port_is_enabled(down))
		return NULL;

	tunnel = tb_tunnel_alloc(tb, 2);
	if (!tunnel)
		return NULL;

	tunnel->activate = tb_pci_activate;
	tunnel->src_port = down;

	/*
	 * Discover both paths even if they are not complete. We will
	 * clean them up by calling tb_tunnel_deactivate() below in that
	 * case.
	 */
	path = tb_path_discover(down, TB_PCI_HOPID, NULL, -1,
				&tunnel->dst_port, "PCIe Up");
	if (!path) {
		/* Just disable the downstream port */
		tb_pci_port_enable(down, false);
		goto err_free;
	}
	tunnel->paths[TB_PCI_PATH_UP] = path;
	tb_pci_init_path(tunnel->paths[TB_PCI_PATH_UP]);

	path = tb_path_discover(tunnel->dst_port, -1, down, TB_PCI_HOPID, NULL,
				"PCIe Down");
	if (!path)
		goto err_deactivate;
	tunnel->paths[TB_PCI_PATH_DOWN] = path;
	tb_pci_init_path(tunnel->paths[TB_PCI_PATH_DOWN]);

	/* Validate that the tunnel is complete */
	if (!tb_port_is_pcie_up(tunnel->dst_port)) {
		tb_port_warn(tunnel->dst_port,
			     "path does not end on a PCIe adapter, cleaning up\n");
		goto err_deactivate;
	}

	if (down != tunnel->src_port) {
		tb_tunnel_warn(tunnel, "path is not complete, cleaning up\n");
		goto err_deactivate;
	}

	if (!tb_pci_port_is_enabled(tunnel->dst_port)) {
		tb_tunnel_warn(tunnel,
			       "tunnel is not fully activated, cleaning up\n");
		goto err_deactivate;
	}

	tb_tunnel_dbg(tunnel, "discovered\n");
	return tunnel;

err_deactivate:
	tb_tunnel_deactivate(tunnel);
err_free:
	tb_tunnel_free(tunnel);

	return NULL;
}
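
/*
 * Illustrative usage sketch only (the actual call sites live in the
 * connection manager code, not in this file): a caller scanning for
 * existing tunnels might do something like
 *
 *	tunnel = tb_tunnel_discover_pci(tb, down);
 *	if (tunnel)
 *		list_add_tail(&tunnel->list, &tunnel_list);
 *
 * where "tunnel_list" is a hypothetical list head owned by the caller.
 */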

/**
 * tb_tunnel_alloc_pci() - allocate a PCIe tunnel
 * @tb: Pointer to the domain structure
 * @up: PCIe upstream adapter port
 * @down: PCIe downstream adapter port
 *
 * Allocate a PCIe tunnel. The ports must be of type TB_TYPE_PCIE_UP and
 * TB_TYPE_PCIE_DOWN.
 *
 * Return: A tb_tunnel on success or %NULL on failure.
 */
struct tb_tunnel *tb_tunnel_alloc_pci(struct tb *tb, struct tb_port *up,
				      struct tb_port *down)
{
	struct tb_tunnel *tunnel;
	struct tb_path *path;

	tunnel = tb_tunnel_alloc(tb, 2);
	if (!tunnel)
		return NULL;

	tunnel->activate = tb_pci_activate;
	tunnel->src_port = down;
	tunnel->dst_port = up;

	path = tb_path_alloc(tb, down, TB_PCI_HOPID, up, TB_PCI_HOPID, 0,
			     "PCIe Down");
	if (!path) {
		tb_tunnel_free(tunnel);
		return NULL;
	}
	tb_pci_init_path(path);
	tunnel->paths[TB_PCI_PATH_UP] = path;

	path = tb_path_alloc(tb, up, TB_PCI_HOPID, down, TB_PCI_HOPID, 0,
			     "PCIe Up");
	if (!path) {
		tb_tunnel_free(tunnel);
		return NULL;
	}
	tb_pci_init_path(path);
	tunnel->paths[TB_PCI_PATH_DOWN] = path;

	return tunnel;
}
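
/*
 * Illustrative sketch only (not the driver's actual call sequence): a
 * connection manager creating a new PCIe tunnel would allocate it and
 * then activate it, freeing it again if activation fails:
 *
 *	tunnel = tb_tunnel_alloc_pci(tb, up, down);
 *	if (!tunnel)
 *		return -ENOMEM;
 *
 *	if (tb_tunnel_activate(tunnel)) {
 *		tb_tunnel_free(tunnel);
 *		return -EIO;
 *	}
 *
 * The error codes above are placeholders chosen for the example.
 */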

/**
 * tb_tunnel_free() - free a tunnel
 * @tunnel: Tunnel to be freed
 *
 * The tunnel must have been deactivated.
 */
void tb_tunnel_free(struct tb_tunnel *tunnel)
{
	int i;

	if (!tunnel)
		return;

	for (i = 0; i < tunnel->npaths; i++) {
		if (tunnel->paths[i] && tunnel->paths[i]->activated) {
			tb_tunnel_WARN(tunnel,
				       "trying to free an activated tunnel\n");
			return;
		}
	}

	for (i = 0; i < tunnel->npaths; i++) {
		if (tunnel->paths[i])
			tb_path_free(tunnel->paths[i]);
	}

	kfree(tunnel->paths);
	kfree(tunnel);
}

/**
 * tb_tunnel_is_invalid() - check whether an activated tunnel is still valid
 * @tunnel: Tunnel to check
 */
bool tb_tunnel_is_invalid(struct tb_tunnel *tunnel)
{
	int i;

	for (i = 0; i < tunnel->npaths; i++) {
		WARN_ON(!tunnel->paths[i]->activated);
		if (tb_path_is_invalid(tunnel->paths[i]))
			return true;
	}

	return false;
}

/**
 * tb_tunnel_restart() - activate a tunnel after a hardware reset
 * @tunnel: Tunnel to restart
 *
 * Return: 0 on success and negative errno in case of failure
 */
int tb_tunnel_restart(struct tb_tunnel *tunnel)
{
	int res, i;

	tb_tunnel_info(tunnel, "activating\n");

	/*
	 * Make sure all paths are properly disabled before enabling
	 * them again.
	 */
	for (i = 0; i < tunnel->npaths; i++) {
		if (tunnel->paths[i]->activated) {
			tb_path_deactivate(tunnel->paths[i]);
			tunnel->paths[i]->activated = false;
		}
	}

	for (i = 0; i < tunnel->npaths; i++) {
		res = tb_path_activate(tunnel->paths[i]);
		if (res)
			goto err;
	}

	if (tunnel->activate) {
		res = tunnel->activate(tunnel, true);
		if (res)
			goto err;
	}

	return 0;

err:
	tb_tunnel_warn(tunnel, "activation failed\n");
	tb_tunnel_deactivate(tunnel);
	return res;
}

/**
 * tb_tunnel_activate() - activate a tunnel
 * @tunnel: Tunnel to activate
 *
 * Return: 0 on success or an error code on failure.
 */
int tb_tunnel_activate(struct tb_tunnel *tunnel)
{
	int i;

	tb_tunnel_info(tunnel, "activating\n");

	for (i = 0; i < tunnel->npaths; i++) {
		if (tunnel->paths[i]->activated) {
			tb_tunnel_WARN(tunnel,
				       "trying to activate an already activated tunnel\n");
			return -EINVAL;
		}
	}

	return tb_tunnel_restart(tunnel);
}

/**
 * tb_tunnel_deactivate() - deactivate a tunnel
 * @tunnel: Tunnel to deactivate
 */
void tb_tunnel_deactivate(struct tb_tunnel *tunnel)
{
	int i;

	tb_tunnel_info(tunnel, "deactivating\n");

	if (tunnel->activate)
		tunnel->activate(tunnel, false);

	for (i = 0; i < tunnel->npaths; i++) {
		if (tunnel->paths[i] && tunnel->paths[i]->activated)
			tb_path_deactivate(tunnel->paths[i]);
	}
}
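
/*
 * Illustrative teardown sketch only (again, the real call sites are in the
 * connection manager): since tb_tunnel_free() refuses to free a tunnel with
 * activated paths, a tunnel is deactivated before it is freed:
 *
 *	tb_tunnel_deactivate(tunnel);
 *	list_del(&tunnel->list);
 *	tb_tunnel_free(tunnel);
 */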