// SPDX-License-Identifier: GPL-2.0
/*
 * Thunderbolt driver - Tunneling support
 *
 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
 * Copyright (C) 2019, Intel Corporation
 */

#include <linux/slab.h>
#include <linux/list.h>

#include "tunnel.h"
#include "tb.h"

/* PCIe adapters always use HopID 8 for both directions */
#define TB_PCI_HOPID			8

#define TB_PCI_PATH_DOWN		0
#define TB_PCI_PATH_UP			1

/* DP adapters use HopID 8 for AUX and 9 for Video */
#define TB_DP_AUX_TX_HOPID		8
#define TB_DP_AUX_RX_HOPID		8
#define TB_DP_VIDEO_HOPID		9

#define TB_DP_VIDEO_PATH_OUT		0
#define TB_DP_AUX_PATH_OUT		1
#define TB_DP_AUX_PATH_IN		2

static const char * const tb_tunnel_names[] = { "PCI", "DP" };

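/*
 * Print helpers that prefix each message with both tunnel end points
 * (route and adapter port) and the tunnel type.
 */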
#define __TB_TUNNEL_PRINT(level, tunnel, fmt, arg...)                   \
	do {                                                            \
		struct tb_tunnel *__tunnel = (tunnel);                  \
		level(__tunnel->tb, "%llx:%x <-> %llx:%x (%s): " fmt,   \
		      tb_route(__tunnel->src_port->sw),                 \
		      __tunnel->src_port->port,                         \
		      tb_route(__tunnel->dst_port->sw),                 \
		      __tunnel->dst_port->port,                         \
		      tb_tunnel_names[__tunnel->type],                  \
		      ## arg);                                          \
	} while (0)

#define tb_tunnel_WARN(tunnel, fmt, arg...) \
	__TB_TUNNEL_PRINT(tb_WARN, tunnel, fmt, ##arg)
#define tb_tunnel_warn(tunnel, fmt, arg...) \
	__TB_TUNNEL_PRINT(tb_warn, tunnel, fmt, ##arg)
#define tb_tunnel_info(tunnel, fmt, arg...) \
	__TB_TUNNEL_PRINT(tb_info, tunnel, fmt, ##arg)
#define tb_tunnel_dbg(tunnel, fmt, arg...) \
	__TB_TUNNEL_PRINT(tb_dbg, tunnel, fmt, ##arg)

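/*
 * Allocates the tunnel structure together with room for @npaths paths.
 * The caller is expected to fill in the paths and the init/activate
 * callbacks afterwards.
 */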
static struct tb_tunnel *tb_tunnel_alloc(struct tb *tb, size_t npaths,
					 enum tb_tunnel_type type)
{
	struct tb_tunnel *tunnel;

	tunnel = kzalloc(sizeof(*tunnel), GFP_KERNEL);
	if (!tunnel)
		return NULL;

	tunnel->paths = kcalloc(npaths, sizeof(tunnel->paths[0]), GFP_KERNEL);
	if (!tunnel->paths) {
		tb_tunnel_free(tunnel);
		return NULL;
	}

	INIT_LIST_HEAD(&tunnel->list);
	tunnel->tb = tb;
	tunnel->npaths = npaths;
	tunnel->type = type;

	return tunnel;
}

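/*
 * Enables or disables the PCIe adapters on both ends of the tunnel.
 * During discovery cleanup the destination port may not be a PCIe
 * upstream adapter, so only touch it if it really is one.
 */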
static int tb_pci_activate(struct tb_tunnel *tunnel, bool activate)
{
	int res;

	res = tb_pci_port_enable(tunnel->src_port, activate);
	if (res)
		return res;

	if (tb_port_is_pcie_up(tunnel->dst_port))
		return tb_pci_port_enable(tunnel->dst_port, activate);

	return 0;
}

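/*
 * PCIe path settings: ingress flow control everywhere, egress flow
 * control on all but the last hop, priority 3, and fixed initial
 * credits for the first two hops of the path.
 */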
static void tb_pci_init_path(struct tb_path *path)
{
	path->egress_fc_enable = TB_PATH_SOURCE | TB_PATH_INTERNAL;
	path->egress_shared_buffer = TB_PATH_NONE;
	path->ingress_fc_enable = TB_PATH_ALL;
	path->ingress_shared_buffer = TB_PATH_NONE;
	path->priority = 3;
	path->weight = 1;
	path->drop_packages = 0;
	path->nfc_credits = 0;
	path->hops[0].initial_credits = 7;
	path->hops[1].initial_credits = 16;
}

/**
 * tb_tunnel_discover_pci() - Discover existing PCIe tunnels
 * @tb: Pointer to the domain structure
 * @down: PCIe downstream adapter
 *
 * If @down adapter is active, follows the tunnel to the PCIe upstream
 * adapter and back.
 *
 * Return: The discovered tunnel or %NULL if there was no tunnel.
 */
struct tb_tunnel *tb_tunnel_discover_pci(struct tb *tb, struct tb_port *down)
{
	struct tb_tunnel *tunnel;
	struct tb_path *path;

	if (!tb_pci_port_is_enabled(down))
		return NULL;

	tunnel = tb_tunnel_alloc(tb, 2, TB_TUNNEL_PCI);
	if (!tunnel)
		return NULL;

	tunnel->activate = tb_pci_activate;
	tunnel->src_port = down;

	/*
	 * Discover both paths even if they are not complete. We will
	 * clean them up by calling tb_tunnel_deactivate() below in that
	 * case.
	 */
	path = tb_path_discover(down, TB_PCI_HOPID, NULL, -1,
				&tunnel->dst_port, "PCIe Up");
	if (!path) {
		/* Just disable the downstream port */
		tb_pci_port_enable(down, false);
		goto err_free;
	}
	tunnel->paths[TB_PCI_PATH_UP] = path;
	tb_pci_init_path(tunnel->paths[TB_PCI_PATH_UP]);

	path = tb_path_discover(tunnel->dst_port, -1, down, TB_PCI_HOPID, NULL,
				"PCIe Down");
	if (!path)
		goto err_deactivate;
	tunnel->paths[TB_PCI_PATH_DOWN] = path;
	tb_pci_init_path(tunnel->paths[TB_PCI_PATH_DOWN]);

	/* Validate that the tunnel is complete */
	if (!tb_port_is_pcie_up(tunnel->dst_port)) {
		tb_port_warn(tunnel->dst_port,
			     "path does not end on a PCIe adapter, cleaning up\n");
		goto err_deactivate;
	}

	if (down != tunnel->src_port) {
		tb_tunnel_warn(tunnel, "path is not complete, cleaning up\n");
		goto err_deactivate;
	}

	if (!tb_pci_port_is_enabled(tunnel->dst_port)) {
		tb_tunnel_warn(tunnel,
			       "tunnel is not fully activated, cleaning up\n");
		goto err_deactivate;
	}

	tb_tunnel_dbg(tunnel, "discovered\n");
	return tunnel;

err_deactivate:
	tb_tunnel_deactivate(tunnel);
err_free:
	tb_tunnel_free(tunnel);

	return NULL;
}

/**
 * tb_tunnel_alloc_pci() - allocate a PCIe tunnel
 * @tb: Pointer to the domain structure
 * @up: PCIe upstream adapter port
 * @down: PCIe downstream adapter port
 *
 * Allocate a PCIe tunnel. The ports must be of type %TB_TYPE_PCIE_UP
 * and %TB_TYPE_PCIE_DOWN.
 *
 * Return: A tb_tunnel on success or %NULL on failure.
 */
struct tb_tunnel *tb_tunnel_alloc_pci(struct tb *tb, struct tb_port *up,
				      struct tb_port *down)
{
	struct tb_tunnel *tunnel;
	struct tb_path *path;

	tunnel = tb_tunnel_alloc(tb, 2, TB_TUNNEL_PCI);
	if (!tunnel)
		return NULL;

	tunnel->activate = tb_pci_activate;
	tunnel->src_port = down;
	tunnel->dst_port = up;

	path = tb_path_alloc(tb, down, TB_PCI_HOPID, up, TB_PCI_HOPID, 0,
			     "PCIe Down");
	if (!path) {
		tb_tunnel_free(tunnel);
		return NULL;
	}
	tb_pci_init_path(path);
	tunnel->paths[TB_PCI_PATH_UP] = path;

	path = tb_path_alloc(tb, up, TB_PCI_HOPID, down, TB_PCI_HOPID, 0,
			     "PCIe Up");
	if (!path) {
		tb_tunnel_free(tunnel);
		return NULL;
	}
	tb_pci_init_path(path);
	tunnel->paths[TB_PCI_PATH_DOWN] = path;

	return tunnel;
}

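/*
 * Exchange the DP_LOCAL_CAP registers of the two adapters by writing
 * each into the other end's DP_REMOTE_CAP. This is only needed (and
 * done) on generation 2 and newer hardware.
 */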
static int tb_dp_xchg_caps(struct tb_tunnel *tunnel)
{
	struct tb_port *out = tunnel->dst_port;
	struct tb_port *in = tunnel->src_port;
	u32 in_dp_cap, out_dp_cap;
	int ret;

	/*
	 * Copy DP_LOCAL_CAP register to DP_REMOTE_CAP register for
	 * newer generation hardware.
	 */
	if (in->sw->generation < 2 || out->sw->generation < 2)
		return 0;

	/* Read both DP_LOCAL_CAP registers */
	ret = tb_port_read(in, &in_dp_cap, TB_CFG_PORT,
			   in->cap_adap + TB_DP_LOCAL_CAP, 1);
	if (ret)
		return ret;

	ret = tb_port_read(out, &out_dp_cap, TB_CFG_PORT,
			   out->cap_adap + TB_DP_LOCAL_CAP, 1);
	if (ret)
		return ret;

	/* Write IN local caps to OUT remote caps */
	ret = tb_port_write(out, &in_dp_cap, TB_CFG_PORT,
			    out->cap_adap + TB_DP_REMOTE_CAP, 1);
	if (ret)
		return ret;

	return tb_port_write(in, &out_dp_cap, TB_CFG_PORT,
			     in->cap_adap + TB_DP_REMOTE_CAP, 1);
}

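/*
 * Programs the Video and AUX HopIDs from the paths into the DP
 * adapters on both ends and then enables or disables the adapters.
 * On deactivation the hot plug state of the DP IN adapter is cleared
 * and the hop configuration zeroed first.
 */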
static int tb_dp_activate(struct tb_tunnel *tunnel, bool active)
{
	int ret;

	if (active) {
		struct tb_path **paths;
		int last;

		paths = tunnel->paths;
		last = paths[TB_DP_VIDEO_PATH_OUT]->path_length - 1;

		tb_dp_port_set_hops(tunnel->src_port,
			paths[TB_DP_VIDEO_PATH_OUT]->hops[0].in_hop_index,
			paths[TB_DP_AUX_PATH_OUT]->hops[0].in_hop_index,
			paths[TB_DP_AUX_PATH_IN]->hops[last].next_hop_index);

		tb_dp_port_set_hops(tunnel->dst_port,
			paths[TB_DP_VIDEO_PATH_OUT]->hops[last].next_hop_index,
			paths[TB_DP_AUX_PATH_IN]->hops[0].in_hop_index,
			paths[TB_DP_AUX_PATH_OUT]->hops[last].next_hop_index);
	} else {
		tb_dp_port_hpd_clear(tunnel->src_port);
		tb_dp_port_set_hops(tunnel->src_port, 0, 0, 0);
		if (tb_port_is_dpout(tunnel->dst_port))
			tb_dp_port_set_hops(tunnel->dst_port, 0, 0, 0);
	}

	ret = tb_dp_port_enable(tunnel->src_port, active);
	if (ret)
		return ret;

	if (tb_port_is_dpout(tunnel->dst_port))
		return tb_dp_port_enable(tunnel->dst_port, active);

	return 0;
}

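/*
 * AUX is a low bandwidth, flow controlled channel so a single initial
 * credit per hop is enough.
 */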
static void tb_dp_init_aux_path(struct tb_path *path)
{
	int i;

	path->egress_fc_enable = TB_PATH_SOURCE | TB_PATH_INTERNAL;
	path->egress_shared_buffer = TB_PATH_NONE;
	path->ingress_fc_enable = TB_PATH_ALL;
	path->ingress_shared_buffer = TB_PATH_NONE;
	path->priority = 2;
	path->weight = 1;

	for (i = 0; i < path->path_length; i++)
		path->hops[i].initial_credits = 1;
}

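/*
 * Video is not flow controlled. For a discovered path, keep the
 * non-flow-controlled credits already programmed to the adapter; for
 * a newly allocated path, claim up to 12 credits, leaving a couple
 * for the AUX path.
 */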
static void tb_dp_init_video_path(struct tb_path *path, bool discover)
{
	u32 nfc_credits = path->hops[0].in_port->config.nfc_credits;

	path->egress_fc_enable = TB_PATH_NONE;
	path->egress_shared_buffer = TB_PATH_NONE;
	path->ingress_fc_enable = TB_PATH_NONE;
	path->ingress_shared_buffer = TB_PATH_NONE;
	path->priority = 1;
	path->weight = 1;

	if (discover) {
		path->nfc_credits = nfc_credits & TB_PORT_NFC_CREDITS_MASK;
	} else {
		u32 max_credits;

		max_credits = (nfc_credits & TB_PORT_MAX_CREDITS_MASK) >>
			TB_PORT_MAX_CREDITS_SHIFT;
		/* Leave some credits for AUX path */
		path->nfc_credits = min(max_credits - 2, 12U);
	}
}

/**
 * tb_tunnel_discover_dp() - Discover existing Display Port tunnels
 * @tb: Pointer to the domain structure
 * @in: DP in adapter
 *
 * If @in adapter is active, follows the tunnel to the DP out adapter
 * and back.
 *
 * Return: DP tunnel or %NULL if no tunnel found.
 */
struct tb_tunnel *tb_tunnel_discover_dp(struct tb *tb, struct tb_port *in)
{
	struct tb_tunnel *tunnel;
	struct tb_port *port;
	struct tb_path *path;

	if (!tb_dp_port_is_enabled(in))
		return NULL;

	tunnel = tb_tunnel_alloc(tb, 3, TB_TUNNEL_DP);
	if (!tunnel)
		return NULL;

	tunnel->init = tb_dp_xchg_caps;
	tunnel->activate = tb_dp_activate;
	tunnel->src_port = in;

	path = tb_path_discover(in, TB_DP_VIDEO_HOPID, NULL, -1,
				&tunnel->dst_port, "Video");
	if (!path) {
		/* Just disable the DP IN port */
		tb_dp_port_enable(in, false);
		goto err_free;
	}
	tunnel->paths[TB_DP_VIDEO_PATH_OUT] = path;
	tb_dp_init_video_path(tunnel->paths[TB_DP_VIDEO_PATH_OUT], true);

	path = tb_path_discover(in, TB_DP_AUX_TX_HOPID, NULL, -1, NULL, "AUX TX");
	if (!path)
		goto err_deactivate;
	tunnel->paths[TB_DP_AUX_PATH_OUT] = path;
	tb_dp_init_aux_path(tunnel->paths[TB_DP_AUX_PATH_OUT]);

	path = tb_path_discover(tunnel->dst_port, -1, in, TB_DP_AUX_RX_HOPID,
				&port, "AUX RX");
	if (!path)
		goto err_deactivate;
	tunnel->paths[TB_DP_AUX_PATH_IN] = path;
	tb_dp_init_aux_path(tunnel->paths[TB_DP_AUX_PATH_IN]);

	/* Validate that the tunnel is complete */
	if (!tb_port_is_dpout(tunnel->dst_port)) {
		tb_port_warn(in, "path does not end on a DP adapter, cleaning up\n");
		goto err_deactivate;
	}

	if (!tb_dp_port_is_enabled(tunnel->dst_port))
		goto err_deactivate;

	if (!tb_dp_port_hpd_is_active(tunnel->dst_port))
		goto err_deactivate;

	if (port != tunnel->src_port) {
		tb_tunnel_warn(tunnel, "path is not complete, cleaning up\n");
		goto err_deactivate;
	}

	tb_tunnel_dbg(tunnel, "discovered\n");
	return tunnel;

err_deactivate:
	tb_tunnel_deactivate(tunnel);
err_free:
	tb_tunnel_free(tunnel);

	return NULL;
}

/**
 * tb_tunnel_alloc_dp() - allocate a Display Port tunnel
 * @tb: Pointer to the domain structure
 * @in: DP in adapter port
 * @out: DP out adapter port
 *
 * Allocates a tunnel between @in and @out that is capable of tunneling
 * Display Port traffic.
 *
 * Return: A tb_tunnel on success or %NULL on failure.
 */
struct tb_tunnel *tb_tunnel_alloc_dp(struct tb *tb, struct tb_port *in,
				     struct tb_port *out)
{
	struct tb_tunnel *tunnel;
	struct tb_path **paths;
	struct tb_path *path;

	if (WARN_ON(!in->cap_adap || !out->cap_adap))
		return NULL;

	tunnel = tb_tunnel_alloc(tb, 3, TB_TUNNEL_DP);
	if (!tunnel)
		return NULL;

	tunnel->init = tb_dp_xchg_caps;
	tunnel->activate = tb_dp_activate;
	tunnel->src_port = in;
	tunnel->dst_port = out;

	paths = tunnel->paths;

	path = tb_path_alloc(tb, in, TB_DP_VIDEO_HOPID, out, TB_DP_VIDEO_HOPID,
			     1, "Video");
	if (!path)
		goto err_free;
	tb_dp_init_video_path(path, false);
	paths[TB_DP_VIDEO_PATH_OUT] = path;

	path = tb_path_alloc(tb, in, TB_DP_AUX_TX_HOPID, out,
			     TB_DP_AUX_TX_HOPID, 1, "AUX TX");
	if (!path)
		goto err_free;
	tb_dp_init_aux_path(path);
	paths[TB_DP_AUX_PATH_OUT] = path;

	path = tb_path_alloc(tb, out, TB_DP_AUX_RX_HOPID, in,
			     TB_DP_AUX_RX_HOPID, 1, "AUX RX");
	if (!path)
		goto err_free;
	tb_dp_init_aux_path(path);
	paths[TB_DP_AUX_PATH_IN] = path;

	return tunnel;

err_free:
	tb_tunnel_free(tunnel);
	return NULL;
}

/**
 * tb_tunnel_free() - free a tunnel
 * @tunnel: Tunnel to be freed
 *
 * The tunnel must have been deactivated.
 */
void tb_tunnel_free(struct tb_tunnel *tunnel)
{
	int i;

	if (!tunnel)
		return;

	for (i = 0; i < tunnel->npaths; i++) {
		if (tunnel->paths[i] && tunnel->paths[i]->activated) {
			tb_tunnel_WARN(tunnel,
				       "trying to free an activated tunnel\n");
			return;
		}
	}

	for (i = 0; i < tunnel->npaths; i++) {
		if (tunnel->paths[i])
			tb_path_free(tunnel->paths[i]);
	}

	kfree(tunnel->paths);
	kfree(tunnel);
}

/**
 * tb_tunnel_is_invalid() - check whether an activated tunnel is still valid
 * @tunnel: Tunnel to check
 */
bool tb_tunnel_is_invalid(struct tb_tunnel *tunnel)
{
	int i;

	for (i = 0; i < tunnel->npaths; i++) {
		WARN_ON(!tunnel->paths[i]->activated);
		if (tb_path_is_invalid(tunnel->paths[i]))
			return true;
	}

	return false;
}

/**
 * tb_tunnel_restart() - activate a tunnel after a hardware reset
 * @tunnel: Tunnel to restart
 *
 * Return: 0 on success and negative errno in case of failure
 */
int tb_tunnel_restart(struct tb_tunnel *tunnel)
{
	int res, i;

	tb_tunnel_info(tunnel, "activating\n");

	/*
	 * Make sure all paths are properly disabled before enabling
	 * them again.
	 */
	for (i = 0; i < tunnel->npaths; i++) {
		if (tunnel->paths[i]->activated) {
			tb_path_deactivate(tunnel->paths[i]);
			tunnel->paths[i]->activated = false;
		}
	}

	if (tunnel->init) {
		res = tunnel->init(tunnel);
		if (res)
			return res;
	}

	for (i = 0; i < tunnel->npaths; i++) {
		res = tb_path_activate(tunnel->paths[i]);
		if (res)
			goto err;
	}

	if (tunnel->activate) {
		res = tunnel->activate(tunnel, true);
		if (res)
			goto err;
	}

	return 0;

err:
	tb_tunnel_warn(tunnel, "activation failed\n");
	tb_tunnel_deactivate(tunnel);
	return res;
}

/**
 * tb_tunnel_activate() - activate a tunnel
 * @tunnel: Tunnel to activate
 *
 * Return: 0 on success or an error code on failure.
 */
int tb_tunnel_activate(struct tb_tunnel *tunnel)
{
	int i;

	tb_tunnel_info(tunnel, "activating\n");

	for (i = 0; i < tunnel->npaths; i++) {
		if (tunnel->paths[i]->activated) {
			tb_tunnel_WARN(tunnel,
				       "trying to activate an already activated tunnel\n");
			return -EINVAL;
		}
	}

	return tb_tunnel_restart(tunnel);
}

/**
 * tb_tunnel_deactivate() - deactivate a tunnel
 * @tunnel: Tunnel to deactivate
 */
void tb_tunnel_deactivate(struct tb_tunnel *tunnel)
{
	int i;

	tb_tunnel_info(tunnel, "deactivating\n");

	if (tunnel->activate)
		tunnel->activate(tunnel, false);

	for (i = 0; i < tunnel->npaths; i++) {
		if (tunnel->paths[i] && tunnel->paths[i]->activated)
			tb_path_deactivate(tunnel->paths[i]);
	}
}