// SPDX-License-Identifier: GPL-2.0
/*
 * Thunderbolt driver - path/tunnel functionality
 *
 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
 * Copyright (C) 2019, Intel Corporation
 */

#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/ktime.h>

#include "tb.h"

static void tb_dump_hop(const struct tb_path_hop *hop, const struct tb_regs_hop *regs)
{
	const struct tb_port *port = hop->in_port;

	tb_port_dbg(port, " In HopID: %d => Out port: %d Out HopID: %d\n",
		    hop->in_hop_index, regs->out_port, regs->next_hop);
	tb_port_dbg(port, " Weight: %d Priority: %d Credits: %d Drop: %d\n",
		    regs->weight, regs->priority,
		    regs->initial_credits, regs->drop_packages);
	tb_port_dbg(port, " Counter enabled: %d Counter index: %d\n",
		    regs->counter_enable, regs->counter);
	tb_port_dbg(port, " Flow Control (In/Eg): %d/%d Shared Buffer (In/Eg): %d/%d\n",
		    regs->ingress_fc, regs->egress_fc,
		    regs->ingress_shared_buffer, regs->egress_shared_buffer);
	tb_port_dbg(port, " Unknown1: %#x Unknown2: %#x Unknown3: %#x\n",
		    regs->unknown1, regs->unknown2, regs->unknown3);
}

static struct tb_port *tb_path_find_dst_port(struct tb_port *src, int src_hopid,
					     int dst_hopid)
{
	struct tb_port *port, *out_port = NULL;
	struct tb_regs_hop hop;
	struct tb_switch *sw;
	int i, ret, hopid;

	hopid = src_hopid;
	port = src;

	for (i = 0; port && i < TB_PATH_MAX_HOPS; i++) {
		sw = port->sw;

		ret = tb_port_read(port, &hop, TB_CFG_HOPS, 2 * hopid, 2);
		if (ret) {
			tb_port_warn(port, "failed to read path at %d\n", hopid);
			return NULL;
		}

		if (!hop.enable)
			return NULL;

		out_port = &sw->ports[hop.out_port];
		hopid = hop.next_hop;
		port = out_port->remote;
	}

	return out_port && hopid == dst_hopid ? out_port : NULL;
}

static int tb_path_find_src_hopid(struct tb_port *src,
				  const struct tb_port *dst, int dst_hopid)
{
	struct tb_port *out;
	int i;

	for (i = TB_PATH_MIN_HOPID; i <= src->config.max_in_hop_id; i++) {
		out = tb_path_find_dst_port(src, i, dst_hopid);
		if (out == dst)
			return i;
	}

	return 0;
}

/**
 * tb_path_discover() - Discover a path
 * @src: First input port of a path
 * @src_hopid: Starting HopID of a path (%-1 if don't care)
 * @dst: Expected destination port of the path (%NULL if don't care)
 * @dst_hopid: HopID to the @dst (%-1 if don't care)
 * @last: Last port is filled here if not %NULL
 * @name: Name of the path
 *
 * Follows a path starting from @src and @src_hopid to the last output
 * port of the path. Allocates HopIDs for the visited ports. Call
 * tb_path_free() to release the path and allocated HopIDs when the path
 * is not needed anymore.
 *
 * Note that this function also discovers incomplete paths, so the
 * caller should check that the @dst port is the expected one. If it is
 * not, the path can be cleaned up by calling tb_path_deactivate()
 * before tb_path_free().
 *
 * Return: Discovered path on success, %NULL in case of failure
 */
struct tb_path *tb_path_discover(struct tb_port *src, int src_hopid,
				 struct tb_port *dst, int dst_hopid,
				 struct tb_port **last, const char *name)
{
	struct tb_port *out_port;
	struct tb_regs_hop hop;
	struct tb_path *path;
	struct tb_switch *sw;
	struct tb_port *p;
	size_t num_hops;
	int ret, i, h;

	if (src_hopid < 0 && dst) {
		/*
		 * For incomplete paths the intermediate HopID can be
		 * different from the one used by the protocol adapter
		 * so in that case find a path that ends on @dst with
		 * matching @dst_hopid. That should give us the correct
		 * HopID for the @src.
		 */
		src_hopid = tb_path_find_src_hopid(src, dst, dst_hopid);
		if (!src_hopid)
			return NULL;
	}

	p = src;
	h = src_hopid;
	num_hops = 0;

	for (i = 0; p && i < TB_PATH_MAX_HOPS; i++) {
		sw = p->sw;

		ret = tb_port_read(p, &hop, TB_CFG_HOPS, 2 * h, 2);
		if (ret) {
			tb_port_warn(p, "failed to read path at %d\n", h);
			return NULL;
		}

		/* If the hop is not enabled we got an incomplete path */
		if (!hop.enable)
			break;

		out_port = &sw->ports[hop.out_port];
		if (last)
			*last = out_port;

		h = hop.next_hop;
		p = out_port->remote;
		num_hops++;
	}

	path = kzalloc(sizeof(*path), GFP_KERNEL);
	if (!path)
		return NULL;

	path->name = name;
	path->tb = src->sw->tb;
	path->path_length = num_hops;
	path->activated = true;

	path->hops = kcalloc(num_hops, sizeof(*path->hops), GFP_KERNEL);
	if (!path->hops) {
		kfree(path);
		return NULL;
	}

	p = src;
	h = src_hopid;

	for (i = 0; i < num_hops; i++) {
		int next_hop;

		sw = p->sw;

		ret = tb_port_read(p, &hop, TB_CFG_HOPS, 2 * h, 2);
		if (ret) {
			tb_port_warn(p, "failed to read path at %d\n", h);
			goto err;
		}

		if (tb_port_alloc_in_hopid(p, h, h) < 0)
			goto err;

		out_port = &sw->ports[hop.out_port];
		next_hop = hop.next_hop;

		if (tb_port_alloc_out_hopid(out_port, next_hop, next_hop) < 0) {
			tb_port_release_in_hopid(p, h);
			goto err;
		}

		path->hops[i].in_port = p;
		path->hops[i].in_hop_index = h;
		path->hops[i].in_counter_index = -1;
		path->hops[i].out_port = out_port;
		path->hops[i].next_hop_index = next_hop;

		h = next_hop;
		p = out_port->remote;
	}

	return path;

err:
	tb_port_warn(src, "failed to discover path starting at HopID %d\n",
		     src_hopid);
	tb_path_free(path);
	return NULL;
}
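
/*
 * Illustrative sketch (not part of the driver): a connection manager
 * could use tb_path_discover() to take over a path programmed by boot
 * firmware. The "down" and "up" ports, the HopID value 8 and the path
 * name are hypothetical.
 *
 *	struct tb_port *last = NULL;	// hypothetical variables
 *	struct tb_path *path;
 *
 *	path = tb_path_discover(down, 8, up, 8, &last, "Example");
 *	if (!path)
 *		return NULL;
 *	if (last != up) {
 *		// Incomplete path: deactivate it before freeing
 *		tb_path_deactivate(path);
 *		tb_path_free(path);
 *		return NULL;
 *	}
 */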

/**
 * tb_path_alloc() - allocate a thunderbolt path between two ports
 * @tb: Domain pointer
 * @src: Source port of the path
 * @src_hopid: HopID used for the first ingress port in the path
 * @dst: Destination port of the path
 * @dst_hopid: HopID used for the last egress port in the path
 * @link_nr: Preferred link if there are dual links on the path
 * @name: Name of the path
 *
 * Creates a path between two ports starting with given @src_hopid. Reserves
 * HopIDs for each port (they can be different from @src_hopid depending on
 * how many HopIDs each port has already reserved). If there are dual
 * links on the path, prioritizes using @link_nr but takes into account
 * that the lanes may be bonded.
 *
 * Return: A tb_path on success, %NULL on failure.
 */
struct tb_path *tb_path_alloc(struct tb *tb, struct tb_port *src, int src_hopid,
			      struct tb_port *dst, int dst_hopid, int link_nr,
			      const char *name)
{
	struct tb_port *in_port, *out_port, *first_port, *last_port;
	int in_hopid, out_hopid;
	struct tb_path *path;
	size_t num_hops;
	int i, ret;

	path = kzalloc(sizeof(*path), GFP_KERNEL);
	if (!path)
		return NULL;

	first_port = last_port = NULL;
	i = 0;
	tb_for_each_port_on_path(src, dst, in_port) {
		if (!first_port)
			first_port = in_port;
		last_port = in_port;
		i++;
	}

	/* Check that src and dst are reachable */
	if (first_port != src || last_port != dst) {
		kfree(path);
		return NULL;
	}

	/* Each hop takes two ports */
	num_hops = i / 2;

	path->hops = kcalloc(num_hops, sizeof(*path->hops), GFP_KERNEL);
	if (!path->hops) {
		kfree(path);
		return NULL;
	}

	in_hopid = src_hopid;
	out_port = NULL;

	for (i = 0; i < num_hops; i++) {
		in_port = tb_next_port_on_path(src, dst, out_port);
		if (!in_port)
			goto err;

		/* When lanes are bonded the primary link must be used */
		if (!in_port->bonded && in_port->dual_link_port &&
		    in_port->link_nr != link_nr)
			in_port = in_port->dual_link_port;

		ret = tb_port_alloc_in_hopid(in_port, in_hopid, in_hopid);
		if (ret < 0)
			goto err;
		in_hopid = ret;

		out_port = tb_next_port_on_path(src, dst, in_port);
		if (!out_port)
			goto err;

		/*
		 * Pick the right port when going from non-bonded to
		 * bonded or from bonded to non-bonded.
		 */
		if (out_port->dual_link_port) {
			if (!in_port->bonded && out_port->bonded &&
			    out_port->link_nr) {
				/*
				 * Use primary link when going from
				 * non-bonded to bonded.
				 */
				out_port = out_port->dual_link_port;
			} else if (!out_port->bonded &&
				   out_port->link_nr != link_nr) {
				/*
				 * If out port is not bonded follow
				 * link_nr.
				 */
				out_port = out_port->dual_link_port;
			}
		}

		if (i == num_hops - 1)
			ret = tb_port_alloc_out_hopid(out_port, dst_hopid,
						      dst_hopid);
		else
			ret = tb_port_alloc_out_hopid(out_port, -1, -1);

		if (ret < 0)
			goto err;
		out_hopid = ret;

		path->hops[i].in_hop_index = in_hopid;
		path->hops[i].in_port = in_port;
		path->hops[i].in_counter_index = -1;
		path->hops[i].out_port = out_port;
		path->hops[i].next_hop_index = out_hopid;

		in_hopid = out_hopid;
	}

	path->tb = tb;
	path->path_length = num_hops;
	path->name = name;

	return path;

err:
	tb_path_free(path);
	return NULL;
}
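
/*
 * Illustrative sketch (not part of the driver): allocating and
 * activating a path the way a tunnel implementation might. The "down"
 * and "up" ports, the HopID value 8 and the priority/weight values are
 * hypothetical; TB_PATH_ALL is assumed to be the "all path ports" mask
 * declared in tb.h.
 *
 *	struct tb_path *path;
 *
 *	path = tb_path_alloc(tb, down, 8, up, 8, 0, "Example");
 *	if (!path)
 *		return -ENOMEM;
 *
 *	// Enable flow control on every hop of the path
 *	path->ingress_fc_enable = TB_PATH_ALL;
 *	path->egress_fc_enable = TB_PATH_ALL;
 *	path->priority = 3;
 *	path->weight = 1;
 *
 *	if (tb_path_activate(path)) {
 *		tb_path_free(path);
 *		return -EIO;
 *	}
 */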

/**
 * tb_path_free() - free a path
 * @path: Path to free
 *
 * Frees a path. The path does not need to be deactivated.
 */
void tb_path_free(struct tb_path *path)
{
	int i;

	for (i = 0; i < path->path_length; i++) {
		const struct tb_path_hop *hop = &path->hops[i];

		if (hop->in_port)
			tb_port_release_in_hopid(hop->in_port,
						 hop->in_hop_index);
		if (hop->out_port)
			tb_port_release_out_hopid(hop->out_port,
						  hop->next_hop_index);
	}

	kfree(path->hops);
	kfree(path);
}

static void __tb_path_deallocate_nfc(struct tb_path *path, int first_hop)
{
	int i, res;

	for (i = first_hop; i < path->path_length; i++) {
		res = tb_port_add_nfc_credits(path->hops[i].in_port,
					      -path->hops[i].nfc_credits);
		if (res)
			tb_port_warn(path->hops[i].in_port,
				     "nfc credits deallocation failed for hop %d\n",
				     i);
	}
}

static int __tb_path_deactivate_hop(struct tb_port *port, int hop_index,
				    bool clear_fc)
{
	struct tb_regs_hop hop;
	ktime_t timeout;
	int ret;

	/* Disable the path */
	ret = tb_port_read(port, &hop, TB_CFG_HOPS, 2 * hop_index, 2);
	if (ret)
		return ret;

	/* Already disabled */
	if (!hop.enable)
		return 0;

	hop.enable = 0;

	ret = tb_port_write(port, &hop, TB_CFG_HOPS, 2 * hop_index, 2);
	if (ret)
		return ret;

	/* Wait until it is drained */
	timeout = ktime_add_ms(ktime_get(), 500);
	do {
		ret = tb_port_read(port, &hop, TB_CFG_HOPS, 2 * hop_index, 2);
		if (ret)
			return ret;

		if (!hop.pending) {
			if (clear_fc) {
				/*
				 * Clear flow control. The protocol
				 * adapter IFC and ISE bits are vendor
				 * defined in the USB4 spec so we clear
				 * them only for pre-USB4 adapters.
				 */
				if (!tb_switch_is_usb4(port->sw)) {
					hop.ingress_fc = 0;
					hop.ingress_shared_buffer = 0;
				}
				hop.egress_fc = 0;
				hop.egress_shared_buffer = 0;

				return tb_port_write(port, &hop, TB_CFG_HOPS,
						     2 * hop_index, 2);
			}

			return 0;
		}

		usleep_range(10, 20);
	} while (ktime_before(ktime_get(), timeout));

	return -ETIMEDOUT;
}

static void __tb_path_deactivate_hops(struct tb_path *path, int first_hop)
{
	int i, res;

	for (i = first_hop; i < path->path_length; i++) {
		res = __tb_path_deactivate_hop(path->hops[i].in_port,
					       path->hops[i].in_hop_index,
					       path->clear_fc);
		if (res && res != -ENODEV)
			tb_port_warn(path->hops[i].in_port,
				     "hop deactivation failed for hop %d, index %d\n",
				     i, path->hops[i].in_hop_index);
	}
}

void tb_path_deactivate(struct tb_path *path)
{
	if (!path->activated) {
		tb_WARN(path->tb, "trying to deactivate an inactive path\n");
		return;
	}
	tb_dbg(path->tb,
	       "deactivating %s path from %llx:%u to %llx:%u\n",
	       path->name, tb_route(path->hops[0].in_port->sw),
	       path->hops[0].in_port->port,
	       tb_route(path->hops[path->path_length - 1].out_port->sw),
	       path->hops[path->path_length - 1].out_port->port);
	__tb_path_deactivate_hops(path, 0);
	__tb_path_deallocate_nfc(path, 0);
	path->activated = false;
}

/**
 * tb_path_activate() - activate a path
 * @path: Path to activate
 *
 * Activates a path starting with the last hop and iterating backwards. The
 * caller must fill path->hops before calling tb_path_activate().
 *
 * Return: 0 on success or an error code on failure.
 */
int tb_path_activate(struct tb_path *path)
{
	int i, res;
	enum tb_path_port out_mask, in_mask;

	if (path->activated) {
		tb_WARN(path->tb, "trying to activate already activated path\n");
		return -EINVAL;
	}

	tb_dbg(path->tb,
	       "activating %s path from %llx:%u to %llx:%u\n",
	       path->name, tb_route(path->hops[0].in_port->sw),
	       path->hops[0].in_port->port,
	       tb_route(path->hops[path->path_length - 1].out_port->sw),
	       path->hops[path->path_length - 1].out_port->port);

	/* Clear counters. */
	for (i = path->path_length - 1; i >= 0; i--) {
		if (path->hops[i].in_counter_index == -1)
			continue;
		res = tb_port_clear_counter(path->hops[i].in_port,
					    path->hops[i].in_counter_index);
		if (res)
			goto err;
	}

	/* Add non flow controlled credits. */
	for (i = path->path_length - 1; i >= 0; i--) {
		res = tb_port_add_nfc_credits(path->hops[i].in_port,
					      path->hops[i].nfc_credits);
		if (res) {
			__tb_path_deallocate_nfc(path, i);
			goto err;
		}
	}

	/* Activate hops. */
	for (i = path->path_length - 1; i >= 0; i--) {
		struct tb_regs_hop hop = { 0 };

		/* If it is left active deactivate it first */
		__tb_path_deactivate_hop(path->hops[i].in_port,
					 path->hops[i].in_hop_index, path->clear_fc);

		/* dword 0 */
		hop.next_hop = path->hops[i].next_hop_index;
		hop.out_port = path->hops[i].out_port->port;
		hop.initial_credits = path->hops[i].initial_credits;
		hop.unknown1 = 0;
		hop.enable = 1;

		/* dword 1 */
		out_mask = (i == path->path_length - 1) ?
				TB_PATH_DESTINATION : TB_PATH_INTERNAL;
		in_mask = (i == 0) ? TB_PATH_SOURCE : TB_PATH_INTERNAL;
		hop.weight = path->weight;
		hop.unknown2 = 0;
		hop.priority = path->priority;
		hop.drop_packages = path->drop_packages;
		hop.counter = path->hops[i].in_counter_index;
		hop.counter_enable = path->hops[i].in_counter_index != -1;
		hop.ingress_fc = path->ingress_fc_enable & in_mask;
		hop.egress_fc = path->egress_fc_enable & out_mask;
		hop.ingress_shared_buffer = path->ingress_shared_buffer
					    & in_mask;
		hop.egress_shared_buffer = path->egress_shared_buffer
					    & out_mask;
		hop.unknown3 = 0;

		tb_port_dbg(path->hops[i].in_port, "Writing hop %d\n", i);
		tb_dump_hop(&path->hops[i], &hop);
		res = tb_port_write(path->hops[i].in_port, &hop, TB_CFG_HOPS,
				    2 * path->hops[i].in_hop_index, 2);
		if (res) {
			__tb_path_deactivate_hops(path, i);
			__tb_path_deallocate_nfc(path, 0);
			goto err;
		}
	}
	path->activated = true;
	tb_dbg(path->tb, "path activation complete\n");
	return 0;
err:
	tb_WARN(path->tb, "path activation failed\n");
	return res;
}
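
/*
 * Illustrative sketch (not part of the driver): the counterpart of the
 * activation above. Deactivation disables and drains the hops and
 * returns the non flow controlled credits; freeing releases the
 * allocated HopIDs.
 *
 *	tb_path_deactivate(path);
 *	tb_path_free(path);
 */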

/**
 * tb_path_is_invalid() - check whether any ports on the path are invalid
 * @path: Path to check
 *
 * Return: %true if the path is invalid, %false otherwise.
 */
bool tb_path_is_invalid(struct tb_path *path)
{
	int i;

	for (i = 0; i < path->path_length; i++) {
		if (path->hops[i].in_port->sw->is_unplugged)
			return true;
		if (path->hops[i].out_port->sw->is_unplugged)
			return true;
	}
	return false;
}

/**
 * tb_path_port_on_path() - Does the path go through a certain port
 * @path: Path to check
 * @port: Port to check
 *
 * Goes over all hops on the path and checks if @port is any of them.
 * Direction does not matter.
 *
 * Return: %true if @port is part of @path, %false otherwise.
 */
bool tb_path_port_on_path(const struct tb_path *path, const struct tb_port *port)
{
	int i;

	for (i = 0; i < path->path_length; i++) {
		if (path->hops[i].in_port == port ||
		    path->hops[i].out_port == port)
			return true;
	}

	return false;
}
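
/*
 * Illustrative sketch (not part of the driver): a caller could combine
 * tb_path_port_on_path() with tb_path_deactivate() to tear down paths
 * that run through a port that is going away. The "port" and "path"
 * variables are hypothetical.
 *
 *	if (path->activated && tb_path_port_on_path(path, port))
 *		tb_path_deactivate(path);
 */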