// SPDX-License-Identifier: GPL-2.0
/*
 * KUnit tests
 *
 * Copyright (C) 2020, Intel Corporation
 * Author: Mika Westerberg <mika.westerberg@linux.intel.com>
 */

#include <kunit/test.h>
#include <linux/idr.h>

#include "tb.h"
#include "tunnel.h"

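/*
 * KUnit-managed wrappers around ida_init()/ida_destroy(): registering
 * the IDA as a test resource means it is released automatically when
 * the test case finishes, so the mock switches below need no explicit
 * cleanup.
 */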
static int __ida_init(struct kunit_resource *res, void *context)
{
	struct ida *ida = context;

	ida_init(ida);
	res->data = ida;
	return 0;
}

static void __ida_destroy(struct kunit_resource *res)
{
	struct ida *ida = res->data;

	ida_destroy(ida);
}

static void kunit_ida_init(struct kunit *test, struct ida *ida)
{
	kunit_alloc_resource(test, __ida_init, __ida_destroy, GFP_KERNEL, ida);
}

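/*
 * Allocates a bare mock switch with @max_port_number + 1 zero-initialized
 * ports at @route. All allocations are KUnit-managed so nothing needs to
 * be freed by the caller.
 */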
static struct tb_switch *alloc_switch(struct kunit *test, u64 route,
				      u8 upstream_port, u8 max_port_number)
{
	struct tb_switch *sw;
	size_t size;
	int i;

	sw = kunit_kzalloc(test, sizeof(*sw), GFP_KERNEL);
	if (!sw)
		return NULL;

	sw->config.upstream_port_number = upstream_port;
	sw->config.depth = tb_route_length(route);
	sw->config.route_hi = upper_32_bits(route);
	sw->config.route_lo = lower_32_bits(route);
	sw->config.enabled = 0;
	sw->config.max_port_number = max_port_number;

	size = (sw->config.max_port_number + 1) * sizeof(*sw->ports);
	sw->ports = kunit_kzalloc(test, size, GFP_KERNEL);
	if (!sw->ports)
		return NULL;

	for (i = 0; i <= sw->config.max_port_number; i++) {
		sw->ports[i].sw = sw;
		sw->ports[i].port = i;
		sw->ports[i].config.port_number = i;
		if (i) {
			kunit_ida_init(test, &sw->ports[i].in_hopids);
			kunit_ida_init(test, &sw->ports[i].out_hopids);
		}
	}

	return sw;
}

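/*
 * Mock host router: two lane adapter pairs (ports 1-4), two DP IN
 * adapters (5-6), the NHI (7), PCIe downstream adapters (8-9) and USB3
 * downstream adapters (12-13). Ports 10 and 11 are disabled.
 */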
static struct tb_switch *alloc_host(struct kunit *test)
{
	struct tb_switch *sw;

	sw = alloc_switch(test, 0, 7, 13);
	if (!sw)
		return NULL;

	sw->config.vendor_id = 0x8086;
	sw->config.device_id = 0x9a1b;

	sw->ports[0].config.type = TB_TYPE_PORT;
	sw->ports[0].config.max_in_hop_id = 7;
	sw->ports[0].config.max_out_hop_id = 7;

	sw->ports[1].config.type = TB_TYPE_PORT;
	sw->ports[1].config.max_in_hop_id = 19;
	sw->ports[1].config.max_out_hop_id = 19;
	sw->ports[1].total_credits = 60;
	sw->ports[1].ctl_credits = 2;
	sw->ports[1].dual_link_port = &sw->ports[2];

	sw->ports[2].config.type = TB_TYPE_PORT;
	sw->ports[2].config.max_in_hop_id = 19;
	sw->ports[2].config.max_out_hop_id = 19;
	sw->ports[2].total_credits = 60;
	sw->ports[2].ctl_credits = 2;
	sw->ports[2].dual_link_port = &sw->ports[1];
	sw->ports[2].link_nr = 1;

	sw->ports[3].config.type = TB_TYPE_PORT;
	sw->ports[3].config.max_in_hop_id = 19;
	sw->ports[3].config.max_out_hop_id = 19;
	sw->ports[3].total_credits = 60;
	sw->ports[3].ctl_credits = 2;
	sw->ports[3].dual_link_port = &sw->ports[4];

	sw->ports[4].config.type = TB_TYPE_PORT;
	sw->ports[4].config.max_in_hop_id = 19;
	sw->ports[4].config.max_out_hop_id = 19;
	sw->ports[4].total_credits = 60;
	sw->ports[4].ctl_credits = 2;
	sw->ports[4].dual_link_port = &sw->ports[3];
	sw->ports[4].link_nr = 1;

	sw->ports[5].config.type = TB_TYPE_DP_HDMI_IN;
	sw->ports[5].config.max_in_hop_id = 9;
	sw->ports[5].config.max_out_hop_id = 9;
	sw->ports[5].cap_adap = -1;

	sw->ports[6].config.type = TB_TYPE_DP_HDMI_IN;
	sw->ports[6].config.max_in_hop_id = 9;
	sw->ports[6].config.max_out_hop_id = 9;
	sw->ports[6].cap_adap = -1;

	sw->ports[7].config.type = TB_TYPE_NHI;
	sw->ports[7].config.max_in_hop_id = 11;
	sw->ports[7].config.max_out_hop_id = 11;
	sw->ports[7].config.nfc_credits = 0x41800000;

	sw->ports[8].config.type = TB_TYPE_PCIE_DOWN;
	sw->ports[8].config.max_in_hop_id = 8;
	sw->ports[8].config.max_out_hop_id = 8;

	sw->ports[9].config.type = TB_TYPE_PCIE_DOWN;
	sw->ports[9].config.max_in_hop_id = 8;
	sw->ports[9].config.max_out_hop_id = 8;

	sw->ports[10].disabled = true;
	sw->ports[11].disabled = true;

	sw->ports[12].config.type = TB_TYPE_USB3_DOWN;
	sw->ports[12].config.max_in_hop_id = 8;
	sw->ports[12].config.max_out_hop_id = 8;

	sw->ports[13].config.type = TB_TYPE_USB3_DOWN;
	sw->ports[13].config.max_in_hop_id = 8;
	sw->ports[13].config.max_out_hop_id = 8;

	return sw;
}

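/* Same as alloc_host() but the router advertises USB4 credit allocation */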
static struct tb_switch *alloc_host_usb4(struct kunit *test)
{
	struct tb_switch *sw;

	sw = alloc_host(test);
	if (!sw)
		return NULL;

	sw->generation = 4;
	sw->credit_allocation = true;
	sw->max_usb3_credits = 32;
	sw->min_dp_aux_credits = 1;
	sw->min_dp_main_credits = 0;
	sw->max_pcie_credits = 64;
	sw->max_dma_credits = 14;

	return sw;
}

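/*
 * Mock device router with four lane adapter pairs, PCIe upstream and
 * downstream adapters, two DP OUT adapters and USB3 adapters. When
 * @parent is non-NULL the new router is linked to it at @route, and
 * @bonded controls whether the connecting lanes are marked as bonded.
 */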
static struct tb_switch *alloc_dev_default(struct kunit *test,
					   struct tb_switch *parent,
					   u64 route, bool bonded)
{
	struct tb_port *port, *upstream_port;
	struct tb_switch *sw;

	sw = alloc_switch(test, route, 1, 19);
	if (!sw)
		return NULL;

	sw->config.vendor_id = 0x8086;
	sw->config.device_id = 0x15ef;

	sw->ports[0].config.type = TB_TYPE_PORT;
	sw->ports[0].config.max_in_hop_id = 8;
	sw->ports[0].config.max_out_hop_id = 8;

	sw->ports[1].config.type = TB_TYPE_PORT;
	sw->ports[1].config.max_in_hop_id = 19;
	sw->ports[1].config.max_out_hop_id = 19;
	sw->ports[1].total_credits = 60;
	sw->ports[1].ctl_credits = 2;
	sw->ports[1].dual_link_port = &sw->ports[2];

	sw->ports[2].config.type = TB_TYPE_PORT;
	sw->ports[2].config.max_in_hop_id = 19;
	sw->ports[2].config.max_out_hop_id = 19;
	sw->ports[2].total_credits = 60;
	sw->ports[2].ctl_credits = 2;
	sw->ports[2].dual_link_port = &sw->ports[1];
	sw->ports[2].link_nr = 1;

	sw->ports[3].config.type = TB_TYPE_PORT;
	sw->ports[3].config.max_in_hop_id = 19;
	sw->ports[3].config.max_out_hop_id = 19;
	sw->ports[3].total_credits = 60;
	sw->ports[3].ctl_credits = 2;
	sw->ports[3].dual_link_port = &sw->ports[4];

	sw->ports[4].config.type = TB_TYPE_PORT;
	sw->ports[4].config.max_in_hop_id = 19;
	sw->ports[4].config.max_out_hop_id = 19;
	sw->ports[4].total_credits = 60;
	sw->ports[4].ctl_credits = 2;
	sw->ports[4].dual_link_port = &sw->ports[3];
	sw->ports[4].link_nr = 1;

	sw->ports[5].config.type = TB_TYPE_PORT;
	sw->ports[5].config.max_in_hop_id = 19;
	sw->ports[5].config.max_out_hop_id = 19;
	sw->ports[5].total_credits = 60;
	sw->ports[5].ctl_credits = 2;
	sw->ports[5].dual_link_port = &sw->ports[6];

	sw->ports[6].config.type = TB_TYPE_PORT;
	sw->ports[6].config.max_in_hop_id = 19;
	sw->ports[6].config.max_out_hop_id = 19;
	sw->ports[6].total_credits = 60;
	sw->ports[6].ctl_credits = 2;
	sw->ports[6].dual_link_port = &sw->ports[5];
	sw->ports[6].link_nr = 1;

	sw->ports[7].config.type = TB_TYPE_PORT;
	sw->ports[7].config.max_in_hop_id = 19;
	sw->ports[7].config.max_out_hop_id = 19;
	sw->ports[7].total_credits = 60;
	sw->ports[7].ctl_credits = 2;
	sw->ports[7].dual_link_port = &sw->ports[8];

	sw->ports[8].config.type = TB_TYPE_PORT;
	sw->ports[8].config.max_in_hop_id = 19;
	sw->ports[8].config.max_out_hop_id = 19;
	sw->ports[8].total_credits = 60;
	sw->ports[8].ctl_credits = 2;
	sw->ports[8].dual_link_port = &sw->ports[7];
	sw->ports[8].link_nr = 1;

	sw->ports[9].config.type = TB_TYPE_PCIE_UP;
	sw->ports[9].config.max_in_hop_id = 8;
	sw->ports[9].config.max_out_hop_id = 8;

	sw->ports[10].config.type = TB_TYPE_PCIE_DOWN;
	sw->ports[10].config.max_in_hop_id = 8;
	sw->ports[10].config.max_out_hop_id = 8;

	sw->ports[11].config.type = TB_TYPE_PCIE_DOWN;
	sw->ports[11].config.max_in_hop_id = 8;
	sw->ports[11].config.max_out_hop_id = 8;

	sw->ports[12].config.type = TB_TYPE_PCIE_DOWN;
	sw->ports[12].config.max_in_hop_id = 8;
	sw->ports[12].config.max_out_hop_id = 8;

	sw->ports[13].config.type = TB_TYPE_DP_HDMI_OUT;
	sw->ports[13].config.max_in_hop_id = 9;
	sw->ports[13].config.max_out_hop_id = 9;
	sw->ports[13].cap_adap = -1;

	sw->ports[14].config.type = TB_TYPE_DP_HDMI_OUT;
	sw->ports[14].config.max_in_hop_id = 9;
	sw->ports[14].config.max_out_hop_id = 9;
	sw->ports[14].cap_adap = -1;

	sw->ports[15].disabled = true;

	sw->ports[16].config.type = TB_TYPE_USB3_UP;
	sw->ports[16].config.max_in_hop_id = 8;
	sw->ports[16].config.max_out_hop_id = 8;

	sw->ports[17].config.type = TB_TYPE_USB3_DOWN;
	sw->ports[17].config.max_in_hop_id = 8;
	sw->ports[17].config.max_out_hop_id = 8;

	sw->ports[18].config.type = TB_TYPE_USB3_DOWN;
	sw->ports[18].config.max_in_hop_id = 8;
	sw->ports[18].config.max_out_hop_id = 8;

	sw->ports[19].config.type = TB_TYPE_USB3_DOWN;
	sw->ports[19].config.max_in_hop_id = 8;
	sw->ports[19].config.max_out_hop_id = 8;

	if (!parent)
		return sw;

	/* Link them */
	upstream_port = tb_upstream_port(sw);
	port = tb_port_at(route, parent);
	port->remote = upstream_port;
	upstream_port->remote = port;
	if (port->dual_link_port && upstream_port->dual_link_port) {
		port->dual_link_port->remote = upstream_port->dual_link_port;
		upstream_port->dual_link_port->remote = port->dual_link_port;

		if (bonded) {
			/* Bonding is used */
			port->bonded = true;
			port->total_credits *= 2;
			port->dual_link_port->bonded = true;
			port->dual_link_port->total_credits = 0;
			upstream_port->bonded = true;
			upstream_port->total_credits *= 2;
			upstream_port->dual_link_port->bonded = true;
			upstream_port->dual_link_port->total_credits = 0;
		}
	}

	return sw;
}

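/* Like alloc_dev_default() but the DP adapters are DP IN instead of DP OUT */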
static struct tb_switch *alloc_dev_with_dpin(struct kunit *test,
					     struct tb_switch *parent,
					     u64 route, bool bonded)
{
	struct tb_switch *sw;

	sw = alloc_dev_default(test, parent, route, bonded);
	if (!sw)
		return NULL;

	sw->ports[13].config.type = TB_TYPE_DP_HDMI_IN;
	sw->ports[13].config.max_in_hop_id = 9;
	sw->ports[13].config.max_out_hop_id = 9;

	sw->ports[14].config.type = TB_TYPE_DP_HDMI_IN;
	sw->ports[14].config.max_in_hop_id = 9;
	sw->ports[14].config.max_out_hop_id = 9;

	return sw;
}

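/* USB4 variant of alloc_dev_default() with per-protocol credit limits */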
static struct tb_switch *alloc_dev_usb4(struct kunit *test,
					struct tb_switch *parent,
					u64 route, bool bonded)
{
	struct tb_switch *sw;

	sw = alloc_dev_default(test, parent, route, bonded);
	if (!sw)
		return NULL;

	sw->generation = 4;
	sw->credit_allocation = true;
	sw->max_usb3_credits = 14;
	sw->min_dp_aux_credits = 1;
	sw->min_dp_main_credits = 18;
	sw->max_pcie_credits = 32;
	sw->max_dma_credits = 14;

	return sw;
}

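/* Degenerate walk where source and destination are the same adapter */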
static void tb_test_path_basic(struct kunit *test)
{
	struct tb_port *src_port, *dst_port, *p;
	struct tb_switch *host;

	host = alloc_host(test);

	src_port = &host->ports[5];
	dst_port = src_port;

	p = tb_next_port_on_path(src_port, dst_port, NULL);
	KUNIT_EXPECT_PTR_EQ(test, p, dst_port);

	p = tb_next_port_on_path(src_port, dst_port, p);
	KUNIT_EXPECT_TRUE(test, !p);
}

static void tb_test_path_not_connected_walk(struct kunit *test)
{
	struct tb_port *src_port, *dst_port, *p;
	struct tb_switch *host, *dev;

	host = alloc_host(test);
	/* No connection between host and dev */
	dev = alloc_dev_default(test, NULL, 3, true);

	src_port = &host->ports[12];
	dst_port = &dev->ports[16];

	p = tb_next_port_on_path(src_port, dst_port, NULL);
	KUNIT_EXPECT_PTR_EQ(test, p, src_port);

	p = tb_next_port_on_path(src_port, dst_port, p);
	KUNIT_EXPECT_PTR_EQ(test, p, &host->ports[3]);

	p = tb_next_port_on_path(src_port, dst_port, p);
	KUNIT_EXPECT_TRUE(test, !p);

	/* Other direction */

	p = tb_next_port_on_path(dst_port, src_port, NULL);
	KUNIT_EXPECT_PTR_EQ(test, p, dst_port);

	p = tb_next_port_on_path(dst_port, src_port, p);
	KUNIT_EXPECT_PTR_EQ(test, p, &dev->ports[1]);

	p = tb_next_port_on_path(dst_port, src_port, p);
	KUNIT_EXPECT_TRUE(test, !p);
}

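/* Expected route, port number and type for each step of a path walk */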
struct port_expectation {
	u64 route;
	u8 port;
	enum tb_port_type type;
};

static void tb_test_path_single_hop_walk(struct kunit *test)
{
	/*
	 * Walks from Host PCIe downstream port to Device #1 PCIe
	 * upstream port.
	 *
	 * [Host]
	 *  1 |
	 *  1 |
	 * [Device]
	 */
	static const struct port_expectation test_data[] = {
		{ .route = 0x0, .port = 8, .type = TB_TYPE_PCIE_DOWN },
		{ .route = 0x0, .port = 1, .type = TB_TYPE_PORT },
		{ .route = 0x1, .port = 1, .type = TB_TYPE_PORT },
		{ .route = 0x1, .port = 9, .type = TB_TYPE_PCIE_UP },
	};
	struct tb_port *src_port, *dst_port, *p;
	struct tb_switch *host, *dev;
	int i;

	host = alloc_host(test);
	dev = alloc_dev_default(test, host, 1, true);

	src_port = &host->ports[8];
	dst_port = &dev->ports[9];

	/* Walk both directions */

	i = 0;
	tb_for_each_port_on_path(src_port, dst_port, p) {
		KUNIT_EXPECT_TRUE(test, i < ARRAY_SIZE(test_data));
		KUNIT_EXPECT_EQ(test, tb_route(p->sw), test_data[i].route);
		KUNIT_EXPECT_EQ(test, p->port, test_data[i].port);
		KUNIT_EXPECT_EQ(test, (enum tb_port_type)p->config.type,
				test_data[i].type);
		i++;
	}

	KUNIT_EXPECT_EQ(test, i, ARRAY_SIZE(test_data));

	i = ARRAY_SIZE(test_data) - 1;
	tb_for_each_port_on_path(dst_port, src_port, p) {
		KUNIT_EXPECT_TRUE(test, i < ARRAY_SIZE(test_data));
		KUNIT_EXPECT_EQ(test, tb_route(p->sw), test_data[i].route);
		KUNIT_EXPECT_EQ(test, p->port, test_data[i].port);
		KUNIT_EXPECT_EQ(test, (enum tb_port_type)p->config.type,
				test_data[i].type);
		i--;
	}

	KUNIT_EXPECT_EQ(test, i, -1);
}

static void tb_test_path_daisy_chain_walk(struct kunit *test)
{
	/*
	 * Walks from Host DP IN to Device #2 DP OUT.
	 *
	 * [Host]
	 *  1 |
	 *  1 |
	 * [Device #1]
	 *    3 /
	 *   1 /
	 * [Device #2]
	 */
	static const struct port_expectation test_data[] = {
		{ .route = 0x0, .port = 5, .type = TB_TYPE_DP_HDMI_IN },
		{ .route = 0x0, .port = 1, .type = TB_TYPE_PORT },
		{ .route = 0x1, .port = 1, .type = TB_TYPE_PORT },
		{ .route = 0x1, .port = 3, .type = TB_TYPE_PORT },
		{ .route = 0x301, .port = 1, .type = TB_TYPE_PORT },
		{ .route = 0x301, .port = 13, .type = TB_TYPE_DP_HDMI_OUT },
	};
	struct tb_port *src_port, *dst_port, *p;
	struct tb_switch *host, *dev1, *dev2;
	int i;

	host = alloc_host(test);
	dev1 = alloc_dev_default(test, host, 0x1, true);
	dev2 = alloc_dev_default(test, dev1, 0x301, true);

	src_port = &host->ports[5];
	dst_port = &dev2->ports[13];

	/* Walk both directions */

	i = 0;
	tb_for_each_port_on_path(src_port, dst_port, p) {
		KUNIT_EXPECT_TRUE(test, i < ARRAY_SIZE(test_data));
		KUNIT_EXPECT_EQ(test, tb_route(p->sw), test_data[i].route);
		KUNIT_EXPECT_EQ(test, p->port, test_data[i].port);
		KUNIT_EXPECT_EQ(test, (enum tb_port_type)p->config.type,
				test_data[i].type);
		i++;
	}

	KUNIT_EXPECT_EQ(test, i, ARRAY_SIZE(test_data));

	i = ARRAY_SIZE(test_data) - 1;
	tb_for_each_port_on_path(dst_port, src_port, p) {
		KUNIT_EXPECT_TRUE(test, i < ARRAY_SIZE(test_data));
		KUNIT_EXPECT_EQ(test, tb_route(p->sw), test_data[i].route);
		KUNIT_EXPECT_EQ(test, p->port, test_data[i].port);
		KUNIT_EXPECT_EQ(test, (enum tb_port_type)p->config.type,
				test_data[i].type);
		i--;
	}

	KUNIT_EXPECT_EQ(test, i, -1);
}

static void tb_test_path_simple_tree_walk(struct kunit *test)
{
	/*
	 * Walks from Host DP IN to Device #3 DP OUT.
	 *
	 * [Host]
	 *  1 |
	 *  1 |
	 * [Device #1]
	 *  3 /   | 5  \ 7
	 * 1 /    |     \ 1
	 * [Device #2]  |  [Device #4]
	 *              | 1
	 *        [Device #3]
	 */
	static const struct port_expectation test_data[] = {
		{ .route = 0x0, .port = 5, .type = TB_TYPE_DP_HDMI_IN },
		{ .route = 0x0, .port = 1, .type = TB_TYPE_PORT },
		{ .route = 0x1, .port = 1, .type = TB_TYPE_PORT },
		{ .route = 0x1, .port = 5, .type = TB_TYPE_PORT },
		{ .route = 0x501, .port = 1, .type = TB_TYPE_PORT },
		{ .route = 0x501, .port = 13, .type = TB_TYPE_DP_HDMI_OUT },
	};
	struct tb_port *src_port, *dst_port, *p;
	struct tb_switch *host, *dev1, *dev3;
	int i;

	host = alloc_host(test);
	dev1 = alloc_dev_default(test, host, 0x1, true);
	alloc_dev_default(test, dev1, 0x301, true);
	dev3 = alloc_dev_default(test, dev1, 0x501, true);
	alloc_dev_default(test, dev1, 0x701, true);

	src_port = &host->ports[5];
	dst_port = &dev3->ports[13];

	/* Walk both directions */

	i = 0;
	tb_for_each_port_on_path(src_port, dst_port, p) {
		KUNIT_EXPECT_TRUE(test, i < ARRAY_SIZE(test_data));
		KUNIT_EXPECT_EQ(test, tb_route(p->sw), test_data[i].route);
		KUNIT_EXPECT_EQ(test, p->port, test_data[i].port);
		KUNIT_EXPECT_EQ(test, (enum tb_port_type)p->config.type,
				test_data[i].type);
		i++;
	}

	KUNIT_EXPECT_EQ(test, i, ARRAY_SIZE(test_data));

	i = ARRAY_SIZE(test_data) - 1;
	tb_for_each_port_on_path(dst_port, src_port, p) {
		KUNIT_EXPECT_TRUE(test, i < ARRAY_SIZE(test_data));
		KUNIT_EXPECT_EQ(test, tb_route(p->sw), test_data[i].route);
		KUNIT_EXPECT_EQ(test, p->port, test_data[i].port);
		KUNIT_EXPECT_EQ(test, (enum tb_port_type)p->config.type,
				test_data[i].type);
		i--;
	}

	KUNIT_EXPECT_EQ(test, i, -1);
}

static void tb_test_path_complex_tree_walk(struct kunit *test)
{
	/*
	 * Walks from Device #3 DP IN to Device #9 DP OUT.
	 *
	 * [Host]
	 *  1 |
	 *  1 |
	 * [Device #1]
	 *  3 /   | 5  \ 7
	 * 1 /    |     \ 1
	 * [Device #2]  |  [Device #5]
	 *    5 |       | 1       \ 7
	 *    1 |  [Device #4]     \ 1
	 * [Device #3]           [Device #6]
	 *                      3 /
	 *                     1 /
	 *                  [Device #7]
	 *                   3 /  | 5
	 *                  1 /   |
	 *             [Device #8]  | 1
	 *                     [Device #9]
	 */
	static const struct port_expectation test_data[] = {
		{ .route = 0x50301, .port = 13, .type = TB_TYPE_DP_HDMI_IN },
		{ .route = 0x50301, .port = 1, .type = TB_TYPE_PORT },
		{ .route = 0x301, .port = 5, .type = TB_TYPE_PORT },
		{ .route = 0x301, .port = 1, .type = TB_TYPE_PORT },
		{ .route = 0x1, .port = 3, .type = TB_TYPE_PORT },
		{ .route = 0x1, .port = 7, .type = TB_TYPE_PORT },
		{ .route = 0x701, .port = 1, .type = TB_TYPE_PORT },
		{ .route = 0x701, .port = 7, .type = TB_TYPE_PORT },
		{ .route = 0x70701, .port = 1, .type = TB_TYPE_PORT },
		{ .route = 0x70701, .port = 3, .type = TB_TYPE_PORT },
		{ .route = 0x3070701, .port = 1, .type = TB_TYPE_PORT },
		{ .route = 0x3070701, .port = 5, .type = TB_TYPE_PORT },
		{ .route = 0x503070701, .port = 1, .type = TB_TYPE_PORT },
		{ .route = 0x503070701, .port = 14, .type = TB_TYPE_DP_HDMI_OUT },
	};
	struct tb_switch *host, *dev1, *dev2, *dev3, *dev5, *dev6, *dev7, *dev9;
	struct tb_port *src_port, *dst_port, *p;
	int i;

	host = alloc_host(test);
	dev1 = alloc_dev_default(test, host, 0x1, true);
	dev2 = alloc_dev_default(test, dev1, 0x301, true);
	dev3 = alloc_dev_with_dpin(test, dev2, 0x50301, true);
	alloc_dev_default(test, dev1, 0x501, true);
	dev5 = alloc_dev_default(test, dev1, 0x701, true);
	dev6 = alloc_dev_default(test, dev5, 0x70701, true);
	dev7 = alloc_dev_default(test, dev6, 0x3070701, true);
	alloc_dev_default(test, dev7, 0x303070701, true);
	dev9 = alloc_dev_default(test, dev7, 0x503070701, true);

	src_port = &dev3->ports[13];
	dst_port = &dev9->ports[14];

	/* Walk both directions */

	i = 0;
	tb_for_each_port_on_path(src_port, dst_port, p) {
		KUNIT_EXPECT_TRUE(test, i < ARRAY_SIZE(test_data));
		KUNIT_EXPECT_EQ(test, tb_route(p->sw), test_data[i].route);
		KUNIT_EXPECT_EQ(test, p->port, test_data[i].port);
		KUNIT_EXPECT_EQ(test, (enum tb_port_type)p->config.type,
				test_data[i].type);
		i++;
	}

	KUNIT_EXPECT_EQ(test, i, ARRAY_SIZE(test_data));

	i = ARRAY_SIZE(test_data) - 1;
	tb_for_each_port_on_path(dst_port, src_port, p) {
		KUNIT_EXPECT_TRUE(test, i < ARRAY_SIZE(test_data));
		KUNIT_EXPECT_EQ(test, tb_route(p->sw), test_data[i].route);
		KUNIT_EXPECT_EQ(test, p->port, test_data[i].port);
		KUNIT_EXPECT_EQ(test, (enum tb_port_type)p->config.type,
				test_data[i].type);
		i--;
	}

	KUNIT_EXPECT_EQ(test, i, -1);
}

static void tb_test_path_max_length_walk(struct kunit *test)
{
	struct tb_switch *host, *dev1, *dev2, *dev3, *dev4, *dev5, *dev6;
	struct tb_switch *dev7, *dev8, *dev9, *dev10, *dev11, *dev12;
	struct tb_port *src_port, *dst_port, *p;
	int i;

	/*
	 * Walks from Device #6 DP IN to Device #12 DP OUT.
	 *
	 *          [Host]
	 *       1 /     \ 3
	 *      1 /       \ 1
	 * [Device #1]   [Device #7]
	 *     3 |           | 3
	 *     1 |           | 1
	 * [Device #2]   [Device #8]
	 *     3 |           | 3
	 *     1 |           | 1
	 * [Device #3]   [Device #9]
	 *     3 |           | 3
	 *     1 |           | 1
	 * [Device #4]   [Device #10]
	 *     3 |           | 3
	 *     1 |           | 1
	 * [Device #5]   [Device #11]
	 *     3 |           | 3
	 *     1 |           | 1
	 * [Device #6]   [Device #12]
	 */
	static const struct port_expectation test_data[] = {
		{ .route = 0x30303030301, .port = 13, .type = TB_TYPE_DP_HDMI_IN },
		{ .route = 0x30303030301, .port = 1, .type = TB_TYPE_PORT },
		{ .route = 0x303030301, .port = 3, .type = TB_TYPE_PORT },
		{ .route = 0x303030301, .port = 1, .type = TB_TYPE_PORT },
		{ .route = 0x3030301, .port = 3, .type = TB_TYPE_PORT },
		{ .route = 0x3030301, .port = 1, .type = TB_TYPE_PORT },
		{ .route = 0x30301, .port = 3, .type = TB_TYPE_PORT },
		{ .route = 0x30301, .port = 1, .type = TB_TYPE_PORT },
		{ .route = 0x301, .port = 3, .type = TB_TYPE_PORT },
		{ .route = 0x301, .port = 1, .type = TB_TYPE_PORT },
		{ .route = 0x1, .port = 3, .type = TB_TYPE_PORT },
		{ .route = 0x1, .port = 1, .type = TB_TYPE_PORT },
		{ .route = 0x0, .port = 1, .type = TB_TYPE_PORT },
		{ .route = 0x0, .port = 3, .type = TB_TYPE_PORT },
		{ .route = 0x3, .port = 1, .type = TB_TYPE_PORT },
		{ .route = 0x3, .port = 3, .type = TB_TYPE_PORT },
		{ .route = 0x303, .port = 1, .type = TB_TYPE_PORT },
		{ .route = 0x303, .port = 3, .type = TB_TYPE_PORT },
		{ .route = 0x30303, .port = 1, .type = TB_TYPE_PORT },
		{ .route = 0x30303, .port = 3, .type = TB_TYPE_PORT },
		{ .route = 0x3030303, .port = 1, .type = TB_TYPE_PORT },
		{ .route = 0x3030303, .port = 3, .type = TB_TYPE_PORT },
		{ .route = 0x303030303, .port = 1, .type = TB_TYPE_PORT },
		{ .route = 0x303030303, .port = 3, .type = TB_TYPE_PORT },
		{ .route = 0x30303030303, .port = 1, .type = TB_TYPE_PORT },
		{ .route = 0x30303030303, .port = 13, .type = TB_TYPE_DP_HDMI_OUT },
	};

	host = alloc_host(test);
	dev1 = alloc_dev_default(test, host, 0x1, true);
	dev2 = alloc_dev_default(test, dev1, 0x301, true);
	dev3 = alloc_dev_default(test, dev2, 0x30301, true);
	dev4 = alloc_dev_default(test, dev3, 0x3030301, true);
	dev5 = alloc_dev_default(test, dev4, 0x303030301, true);
	dev6 = alloc_dev_with_dpin(test, dev5, 0x30303030301, true);
	dev7 = alloc_dev_default(test, host, 0x3, true);
	dev8 = alloc_dev_default(test, dev7, 0x303, true);
	dev9 = alloc_dev_default(test, dev8, 0x30303, true);
	dev10 = alloc_dev_default(test, dev9, 0x3030303, true);
	dev11 = alloc_dev_default(test, dev10, 0x303030303, true);
	dev12 = alloc_dev_default(test, dev11, 0x30303030303, true);

	src_port = &dev6->ports[13];
	dst_port = &dev12->ports[13];

	/* Walk both directions */

	i = 0;
	tb_for_each_port_on_path(src_port, dst_port, p) {
		KUNIT_EXPECT_TRUE(test, i < ARRAY_SIZE(test_data));
		KUNIT_EXPECT_EQ(test, tb_route(p->sw), test_data[i].route);
		KUNIT_EXPECT_EQ(test, p->port, test_data[i].port);
		KUNIT_EXPECT_EQ(test, (enum tb_port_type)p->config.type,
				test_data[i].type);
		i++;
	}

	KUNIT_EXPECT_EQ(test, i, ARRAY_SIZE(test_data));

	i = ARRAY_SIZE(test_data) - 1;
	tb_for_each_port_on_path(dst_port, src_port, p) {
		KUNIT_EXPECT_TRUE(test, i < ARRAY_SIZE(test_data));
		KUNIT_EXPECT_EQ(test, tb_route(p->sw), test_data[i].route);
		KUNIT_EXPECT_EQ(test, p->port, test_data[i].port);
		KUNIT_EXPECT_EQ(test, (enum tb_port_type)p->config.type,
				test_data[i].type);
		i--;
	}

	KUNIT_EXPECT_EQ(test, i, -1);
}

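/* Path allocation must fail when the two routers are not connected */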
static void tb_test_path_not_connected(struct kunit *test)
{
	struct tb_switch *host, *dev1, *dev2;
	struct tb_port *down, *up;
	struct tb_path *path;

	host = alloc_host(test);
	dev1 = alloc_dev_default(test, host, 0x3, false);
	/* Not connected to anything */
	dev2 = alloc_dev_default(test, NULL, 0x303, false);

	down = &dev1->ports[10];
	up = &dev2->ports[9];

	path = tb_path_alloc(NULL, down, 8, up, 8, 0, "PCIe Down");
	KUNIT_ASSERT_TRUE(test, path == NULL);
	path = tb_path_alloc(NULL, down, 8, up, 8, 1, "PCIe Down");
	KUNIT_ASSERT_TRUE(test, path == NULL);
}

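/* Expected ingress and egress adapters for each hop of an allocated path */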
struct hop_expectation {
	u64 route;
	u8 in_port;
	enum tb_port_type in_type;
	u8 out_port;
	enum tb_port_type out_type;
};

static void tb_test_path_not_bonded_lane0(struct kunit *test)
{
	/*
	 * PCIe path from host to device using lane 0.
	 *
	 * [Host]
	 * 3 |: 4
	 * 1 |: 2
	 * [Device]
	 */
	static const struct hop_expectation test_data[] = {
		{
			.route = 0x0,
			.in_port = 9,
			.in_type = TB_TYPE_PCIE_DOWN,
			.out_port = 3,
			.out_type = TB_TYPE_PORT,
		},
		{
			.route = 0x3,
			.in_port = 1,
			.in_type = TB_TYPE_PORT,
			.out_port = 9,
			.out_type = TB_TYPE_PCIE_UP,
		},
	};
	struct tb_switch *host, *dev;
	struct tb_port *down, *up;
	struct tb_path *path;
	int i;

	host = alloc_host(test);
	dev = alloc_dev_default(test, host, 0x3, false);

	down = &host->ports[9];
	up = &dev->ports[9];

	path = tb_path_alloc(NULL, down, 8, up, 8, 0, "PCIe Down");
	KUNIT_ASSERT_TRUE(test, path != NULL);
	KUNIT_ASSERT_EQ(test, path->path_length, ARRAY_SIZE(test_data));
	for (i = 0; i < ARRAY_SIZE(test_data); i++) {
		const struct tb_port *in_port, *out_port;

		in_port = path->hops[i].in_port;
		out_port = path->hops[i].out_port;

		KUNIT_EXPECT_EQ(test, tb_route(in_port->sw), test_data[i].route);
		KUNIT_EXPECT_EQ(test, in_port->port, test_data[i].in_port);
		KUNIT_EXPECT_EQ(test, (enum tb_port_type)in_port->config.type,
				test_data[i].in_type);
		KUNIT_EXPECT_EQ(test, tb_route(out_port->sw), test_data[i].route);
		KUNIT_EXPECT_EQ(test, out_port->port, test_data[i].out_port);
		KUNIT_EXPECT_EQ(test, (enum tb_port_type)out_port->config.type,
				test_data[i].out_type);
	}
	tb_path_free(path);
}

static void tb_test_path_not_bonded_lane1(struct kunit *test)
{
	/*
	 * DP Video path from host to device using lane 1. Paths like
	 * these are only used with Thunderbolt 1 devices where lane
	 * bonding is not possible. USB4 specifically does not allow
	 * paths like this (you either use lane 0 where lane 1 is
	 * disabled or both lanes are bonded).
	 *
	 * [Host]
	 * 1 :| 2
	 * 1 :| 2
	 * [Device]
	 */
	static const struct hop_expectation test_data[] = {
		{
			.route = 0x0,
			.in_port = 5,
			.in_type = TB_TYPE_DP_HDMI_IN,
			.out_port = 2,
			.out_type = TB_TYPE_PORT,
		},
		{
			.route = 0x1,
			.in_port = 2,
			.in_type = TB_TYPE_PORT,
			.out_port = 13,
			.out_type = TB_TYPE_DP_HDMI_OUT,
		},
	};
	struct tb_switch *host, *dev;
	struct tb_port *in, *out;
	struct tb_path *path;
	int i;

	host = alloc_host(test);
	dev = alloc_dev_default(test, host, 0x1, false);

	in = &host->ports[5];
	out = &dev->ports[13];

	path = tb_path_alloc(NULL, in, 9, out, 9, 1, "Video");
	KUNIT_ASSERT_TRUE(test, path != NULL);
	KUNIT_ASSERT_EQ(test, path->path_length, ARRAY_SIZE(test_data));
	for (i = 0; i < ARRAY_SIZE(test_data); i++) {
		const struct tb_port *in_port, *out_port;

		in_port = path->hops[i].in_port;
		out_port = path->hops[i].out_port;

		KUNIT_EXPECT_EQ(test, tb_route(in_port->sw), test_data[i].route);
		KUNIT_EXPECT_EQ(test, in_port->port, test_data[i].in_port);
		KUNIT_EXPECT_EQ(test, (enum tb_port_type)in_port->config.type,
				test_data[i].in_type);
		KUNIT_EXPECT_EQ(test, tb_route(out_port->sw), test_data[i].route);
		KUNIT_EXPECT_EQ(test, out_port->port, test_data[i].out_port);
		KUNIT_EXPECT_EQ(test, (enum tb_port_type)out_port->config.type,
				test_data[i].out_type);
	}
	tb_path_free(path);
}

static void tb_test_path_not_bonded_lane1_chain(struct kunit *test)
{
	/*
	 * DP Video path from host to device 3 using lane 1.
	 *
	 * [Host]
	 * 1 :| 2
	 * 1 :| 2
	 * [Device #1]
	 * 7 :| 8
	 * 1 :| 2
	 * [Device #2]
	 * 5 :| 6
	 * 1 :| 2
	 * [Device #3]
	 */
	static const struct hop_expectation test_data[] = {
		{
			.route = 0x0,
			.in_port = 5,
			.in_type = TB_TYPE_DP_HDMI_IN,
			.out_port = 2,
			.out_type = TB_TYPE_PORT,
		},
		{
			.route = 0x1,
			.in_port = 2,
			.in_type = TB_TYPE_PORT,
			.out_port = 8,
			.out_type = TB_TYPE_PORT,
		},
		{
			.route = 0x701,
			.in_port = 2,
			.in_type = TB_TYPE_PORT,
			.out_port = 6,
			.out_type = TB_TYPE_PORT,
		},
		{
			.route = 0x50701,
			.in_port = 2,
			.in_type = TB_TYPE_PORT,
			.out_port = 13,
			.out_type = TB_TYPE_DP_HDMI_OUT,
		},
	};
	struct tb_switch *host, *dev1, *dev2, *dev3;
	struct tb_port *in, *out;
	struct tb_path *path;
	int i;

	host = alloc_host(test);
	dev1 = alloc_dev_default(test, host, 0x1, false);
	dev2 = alloc_dev_default(test, dev1, 0x701, false);
	dev3 = alloc_dev_default(test, dev2, 0x50701, false);

	in = &host->ports[5];
	out = &dev3->ports[13];

	path = tb_path_alloc(NULL, in, 9, out, 9, 1, "Video");
	KUNIT_ASSERT_TRUE(test, path != NULL);
	KUNIT_ASSERT_EQ(test, path->path_length, ARRAY_SIZE(test_data));
	for (i = 0; i < ARRAY_SIZE(test_data); i++) {
		const struct tb_port *in_port, *out_port;

		in_port = path->hops[i].in_port;
		out_port = path->hops[i].out_port;

		KUNIT_EXPECT_EQ(test, tb_route(in_port->sw), test_data[i].route);
		KUNIT_EXPECT_EQ(test, in_port->port, test_data[i].in_port);
		KUNIT_EXPECT_EQ(test, (enum tb_port_type)in_port->config.type,
				test_data[i].in_type);
		KUNIT_EXPECT_EQ(test, tb_route(out_port->sw), test_data[i].route);
		KUNIT_EXPECT_EQ(test, out_port->port, test_data[i].out_port);
		KUNIT_EXPECT_EQ(test, (enum tb_port_type)out_port->config.type,
				test_data[i].out_type);
	}
	tb_path_free(path);
}

static void tb_test_path_not_bonded_lane1_chain_reverse(struct kunit *test)
{
	/*
	 * DP Video path from device 3 to host using lane 1.
	 *
	 * [Host]
	 * 1 :| 2
	 * 1 :| 2
	 * [Device #1]
	 * 7 :| 8
	 * 1 :| 2
	 * [Device #2]
	 * 5 :| 6
	 * 1 :| 2
	 * [Device #3]
	 */
	static const struct hop_expectation test_data[] = {
		{
			.route = 0x50701,
			.in_port = 13,
			.in_type = TB_TYPE_DP_HDMI_IN,
			.out_port = 2,
			.out_type = TB_TYPE_PORT,
		},
		{
			.route = 0x701,
			.in_port = 6,
			.in_type = TB_TYPE_PORT,
			.out_port = 2,
			.out_type = TB_TYPE_PORT,
		},
		{
			.route = 0x1,
			.in_port = 8,
			.in_type = TB_TYPE_PORT,
			.out_port = 2,
			.out_type = TB_TYPE_PORT,
		},
		{
			.route = 0x0,
			.in_port = 2,
			.in_type = TB_TYPE_PORT,
			.out_port = 5,
			.out_type = TB_TYPE_DP_HDMI_IN,
		},
	};
	struct tb_switch *host, *dev1, *dev2, *dev3;
	struct tb_port *in, *out;
	struct tb_path *path;
	int i;

	host = alloc_host(test);
	dev1 = alloc_dev_default(test, host, 0x1, false);
	dev2 = alloc_dev_default(test, dev1, 0x701, false);
	dev3 = alloc_dev_with_dpin(test, dev2, 0x50701, false);

	in = &dev3->ports[13];
	out = &host->ports[5];

	path = tb_path_alloc(NULL, in, 9, out, 9, 1, "Video");
	KUNIT_ASSERT_TRUE(test, path != NULL);
	KUNIT_ASSERT_EQ(test, path->path_length, ARRAY_SIZE(test_data));
	for (i = 0; i < ARRAY_SIZE(test_data); i++) {
		const struct tb_port *in_port, *out_port;

		in_port = path->hops[i].in_port;
		out_port = path->hops[i].out_port;

		KUNIT_EXPECT_EQ(test, tb_route(in_port->sw), test_data[i].route);
		KUNIT_EXPECT_EQ(test, in_port->port, test_data[i].in_port);
		KUNIT_EXPECT_EQ(test, (enum tb_port_type)in_port->config.type,
				test_data[i].in_type);
		KUNIT_EXPECT_EQ(test, tb_route(out_port->sw), test_data[i].route);
		KUNIT_EXPECT_EQ(test, out_port->port, test_data[i].out_port);
		KUNIT_EXPECT_EQ(test, (enum tb_port_type)out_port->config.type,
				test_data[i].out_type);
	}
	tb_path_free(path);
}

static void tb_test_path_mixed_chain(struct kunit *test)
{
	/*
	 * DP Video path from host to device 4 where first and last link
	 * is bonded.
	 *
	 * [Host]
	 * 1 |
	 * 1 |
	 * [Device #1]
	 * 7 :| 8
	 * 1 :| 2
	 * [Device #2]
	 * 5 :| 6
	 * 1 :| 2
	 * [Device #3]
	 * 3 |
	 * 1 |
	 * [Device #4]
	 */
	static const struct hop_expectation test_data[] = {
		{
			.route = 0x0,
			.in_port = 5,
			.in_type = TB_TYPE_DP_HDMI_IN,
			.out_port = 1,
			.out_type = TB_TYPE_PORT,
		},
		{
			.route = 0x1,
			.in_port = 1,
			.in_type = TB_TYPE_PORT,
			.out_port = 8,
			.out_type = TB_TYPE_PORT,
		},
		{
			.route = 0x701,
			.in_port = 2,
			.in_type = TB_TYPE_PORT,
			.out_port = 6,
			.out_type = TB_TYPE_PORT,
		},
		{
			.route = 0x50701,
			.in_port = 2,
			.in_type = TB_TYPE_PORT,
			.out_port = 3,
			.out_type = TB_TYPE_PORT,
		},
		{
			.route = 0x3050701,
			.in_port = 1,
			.in_type = TB_TYPE_PORT,
			.out_port = 13,
			.out_type = TB_TYPE_DP_HDMI_OUT,
		},
	};
	struct tb_switch *host, *dev1, *dev2, *dev3, *dev4;
	struct tb_port *in, *out;
	struct tb_path *path;
	int i;

	host = alloc_host(test);
	dev1 = alloc_dev_default(test, host, 0x1, true);
	dev2 = alloc_dev_default(test, dev1, 0x701, false);
	dev3 = alloc_dev_default(test, dev2, 0x50701, false);
	dev4 = alloc_dev_default(test, dev3, 0x3050701, true);

	in = &host->ports[5];
	out = &dev4->ports[13];

	path = tb_path_alloc(NULL, in, 9, out, 9, 1, "Video");
	KUNIT_ASSERT_TRUE(test, path != NULL);
	KUNIT_ASSERT_EQ(test, path->path_length, ARRAY_SIZE(test_data));
	for (i = 0; i < ARRAY_SIZE(test_data); i++) {
		const struct tb_port *in_port, *out_port;

		in_port = path->hops[i].in_port;
		out_port = path->hops[i].out_port;

		KUNIT_EXPECT_EQ(test, tb_route(in_port->sw), test_data[i].route);
		KUNIT_EXPECT_EQ(test, in_port->port, test_data[i].in_port);
		KUNIT_EXPECT_EQ(test, (enum tb_port_type)in_port->config.type,
				test_data[i].in_type);
		KUNIT_EXPECT_EQ(test, tb_route(out_port->sw), test_data[i].route);
		KUNIT_EXPECT_EQ(test, out_port->port, test_data[i].out_port);
		KUNIT_EXPECT_EQ(test, (enum tb_port_type)out_port->config.type,
				test_data[i].out_type);
	}
	tb_path_free(path);
}

static void tb_test_path_mixed_chain_reverse(struct kunit *test)
{
	/*
	 * DP Video path from device 4 to host where first and last link
	 * is bonded.
	 *
	 * [Host]
	 * 1 |
	 * 1 |
	 * [Device #1]
	 * 7 :| 8
	 * 1 :| 2
	 * [Device #2]
	 * 5 :| 6
	 * 1 :| 2
	 * [Device #3]
	 * 3 |
	 * 1 |
	 * [Device #4]
	 */
	static const struct hop_expectation test_data[] = {
		{
			.route = 0x3050701,
			.in_port = 13,
			.in_type = TB_TYPE_DP_HDMI_OUT,
			.out_port = 1,
			.out_type = TB_TYPE_PORT,
		},
		{
			.route = 0x50701,
			.in_port = 3,
			.in_type = TB_TYPE_PORT,
			.out_port = 2,
			.out_type = TB_TYPE_PORT,
		},
		{
			.route = 0x701,
			.in_port = 6,
			.in_type = TB_TYPE_PORT,
			.out_port = 2,
			.out_type = TB_TYPE_PORT,
		},
		{
			.route = 0x1,
			.in_port = 8,
			.in_type = TB_TYPE_PORT,
			.out_port = 1,
			.out_type = TB_TYPE_PORT,
		},
		{
			.route = 0x0,
			.in_port = 1,
			.in_type = TB_TYPE_PORT,
			.out_port = 5,
			.out_type = TB_TYPE_DP_HDMI_IN,
		},
	};
	struct tb_switch *host, *dev1, *dev2, *dev3, *dev4;
	struct tb_port *in, *out;
	struct tb_path *path;
	int i;

	host = alloc_host(test);
	dev1 = alloc_dev_default(test, host, 0x1, true);
	dev2 = alloc_dev_default(test, dev1, 0x701, false);
	dev3 = alloc_dev_default(test, dev2, 0x50701, false);
	dev4 = alloc_dev_default(test, dev3, 0x3050701, true);

	in = &dev4->ports[13];
	out = &host->ports[5];

	path = tb_path_alloc(NULL, in, 9, out, 9, 1, "Video");
	KUNIT_ASSERT_TRUE(test, path != NULL);
	KUNIT_ASSERT_EQ(test, path->path_length, ARRAY_SIZE(test_data));
	for (i = 0; i < ARRAY_SIZE(test_data); i++) {
		const struct tb_port *in_port, *out_port;

		in_port = path->hops[i].in_port;
		out_port = path->hops[i].out_port;

		KUNIT_EXPECT_EQ(test, tb_route(in_port->sw), test_data[i].route);
		KUNIT_EXPECT_EQ(test, in_port->port, test_data[i].in_port);
		KUNIT_EXPECT_EQ(test, (enum tb_port_type)in_port->config.type,
				test_data[i].in_type);
		KUNIT_EXPECT_EQ(test, tb_route(out_port->sw), test_data[i].route);
		KUNIT_EXPECT_EQ(test, out_port->port, test_data[i].out_port);
		KUNIT_EXPECT_EQ(test, (enum tb_port_type)out_port->config.type,
				test_data[i].out_type);
	}
	tb_path_free(path);
}

static void tb_test_tunnel_pcie(struct kunit *test)
{
	struct tb_switch *host, *dev1, *dev2;
	struct tb_tunnel *tunnel1, *tunnel2;
	struct tb_port *down, *up;

	/*
	 * Create PCIe tunnel between host and two devices.
	 *
	 * [Host]
	 *  1 |
	 *  1 |
	 * [Device #1]
	 *  5 |
	 *  1 |
	 * [Device #2]
	 */
	host = alloc_host(test);
	dev1 = alloc_dev_default(test, host, 0x1, true);
	dev2 = alloc_dev_default(test, dev1, 0x501, true);

	down = &host->ports[8];
	up = &dev1->ports[9];
	tunnel1 = tb_tunnel_alloc_pci(NULL, up, down);
	KUNIT_ASSERT_TRUE(test, tunnel1 != NULL);
	KUNIT_EXPECT_EQ(test, tunnel1->type, TB_TUNNEL_PCI);
	KUNIT_EXPECT_PTR_EQ(test, tunnel1->src_port, down);
	KUNIT_EXPECT_PTR_EQ(test, tunnel1->dst_port, up);
	KUNIT_ASSERT_EQ(test, tunnel1->npaths, 2);
	KUNIT_ASSERT_EQ(test, tunnel1->paths[0]->path_length, 2);
	KUNIT_EXPECT_PTR_EQ(test, tunnel1->paths[0]->hops[0].in_port, down);
	KUNIT_EXPECT_PTR_EQ(test, tunnel1->paths[0]->hops[1].out_port, up);
	KUNIT_ASSERT_EQ(test, tunnel1->paths[1]->path_length, 2);
	KUNIT_EXPECT_PTR_EQ(test, tunnel1->paths[1]->hops[0].in_port, up);
	KUNIT_EXPECT_PTR_EQ(test, tunnel1->paths[1]->hops[1].out_port, down);

	down = &dev1->ports[10];
	up = &dev2->ports[9];
	tunnel2 = tb_tunnel_alloc_pci(NULL, up, down);
	KUNIT_ASSERT_TRUE(test, tunnel2 != NULL);
	KUNIT_EXPECT_EQ(test, tunnel2->type, TB_TUNNEL_PCI);
	KUNIT_EXPECT_PTR_EQ(test, tunnel2->src_port, down);
	KUNIT_EXPECT_PTR_EQ(test, tunnel2->dst_port, up);
	KUNIT_ASSERT_EQ(test, tunnel2->npaths, 2);
	KUNIT_ASSERT_EQ(test, tunnel2->paths[0]->path_length, 2);
	KUNIT_EXPECT_PTR_EQ(test, tunnel2->paths[0]->hops[0].in_port, down);
	KUNIT_EXPECT_PTR_EQ(test, tunnel2->paths[0]->hops[1].out_port, up);
	KUNIT_ASSERT_EQ(test, tunnel2->paths[1]->path_length, 2);
	KUNIT_EXPECT_PTR_EQ(test, tunnel2->paths[1]->hops[0].in_port, up);
	KUNIT_EXPECT_PTR_EQ(test, tunnel2->paths[1]->hops[1].out_port, down);

	tb_tunnel_free(tunnel2);
	tb_tunnel_free(tunnel1);
}

static void tb_test_tunnel_dp(struct kunit *test)
{
	struct tb_switch *host, *dev;
	struct tb_port *in, *out;
	struct tb_tunnel *tunnel;

	/*
	 * Create DP tunnel between Host and Device
	 *
	 * [Host]
	 *  1 |
	 *  1 |
	 * [Device]
	 */
	host = alloc_host(test);
	dev = alloc_dev_default(test, host, 0x3, true);

	in = &host->ports[5];
	out = &dev->ports[13];

	tunnel = tb_tunnel_alloc_dp(NULL, in, out, 0, 0);
	KUNIT_ASSERT_TRUE(test, tunnel != NULL);
	KUNIT_EXPECT_EQ(test, tunnel->type, TB_TUNNEL_DP);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->src_port, in);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->dst_port, out);
	KUNIT_ASSERT_EQ(test, tunnel->npaths, 3);
	KUNIT_ASSERT_EQ(test, tunnel->paths[0]->path_length, 2);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].in_port, in);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[1].out_port, out);
	KUNIT_ASSERT_EQ(test, tunnel->paths[1]->path_length, 2);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[0].in_port, in);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[1].out_port, out);
	KUNIT_ASSERT_EQ(test, tunnel->paths[2]->path_length, 2);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[0].in_port, out);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[1].out_port, in);
	tb_tunnel_free(tunnel);
}

static void tb_test_tunnel_dp_chain(struct kunit *test)
{
	struct tb_switch *host, *dev1, *dev4;
	struct tb_port *in, *out;
	struct tb_tunnel *tunnel;

	/*
	 * Create DP tunnel from Host DP IN to Device #4 DP OUT.
	 *
	 * [Host]
	 *  1 |
	 *  1 |
	 * [Device #1]
	 *  3 /   | 5  \ 7
	 * 1 /    |     \ 1
	 * [Device #2]  |  [Device #4]
	 *              | 1
	 *        [Device #3]
	 */
	host = alloc_host(test);
	dev1 = alloc_dev_default(test, host, 0x1, true);
	alloc_dev_default(test, dev1, 0x301, true);
	alloc_dev_default(test, dev1, 0x501, true);
	dev4 = alloc_dev_default(test, dev1, 0x701, true);

	in = &host->ports[5];
	out = &dev4->ports[14];

	tunnel = tb_tunnel_alloc_dp(NULL, in, out, 0, 0);
	KUNIT_ASSERT_TRUE(test, tunnel != NULL);
	KUNIT_EXPECT_EQ(test, tunnel->type, TB_TUNNEL_DP);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->src_port, in);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->dst_port, out);
	KUNIT_ASSERT_EQ(test, tunnel->npaths, 3);
	KUNIT_ASSERT_EQ(test, tunnel->paths[0]->path_length, 3);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].in_port, in);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[2].out_port, out);
	KUNIT_ASSERT_EQ(test, tunnel->paths[1]->path_length, 3);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[0].in_port, in);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[2].out_port, out);
	KUNIT_ASSERT_EQ(test, tunnel->paths[2]->path_length, 3);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[0].in_port, out);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[2].out_port, in);
	tb_tunnel_free(tunnel);
}

static void tb_test_tunnel_dp_tree(struct kunit *test)
{
	struct tb_switch *host, *dev1, *dev2, *dev3, *dev5;
	struct tb_port *in, *out;
	struct tb_tunnel *tunnel;

	/*
	 * Create DP tunnel from Device #2 DP IN to Device #5 DP OUT.
	 *
	 * [Host]
	 *  3 |
	 *  1 |
	 * [Device #1]
	 *  3 /   | 5  \ 7
	 * 1 /    |     \ 1
	 * [Device #2]  |  [Device #4]
	 *              | 1
	 *        [Device #3]
	 *              | 5
	 *              | 1
	 *        [Device #5]
	 */
	host = alloc_host(test);
	dev1 = alloc_dev_default(test, host, 0x3, true);
	dev2 = alloc_dev_with_dpin(test, dev1, 0x303, true);
	dev3 = alloc_dev_default(test, dev1, 0x503, true);
	alloc_dev_default(test, dev1, 0x703, true);
	dev5 = alloc_dev_default(test, dev3, 0x50503, true);

	in = &dev2->ports[13];
	out = &dev5->ports[13];

	tunnel = tb_tunnel_alloc_dp(NULL, in, out, 0, 0);
	KUNIT_ASSERT_TRUE(test, tunnel != NULL);
	KUNIT_EXPECT_EQ(test, tunnel->type, TB_TUNNEL_DP);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->src_port, in);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->dst_port, out);
	KUNIT_ASSERT_EQ(test, tunnel->npaths, 3);
	KUNIT_ASSERT_EQ(test, tunnel->paths[0]->path_length, 4);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].in_port, in);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[3].out_port, out);
	KUNIT_ASSERT_EQ(test, tunnel->paths[1]->path_length, 4);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[0].in_port, in);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[3].out_port, out);
	KUNIT_ASSERT_EQ(test, tunnel->paths[2]->path_length, 4);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[0].in_port, out);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[3].out_port, in);
	tb_tunnel_free(tunnel);
}

static void tb_test_tunnel_dp_max_length(struct kunit *test)
{
	struct tb_switch *host, *dev1, *dev2, *dev3, *dev4, *dev5, *dev6;
	struct tb_switch *dev7, *dev8, *dev9, *dev10, *dev11, *dev12;
	struct tb_port *in, *out;
	struct tb_tunnel *tunnel;

	/*
	 * Creates DP tunnel from Device #6 to Device #12.
	 *
	 *          [Host]
	 *       1 /     \ 3
	 *      1 /       \ 1
	 * [Device #1]   [Device #7]
	 *     3 |           | 3
	 *     1 |           | 1
	 * [Device #2]   [Device #8]
	 *     3 |           | 3
	 *     1 |           | 1
	 * [Device #3]   [Device #9]
	 *     3 |           | 3
	 *     1 |           | 1
	 * [Device #4]   [Device #10]
	 *     3 |           | 3
	 *     1 |           | 1
	 * [Device #5]   [Device #11]
	 *     3 |           | 3
	 *     1 |           | 1
	 * [Device #6]   [Device #12]
	 */
	host = alloc_host(test);
	dev1 = alloc_dev_default(test, host, 0x1, true);
	dev2 = alloc_dev_default(test, dev1, 0x301, true);
	dev3 = alloc_dev_default(test, dev2, 0x30301, true);
	dev4 = alloc_dev_default(test, dev3, 0x3030301, true);
	dev5 = alloc_dev_default(test, dev4, 0x303030301, true);
	dev6 = alloc_dev_with_dpin(test, dev5, 0x30303030301, true);
	dev7 = alloc_dev_default(test, host, 0x3, true);
	dev8 = alloc_dev_default(test, dev7, 0x303, true);
	dev9 = alloc_dev_default(test, dev8, 0x30303, true);
	dev10 = alloc_dev_default(test, dev9, 0x3030303, true);
	dev11 = alloc_dev_default(test, dev10, 0x303030303, true);
	dev12 = alloc_dev_default(test, dev11, 0x30303030303, true);

	in = &dev6->ports[13];
	out = &dev12->ports[13];

	tunnel = tb_tunnel_alloc_dp(NULL, in, out, 0, 0);
	KUNIT_ASSERT_TRUE(test, tunnel != NULL);
	KUNIT_EXPECT_EQ(test, tunnel->type, TB_TUNNEL_DP);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->src_port, in);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->dst_port, out);
	KUNIT_ASSERT_EQ(test, tunnel->npaths, 3);
	KUNIT_ASSERT_EQ(test, tunnel->paths[0]->path_length, 13);
	/* First hop */
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].in_port, in);
	/* Middle */
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[6].in_port,
			    &host->ports[1]);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[6].out_port,
			    &host->ports[3]);
	/* Last */
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[12].out_port, out);
	KUNIT_ASSERT_EQ(test, tunnel->paths[1]->path_length, 13);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[0].in_port, in);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[6].in_port,
			    &host->ports[1]);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[6].out_port,
			    &host->ports[3]);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[12].out_port, out);
	KUNIT_ASSERT_EQ(test, tunnel->paths[2]->path_length, 13);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[0].in_port, out);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[6].in_port,
			    &host->ports[3]);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[6].out_port,
			    &host->ports[1]);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[12].out_port, in);
	tb_tunnel_free(tunnel);
}

static void tb_test_tunnel_usb3(struct kunit *test)
{
	struct tb_switch *host, *dev1, *dev2;
	struct tb_tunnel *tunnel1, *tunnel2;
	struct tb_port *down, *up;

	/*
	 * Create USB3 tunnel between host and two devices.
	 *
	 * [Host]
	 *  1 |
	 *  1 |
	 * [Device #1]
	 *          \ 7
	 *           \ 1
	 *         [Device #2]
	 */
	host = alloc_host(test);
	dev1 = alloc_dev_default(test, host, 0x1, true);
	dev2 = alloc_dev_default(test, dev1, 0x701, true);

	down = &host->ports[12];
	up = &dev1->ports[16];
	tunnel1 = tb_tunnel_alloc_usb3(NULL, up, down, 0, 0);
	KUNIT_ASSERT_TRUE(test, tunnel1 != NULL);
	KUNIT_EXPECT_EQ(test, tunnel1->type, TB_TUNNEL_USB3);
	KUNIT_EXPECT_PTR_EQ(test, tunnel1->src_port, down);
	KUNIT_EXPECT_PTR_EQ(test, tunnel1->dst_port, up);
	KUNIT_ASSERT_EQ(test, tunnel1->npaths, 2);
	KUNIT_ASSERT_EQ(test, tunnel1->paths[0]->path_length, 2);
	KUNIT_EXPECT_PTR_EQ(test, tunnel1->paths[0]->hops[0].in_port, down);
	KUNIT_EXPECT_PTR_EQ(test, tunnel1->paths[0]->hops[1].out_port, up);
	KUNIT_ASSERT_EQ(test, tunnel1->paths[1]->path_length, 2);
	KUNIT_EXPECT_PTR_EQ(test, tunnel1->paths[1]->hops[0].in_port, up);
	KUNIT_EXPECT_PTR_EQ(test, tunnel1->paths[1]->hops[1].out_port, down);

	down = &dev1->ports[17];
	up = &dev2->ports[16];
	tunnel2 = tb_tunnel_alloc_usb3(NULL, up, down, 0, 0);
	KUNIT_ASSERT_TRUE(test, tunnel2 != NULL);
	KUNIT_EXPECT_EQ(test, tunnel2->type, TB_TUNNEL_USB3);
	KUNIT_EXPECT_PTR_EQ(test, tunnel2->src_port, down);
	KUNIT_EXPECT_PTR_EQ(test, tunnel2->dst_port, up);
	KUNIT_ASSERT_EQ(test, tunnel2->npaths, 2);
	KUNIT_ASSERT_EQ(test, tunnel2->paths[0]->path_length, 2);
	KUNIT_EXPECT_PTR_EQ(test, tunnel2->paths[0]->hops[0].in_port, down);
	KUNIT_EXPECT_PTR_EQ(test, tunnel2->paths[0]->hops[1].out_port, up);
	KUNIT_ASSERT_EQ(test, tunnel2->paths[1]->path_length, 2);
	KUNIT_EXPECT_PTR_EQ(test, tunnel2->paths[1]->hops[0].in_port, up);
	KUNIT_EXPECT_PTR_EQ(test, tunnel2->paths[1]->hops[1].out_port, down);

	tb_tunnel_free(tunnel2);
	tb_tunnel_free(tunnel1);
}

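/*
 * Checks that tb_tunnel_port_on_path() reports only the ports that are
 * actually part of the allocated DP tunnel.
 */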
static void tb_test_tunnel_port_on_path(struct kunit *test)
{
	struct tb_switch *host, *dev1, *dev2, *dev3, *dev4, *dev5;
	struct tb_port *in, *out, *port;
	struct tb_tunnel *dp_tunnel;

	/*
	 * [Host]
	 *  3 |
	 *  1 |
	 * [Device #1]
	 *  3 /   | 5  \ 7
	 * 1 /    |     \ 1
	 * [Device #2]  |  [Device #4]
	 *              | 1
	 *        [Device #3]
	 *              | 5
	 *              | 1
	 *        [Device #5]
	 */
	host = alloc_host(test);
	dev1 = alloc_dev_default(test, host, 0x3, true);
	dev2 = alloc_dev_with_dpin(test, dev1, 0x303, true);
	dev3 = alloc_dev_default(test, dev1, 0x503, true);
	dev4 = alloc_dev_default(test, dev1, 0x703, true);
	dev5 = alloc_dev_default(test, dev3, 0x50503, true);

	in = &dev2->ports[13];
	out = &dev5->ports[13];

	dp_tunnel = tb_tunnel_alloc_dp(NULL, in, out, 0, 0);
	KUNIT_ASSERT_TRUE(test, dp_tunnel != NULL);

	KUNIT_EXPECT_TRUE(test, tb_tunnel_port_on_path(dp_tunnel, in));
	KUNIT_EXPECT_TRUE(test, tb_tunnel_port_on_path(dp_tunnel, out));

	port = &host->ports[8];
	KUNIT_EXPECT_FALSE(test, tb_tunnel_port_on_path(dp_tunnel, port));

	port = &host->ports[3];
	KUNIT_EXPECT_FALSE(test, tb_tunnel_port_on_path(dp_tunnel, port));

	port = &dev1->ports[1];
	KUNIT_EXPECT_FALSE(test, tb_tunnel_port_on_path(dp_tunnel, port));

	port = &dev1->ports[3];
	KUNIT_EXPECT_TRUE(test, tb_tunnel_port_on_path(dp_tunnel, port));

	port = &dev1->ports[5];
	KUNIT_EXPECT_TRUE(test, tb_tunnel_port_on_path(dp_tunnel, port));

	port = &dev1->ports[7];
	KUNIT_EXPECT_FALSE(test, tb_tunnel_port_on_path(dp_tunnel, port));

	port = &dev3->ports[1];
	KUNIT_EXPECT_TRUE(test, tb_tunnel_port_on_path(dp_tunnel, port));

	port = &dev5->ports[1];
	KUNIT_EXPECT_TRUE(test, tb_tunnel_port_on_path(dp_tunnel, port));

	port = &dev4->ports[1];
	KUNIT_EXPECT_FALSE(test, tb_tunnel_port_on_path(dp_tunnel, port));

	tb_tunnel_free(dp_tunnel);
}

Mika Westerberg5adab6c2021-01-08 16:32:19 +02001666static void tb_test_tunnel_dma(struct kunit *test)
1667{
1668 struct tb_port *nhi, *port;
1669 struct tb_tunnel *tunnel;
1670 struct tb_switch *host;
1671
1672 /*
1673 * Create DMA tunnel from NHI to port 1 and back.
1674 *
1675 * [Host 1]
1676 * 1 ^ In HopID 1 -> Out HopID 8
1677 * |
1678 * v In HopID 8 -> Out HopID 1
1679 * ............ Domain border
1680 * |
1681 * [Host 2]
1682 */
1683 host = alloc_host(test);
1684 nhi = &host->ports[7];
1685 port = &host->ports[1];
1686
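	/*
	 * A reading aid for the arguments below, inferred from how the
	 * hops are checked afterwards: the two "8"s are the HopIDs used
	 * on the lane adapter at the domain border, and the two "1"s
	 * are the HopIDs terminating at the NHI.
	 */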
	tunnel = tb_tunnel_alloc_dma(NULL, nhi, port, 8, 1, 8, 1);
	KUNIT_ASSERT_TRUE(test, tunnel != NULL);
	KUNIT_EXPECT_EQ(test, tunnel->type, TB_TUNNEL_DMA);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->src_port, nhi);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->dst_port, port);
	KUNIT_ASSERT_EQ(test, tunnel->npaths, 2);
	/* RX path */
	KUNIT_ASSERT_EQ(test, tunnel->paths[0]->path_length, 1);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].in_port, port);
	KUNIT_EXPECT_EQ(test, tunnel->paths[0]->hops[0].in_hop_index, 8);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].out_port, nhi);
	KUNIT_EXPECT_EQ(test, tunnel->paths[0]->hops[0].next_hop_index, 1);
	/* TX path */
	KUNIT_ASSERT_EQ(test, tunnel->paths[1]->path_length, 1);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[0].in_port, nhi);
	KUNIT_EXPECT_EQ(test, tunnel->paths[1]->hops[0].in_hop_index, 1);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[0].out_port, port);
	KUNIT_EXPECT_EQ(test, tunnel->paths[1]->hops[0].next_hop_index, 8);

	tb_tunnel_free(tunnel);
}

static void tb_test_tunnel_dma_rx(struct kunit *test)
{
	struct tb_port *nhi, *port;
	struct tb_tunnel *tunnel;
	struct tb_switch *host;

	/*
	 * Create DMA RX tunnel from port 1 to NHI.
	 *
	 * [Host 1]
	 *  1 ^
	 *    |
	 *    | In HopID 15 -> Out HopID 2
	 * ............ Domain border
	 *    |
	 * [Host 2]
	 */
	host = alloc_host(test);
	nhi = &host->ports[7];
	port = &host->ports[1];

	tunnel = tb_tunnel_alloc_dma(NULL, nhi, port, -1, -1, 15, 2);
	KUNIT_ASSERT_TRUE(test, tunnel != NULL);
	KUNIT_EXPECT_EQ(test, tunnel->type, TB_TUNNEL_DMA);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->src_port, nhi);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->dst_port, port);
	KUNIT_ASSERT_EQ(test, tunnel->npaths, 1);
	/* RX path */
	KUNIT_ASSERT_EQ(test, tunnel->paths[0]->path_length, 1);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].in_port, port);
	KUNIT_EXPECT_EQ(test, tunnel->paths[0]->hops[0].in_hop_index, 15);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].out_port, nhi);
	KUNIT_EXPECT_EQ(test, tunnel->paths[0]->hops[0].next_hop_index, 2);

	tb_tunnel_free(tunnel);
}

static void tb_test_tunnel_dma_tx(struct kunit *test)
{
	struct tb_port *nhi, *port;
	struct tb_tunnel *tunnel;
	struct tb_switch *host;

	/*
	 * Create DMA TX tunnel from NHI to port 1.
	 *
	 * [Host 1]
	 *  1 | In HopID 2 -> Out HopID 15
	 *    |
	 *    v
	 * ............ Domain border
	 *    |
	 * [Host 2]
	 */
	host = alloc_host(test);
	nhi = &host->ports[7];
	port = &host->ports[1];

	tunnel = tb_tunnel_alloc_dma(NULL, nhi, port, 15, 2, -1, -1);
	KUNIT_ASSERT_TRUE(test, tunnel != NULL);
	KUNIT_EXPECT_EQ(test, tunnel->type, TB_TUNNEL_DMA);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->src_port, nhi);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->dst_port, port);
	KUNIT_ASSERT_EQ(test, tunnel->npaths, 1);
	/* TX path */
	KUNIT_ASSERT_EQ(test, tunnel->paths[0]->path_length, 1);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].in_port, nhi);
	KUNIT_EXPECT_EQ(test, tunnel->paths[0]->hops[0].in_hop_index, 2);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].out_port, port);
	KUNIT_EXPECT_EQ(test, tunnel->paths[0]->hops[0].next_hop_index, 15);

	tb_tunnel_free(tunnel);
}

static void tb_test_tunnel_dma_chain(struct kunit *test)
{
	struct tb_switch *host, *dev1, *dev2;
	struct tb_port *nhi, *port;
	struct tb_tunnel *tunnel;

	/*
	 * Create DMA tunnel from NHI to Device #2 port 3 and back.
	 *
	 * [Host 1]
	 *  1 ^ In HopID 1 -> Out HopID x
	 *    |
	 *  1 | In HopID x -> Out HopID 1
	 * [Device #1]
	 *         7 \
	 *          1 \
	 *       [Device #2]
	 *        3 | In HopID x -> Out HopID 8
	 *          |
	 *          v In HopID 8 -> Out HopID x
	 * ............ Domain border
	 *          |
	 *       [Host 2]
	 */
	host = alloc_host(test);
	dev1 = alloc_dev_default(test, host, 0x1, true);
	dev2 = alloc_dev_default(test, dev1, 0x701, true);

	nhi = &host->ports[7];
	port = &dev2->ports[3];
	tunnel = tb_tunnel_alloc_dma(NULL, nhi, port, 8, 1, 8, 1);
	KUNIT_ASSERT_TRUE(test, tunnel != NULL);
	KUNIT_EXPECT_EQ(test, tunnel->type, TB_TUNNEL_DMA);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->src_port, nhi);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->dst_port, port);
	KUNIT_ASSERT_EQ(test, tunnel->npaths, 2);
	/* RX path */
	KUNIT_ASSERT_EQ(test, tunnel->paths[0]->path_length, 3);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].in_port, port);
	KUNIT_EXPECT_EQ(test, tunnel->paths[0]->hops[0].in_hop_index, 8);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].out_port,
			    &dev2->ports[1]);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[1].in_port,
			    &dev1->ports[7]);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[1].out_port,
			    &dev1->ports[1]);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[2].in_port,
			    &host->ports[1]);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[2].out_port, nhi);
	KUNIT_EXPECT_EQ(test, tunnel->paths[0]->hops[2].next_hop_index, 1);
	/* TX path */
	KUNIT_ASSERT_EQ(test, tunnel->paths[1]->path_length, 3);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[0].in_port, nhi);
	KUNIT_EXPECT_EQ(test, tunnel->paths[1]->hops[0].in_hop_index, 1);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[1].in_port,
			    &dev1->ports[1]);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[1].out_port,
			    &dev1->ports[7]);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[2].in_port,
			    &dev2->ports[1]);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[2].out_port, port);
	KUNIT_EXPECT_EQ(test, tunnel->paths[1]->hops[2].next_hop_index, 8);

	tb_tunnel_free(tunnel);
}

static void tb_test_tunnel_dma_match(struct kunit *test)
{
	struct tb_port *nhi, *port;
	struct tb_tunnel *tunnel;
	struct tb_switch *host;

	host = alloc_host(test);
	nhi = &host->ports[7];
	port = &host->ports[1];

	tunnel = tb_tunnel_alloc_dma(NULL, nhi, port, 15, 1, 15, 1);
	KUNIT_ASSERT_TRUE(test, tunnel != NULL);

	KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, 15, 1, 15, 1));
	KUNIT_ASSERT_FALSE(test, tb_tunnel_match_dma(tunnel, 8, 1, 15, 1));
	KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, -1, -1, 15, 1));
	KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, 15, 1, -1, -1));
	KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, 15, -1, -1, -1));
	KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, -1, 1, -1, -1));
	KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, -1, -1, 15, -1));
	KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, -1, -1, -1, 1));
	KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, -1, -1, -1, -1));
	KUNIT_ASSERT_FALSE(test, tb_tunnel_match_dma(tunnel, 8, -1, 8, -1));

	tb_tunnel_free(tunnel);

	tunnel = tb_tunnel_alloc_dma(NULL, nhi, port, 15, 1, -1, -1);
	KUNIT_ASSERT_TRUE(test, tunnel != NULL);
	KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, 15, 1, -1, -1));
	KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, 15, -1, -1, -1));
	KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, -1, 1, -1, -1));
	KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, -1, -1, -1, -1));
	KUNIT_ASSERT_FALSE(test, tb_tunnel_match_dma(tunnel, 15, 1, 15, 1));
	KUNIT_ASSERT_FALSE(test, tb_tunnel_match_dma(tunnel, -1, -1, 15, 1));
	KUNIT_ASSERT_FALSE(test, tb_tunnel_match_dma(tunnel, 15, 11, -1, -1));

	tb_tunnel_free(tunnel);

	tunnel = tb_tunnel_alloc_dma(NULL, nhi, port, -1, -1, 15, 11);
	KUNIT_ASSERT_TRUE(test, tunnel != NULL);
	KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, -1, -1, 15, 11));
	KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, -1, -1, 15, -1));
	KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, -1, -1, -1, 11));
	KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, -1, -1, -1, -1));
	KUNIT_ASSERT_FALSE(test, tb_tunnel_match_dma(tunnel, -1, -1, 15, 1));
	KUNIT_ASSERT_FALSE(test, tb_tunnel_match_dma(tunnel, -1, -1, 10, 11));
	KUNIT_ASSERT_FALSE(test, tb_tunnel_match_dma(tunnel, 15, 11, -1, -1));

	tb_tunnel_free(tunnel);
}
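
/*
 * In the test above the -1 arguments act as wildcards. A minimal
 * sketch of the per-value matching rule being exercised (illustrative
 * only, with a hypothetical helper name; the real logic lives in
 * tb_tunnel_match_dma() in tunnel.c and additionally requires that a
 * concretely requested path actually exists on the tunnel):
 */
static inline bool tb_test_hopid_matches(int requested, int programmed)
{
	/* A negative request matches any programmed HopID */
	return requested < 0 || requested == programmed;
}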

static void tb_test_credit_alloc_legacy_not_bonded(struct kunit *test)
{
	struct tb_switch *host, *dev;
	struct tb_port *up, *down;
	struct tb_tunnel *tunnel;
	struct tb_path *path;

	host = alloc_host(test);
	dev = alloc_dev_default(test, host, 0x1, false);

	down = &host->ports[8];
	up = &dev->ports[9];
	tunnel = tb_tunnel_alloc_pci(NULL, up, down);
	KUNIT_ASSERT_TRUE(test, tunnel != NULL);
	KUNIT_ASSERT_EQ(test, tunnel->npaths, (size_t)2);

	path = tunnel->paths[0];
	KUNIT_ASSERT_EQ(test, path->path_length, 2);
	KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 7U);
	KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 16U);

	path = tunnel->paths[1];
	KUNIT_ASSERT_EQ(test, path->path_length, 2);
	KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 7U);
	KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 16U);

	tb_tunnel_free(tunnel);
}

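/*
 * Same topology as above but with the link bonded; note that the
 * device-side hop now gets 32 initial credits instead of 16.
 */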
static void tb_test_credit_alloc_legacy_bonded(struct kunit *test)
{
	struct tb_switch *host, *dev;
	struct tb_port *up, *down;
	struct tb_tunnel *tunnel;
	struct tb_path *path;

	host = alloc_host(test);
	dev = alloc_dev_default(test, host, 0x1, true);

	down = &host->ports[8];
	up = &dev->ports[9];
	tunnel = tb_tunnel_alloc_pci(NULL, up, down);
	KUNIT_ASSERT_TRUE(test, tunnel != NULL);
	KUNIT_ASSERT_EQ(test, tunnel->npaths, (size_t)2);

	path = tunnel->paths[0];
	KUNIT_ASSERT_EQ(test, path->path_length, 2);
	KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 7U);
	KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 32U);

	path = tunnel->paths[1];
	KUNIT_ASSERT_EQ(test, path->path_length, 2);
	KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 7U);
	KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 32U);

	tb_tunnel_free(tunnel);
}

static void tb_test_credit_alloc_pcie(struct kunit *test)
{
	struct tb_switch *host, *dev;
	struct tb_port *up, *down;
	struct tb_tunnel *tunnel;
	struct tb_path *path;

	host = alloc_host_usb4(test);
	dev = alloc_dev_usb4(test, host, 0x1, true);

	down = &host->ports[8];
	up = &dev->ports[9];
	tunnel = tb_tunnel_alloc_pci(NULL, up, down);
	KUNIT_ASSERT_TRUE(test, tunnel != NULL);
	KUNIT_ASSERT_EQ(test, tunnel->npaths, (size_t)2);

	path = tunnel->paths[0];
	KUNIT_ASSERT_EQ(test, path->path_length, 2);
	KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 7U);
	KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 32U);

	path = tunnel->paths[1];
	KUNIT_ASSERT_EQ(test, path->path_length, 2);
	KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 7U);
	KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 64U);

	tb_tunnel_free(tunnel);
}

static void tb_test_credit_alloc_dp(struct kunit *test)
{
	struct tb_switch *host, *dev;
	struct tb_port *in, *out;
	struct tb_tunnel *tunnel;
	struct tb_path *path;

	host = alloc_host_usb4(test);
	dev = alloc_dev_usb4(test, host, 0x1, true);

	in = &host->ports[5];
	out = &dev->ports[14];

	tunnel = tb_tunnel_alloc_dp(NULL, in, out, 0, 0);
	KUNIT_ASSERT_TRUE(test, tunnel != NULL);
	KUNIT_ASSERT_EQ(test, tunnel->npaths, (size_t)3);

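	/*
	 * The main (video) path is expected to run without flow control
	 * (hence the nfc_credits checks below), while both AUX paths
	 * use ordinary buffered credits.
	 */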
	/* Video (main) path */
	path = tunnel->paths[0];
	KUNIT_ASSERT_EQ(test, path->path_length, 2);
	KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 12U);
	KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 18U);
	KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 0U);

	/* AUX TX */
	path = tunnel->paths[1];
	KUNIT_ASSERT_EQ(test, path->path_length, 2);
	KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 1U);
	KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 1U);

	/* AUX RX */
	path = tunnel->paths[2];
	KUNIT_ASSERT_EQ(test, path->path_length, 2);
	KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 1U);
	KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 1U);

	tb_tunnel_free(tunnel);
}

static void tb_test_credit_alloc_usb3(struct kunit *test)
{
	struct tb_switch *host, *dev;
	struct tb_port *up, *down;
	struct tb_tunnel *tunnel;
	struct tb_path *path;

	host = alloc_host_usb4(test);
	dev = alloc_dev_usb4(test, host, 0x1, true);

	down = &host->ports[12];
	up = &dev->ports[16];
	tunnel = tb_tunnel_alloc_usb3(NULL, up, down, 0, 0);
	KUNIT_ASSERT_TRUE(test, tunnel != NULL);
	KUNIT_ASSERT_EQ(test, tunnel->npaths, (size_t)2);

	path = tunnel->paths[0];
	KUNIT_ASSERT_EQ(test, path->path_length, 2);
	KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 7U);
	KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 14U);

	path = tunnel->paths[1];
	KUNIT_ASSERT_EQ(test, path->path_length, 2);
	KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 7U);
	KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 32U);

	tb_tunnel_free(tunnel);
}

static void tb_test_credit_alloc_dma(struct kunit *test)
{
	struct tb_switch *host, *dev;
	struct tb_port *nhi, *port;
	struct tb_tunnel *tunnel;
	struct tb_path *path;

	host = alloc_host_usb4(test);
	dev = alloc_dev_usb4(test, host, 0x1, true);

	nhi = &host->ports[7];
	port = &dev->ports[3];

	tunnel = tb_tunnel_alloc_dma(NULL, nhi, port, 8, 1, 8, 1);
	KUNIT_ASSERT_TRUE(test, tunnel != NULL);
	KUNIT_ASSERT_EQ(test, tunnel->npaths, (size_t)2);

	/* DMA RX */
	path = tunnel->paths[0];
	KUNIT_ASSERT_EQ(test, path->path_length, 2);
	KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 14U);
	KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 14U);

	/* DMA TX */
	path = tunnel->paths[1];
	KUNIT_ASSERT_EQ(test, path->path_length, 2);
	KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 14U);

	tb_tunnel_free(tunnel);
}

static void tb_test_credit_alloc_dma_multiple(struct kunit *test)
{
	struct tb_tunnel *tunnel1, *tunnel2, *tunnel3;
	struct tb_switch *host, *dev;
	struct tb_port *nhi, *port;
	struct tb_path *path;

	host = alloc_host_usb4(test);
	dev = alloc_dev_usb4(test, host, 0x1, true);

	nhi = &host->ports[7];
	port = &dev->ports[3];

	/*
	 * Create three DMA tunnels through the same ports. With the
	 * default buffers we should be able to create two; the third
	 * one fails.
	 *
	 * For the default host we have the following buffers available
	 * for DMA:
	 *
	 *   120 - (2 + 2 * (1 + 0) + 32 + 64 + spare) = 20
	 *
	 * For the device we have the following:
	 *
	 *  120 - (2 + 2 * (1 + 18) + 14 + 32 + spare) = 34
	 *
	 * spare = 14 + 1 = 15
	 *
	 * So on the host the first tunnel gets 14 and the second gets
	 * the remaining 1, and then we run out of buffers.
	 */
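	/*
	 * Note how the numbers tie out: spare = 14 + 1 = 15 above is
	 * exactly what the two successful tunnels consume on the host
	 * side (14 and then 1, as asserted below), which is why the
	 * third allocation must fail.
	 */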
	tunnel1 = tb_tunnel_alloc_dma(NULL, nhi, port, 8, 1, 8, 1);
	KUNIT_ASSERT_TRUE(test, tunnel1 != NULL);
	KUNIT_ASSERT_EQ(test, tunnel1->npaths, (size_t)2);

	path = tunnel1->paths[0];
	KUNIT_ASSERT_EQ(test, path->path_length, 2);
	KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 14U);
	KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 14U);

	path = tunnel1->paths[1];
	KUNIT_ASSERT_EQ(test, path->path_length, 2);
	KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 14U);

	tunnel2 = tb_tunnel_alloc_dma(NULL, nhi, port, 9, 2, 9, 2);
	KUNIT_ASSERT_TRUE(test, tunnel2 != NULL);
	KUNIT_ASSERT_EQ(test, tunnel2->npaths, (size_t)2);

	path = tunnel2->paths[0];
	KUNIT_ASSERT_EQ(test, path->path_length, 2);
	KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 14U);
	KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 1U);

	path = tunnel2->paths[1];
	KUNIT_ASSERT_EQ(test, path->path_length, 2);
	KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 1U);

	tunnel3 = tb_tunnel_alloc_dma(NULL, nhi, port, 10, 3, 10, 3);
	KUNIT_ASSERT_TRUE(test, tunnel3 == NULL);

	/*
	 * Release the first DMA tunnel. That should make 14 buffers
	 * available for the next tunnel.
	 */
	tb_tunnel_free(tunnel1);

	tunnel3 = tb_tunnel_alloc_dma(NULL, nhi, port, 10, 3, 10, 3);
	KUNIT_ASSERT_TRUE(test, tunnel3 != NULL);

	path = tunnel3->paths[0];
	KUNIT_ASSERT_EQ(test, path->path_length, 2);
	KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 14U);
	KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 14U);

	path = tunnel3->paths[1];
	KUNIT_ASSERT_EQ(test, path->path_length, 2);
	KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 14U);

	tb_tunnel_free(tunnel3);
	tb_tunnel_free(tunnel2);
}

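/*
 * The ALL_CAPS helpers below each build and verify one tunnel for
 * tb_test_credit_alloc_all(), keeping that test itself short.
 */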
static struct tb_tunnel *TB_TEST_PCIE_TUNNEL(struct kunit *test,
			struct tb_switch *host, struct tb_switch *dev)
{
	struct tb_port *up, *down;
	struct tb_tunnel *pcie_tunnel;
	struct tb_path *path;

	down = &host->ports[8];
	up = &dev->ports[9];
	pcie_tunnel = tb_tunnel_alloc_pci(NULL, up, down);
	KUNIT_ASSERT_TRUE(test, pcie_tunnel != NULL);
	KUNIT_ASSERT_EQ(test, pcie_tunnel->npaths, (size_t)2);

	path = pcie_tunnel->paths[0];
	KUNIT_ASSERT_EQ(test, path->path_length, 2);
	KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 7U);
	KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 32U);

	path = pcie_tunnel->paths[1];
	KUNIT_ASSERT_EQ(test, path->path_length, 2);
	KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 7U);
	KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 64U);

	return pcie_tunnel;
}

static struct tb_tunnel *TB_TEST_DP_TUNNEL1(struct kunit *test,
			struct tb_switch *host, struct tb_switch *dev)
{
	struct tb_port *in, *out;
	struct tb_tunnel *dp_tunnel1;
	struct tb_path *path;

	in = &host->ports[5];
	out = &dev->ports[13];
	dp_tunnel1 = tb_tunnel_alloc_dp(NULL, in, out, 0, 0);
	KUNIT_ASSERT_TRUE(test, dp_tunnel1 != NULL);
	KUNIT_ASSERT_EQ(test, dp_tunnel1->npaths, (size_t)3);

	path = dp_tunnel1->paths[0];
	KUNIT_ASSERT_EQ(test, path->path_length, 2);
	KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 12U);
	KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 18U);
	KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 0U);

	path = dp_tunnel1->paths[1];
	KUNIT_ASSERT_EQ(test, path->path_length, 2);
	KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 1U);
	KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 1U);

	path = dp_tunnel1->paths[2];
	KUNIT_ASSERT_EQ(test, path->path_length, 2);
	KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 1U);
	KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 1U);

	return dp_tunnel1;
}

static struct tb_tunnel *TB_TEST_DP_TUNNEL2(struct kunit *test,
			struct tb_switch *host, struct tb_switch *dev)
{
	struct tb_port *in, *out;
	struct tb_tunnel *dp_tunnel2;
	struct tb_path *path;

	in = &host->ports[6];
	out = &dev->ports[14];
	dp_tunnel2 = tb_tunnel_alloc_dp(NULL, in, out, 0, 0);
	KUNIT_ASSERT_TRUE(test, dp_tunnel2 != NULL);
	KUNIT_ASSERT_EQ(test, dp_tunnel2->npaths, (size_t)3);

	path = dp_tunnel2->paths[0];
	KUNIT_ASSERT_EQ(test, path->path_length, 2);
	KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 12U);
	KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 18U);
	KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 0U);

	path = dp_tunnel2->paths[1];
	KUNIT_ASSERT_EQ(test, path->path_length, 2);
	KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 1U);
	KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 1U);

	path = dp_tunnel2->paths[2];
	KUNIT_ASSERT_EQ(test, path->path_length, 2);
	KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 1U);
	KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 1U);

	return dp_tunnel2;
}

static struct tb_tunnel *TB_TEST_USB3_TUNNEL(struct kunit *test,
			struct tb_switch *host, struct tb_switch *dev)
{
	struct tb_port *up, *down;
	struct tb_tunnel *usb3_tunnel;
	struct tb_path *path;

	down = &host->ports[12];
	up = &dev->ports[16];
	usb3_tunnel = tb_tunnel_alloc_usb3(NULL, up, down, 0, 0);
	KUNIT_ASSERT_TRUE(test, usb3_tunnel != NULL);
	KUNIT_ASSERT_EQ(test, usb3_tunnel->npaths, (size_t)2);

	path = usb3_tunnel->paths[0];
	KUNIT_ASSERT_EQ(test, path->path_length, 2);
	KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 7U);
	KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 14U);

	path = usb3_tunnel->paths[1];
	KUNIT_ASSERT_EQ(test, path->path_length, 2);
	KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 7U);
	KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 32U);

	return usb3_tunnel;
}

static struct tb_tunnel *TB_TEST_DMA_TUNNEL1(struct kunit *test,
			struct tb_switch *host, struct tb_switch *dev)
{
	struct tb_port *nhi, *port;
	struct tb_tunnel *dma_tunnel1;
	struct tb_path *path;

	nhi = &host->ports[7];
	port = &dev->ports[3];
	dma_tunnel1 = tb_tunnel_alloc_dma(NULL, nhi, port, 8, 1, 8, 1);
	KUNIT_ASSERT_TRUE(test, dma_tunnel1 != NULL);
	KUNIT_ASSERT_EQ(test, dma_tunnel1->npaths, (size_t)2);

	path = dma_tunnel1->paths[0];
	KUNIT_ASSERT_EQ(test, path->path_length, 2);
	KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 14U);
	KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 14U);

	path = dma_tunnel1->paths[1];
	KUNIT_ASSERT_EQ(test, path->path_length, 2);
	KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 14U);

	return dma_tunnel1;
}

static struct tb_tunnel *TB_TEST_DMA_TUNNEL2(struct kunit *test,
			struct tb_switch *host, struct tb_switch *dev)
{
	struct tb_port *nhi, *port;
	struct tb_tunnel *dma_tunnel2;
	struct tb_path *path;

	nhi = &host->ports[7];
	port = &dev->ports[3];
	dma_tunnel2 = tb_tunnel_alloc_dma(NULL, nhi, port, 9, 2, 9, 2);
	KUNIT_ASSERT_TRUE(test, dma_tunnel2 != NULL);
	KUNIT_ASSERT_EQ(test, dma_tunnel2->npaths, (size_t)2);

	path = dma_tunnel2->paths[0];
	KUNIT_ASSERT_EQ(test, path->path_length, 2);
	KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 14U);
	KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 1U);

	path = dma_tunnel2->paths[1];
	KUNIT_ASSERT_EQ(test, path->path_length, 2);
	KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 1U);

	return dma_tunnel2;
}

static void tb_test_credit_alloc_all(struct kunit *test)
{
	struct tb_tunnel *pcie_tunnel, *dp_tunnel1, *dp_tunnel2, *usb3_tunnel;
	struct tb_tunnel *dma_tunnel1, *dma_tunnel2;
	struct tb_switch *host, *dev;

	/*
	 * Create PCIe, 2 x DP, USB 3.x and two DMA tunnels from host to
	 * device. The expectation is that all of these can be
	 * established with the default credit allocation found in
	 * Intel hardware.
	 */

	host = alloc_host_usb4(test);
	dev = alloc_dev_usb4(test, host, 0x1, true);

	pcie_tunnel = TB_TEST_PCIE_TUNNEL(test, host, dev);
	dp_tunnel1 = TB_TEST_DP_TUNNEL1(test, host, dev);
	dp_tunnel2 = TB_TEST_DP_TUNNEL2(test, host, dev);
	usb3_tunnel = TB_TEST_USB3_TUNNEL(test, host, dev);
	dma_tunnel1 = TB_TEST_DMA_TUNNEL1(test, host, dev);
	dma_tunnel2 = TB_TEST_DMA_TUNNEL2(test, host, dev);

	tb_tunnel_free(dma_tunnel2);
	tb_tunnel_free(dma_tunnel1);
	tb_tunnel_free(usb3_tunnel);
	tb_tunnel_free(dp_tunnel2);
	tb_tunnel_free(dp_tunnel1);
	tb_tunnel_free(pcie_tunnel);
}

static const u32 root_directory[] = {
	0x55584401, /* "UXD" v1 */
	0x00000018, /* Root directory length */
	0x76656e64, /* "vend" */
	0x6f726964, /* "orid" */
	0x76000001, /* "v" R 1 */
	0x00000a27, /* Immediate value, ! Vendor ID */
	0x76656e64, /* "vend" */
	0x6f726964, /* "orid" */
	0x74000003, /* "t" R 3 */
	0x0000001a, /* Text leaf offset, (“Apple Inc.”) */
	0x64657669, /* "devi" */
	0x63656964, /* "ceid" */
	0x76000001, /* "v" R 1 */
	0x0000000a, /* Immediate value, ! Device ID */
	0x64657669, /* "devi" */
	0x63656964, /* "ceid" */
	0x74000003, /* "t" R 3 */
	0x0000001d, /* Text leaf offset, (“Macintosh”) */
	0x64657669, /* "devi" */
	0x63657276, /* "cerv" */
	0x76000001, /* "v" R 1 */
	0x80000100, /* Immediate value, Device Revision */
	0x6e657477, /* "netw" */
	0x6f726b00, /* "ork" */
	0x44000014, /* "D" R 20 */
	0x00000021, /* Directory data offset, (Network Directory) */
	0x4170706c, /* "Appl" */
	0x6520496e, /* "e In" */
	0x632e0000, /* "c." ! */
	0x4d616369, /* "Maci" */
	0x6e746f73, /* "ntos" */
	0x68000000, /* "h" */
	0x00000000, /* padding */
	0xca8961c6, /* Directory UUID, Network Directory */
	0x9541ce1c, /* Directory UUID, Network Directory */
	0x5949b8bd, /* Directory UUID, Network Directory */
	0x4f5a5f2e, /* Directory UUID, Network Directory */
	0x70727463, /* "prtc" */
	0x69640000, /* "id" */
	0x76000001, /* "v" R 1 */
	0x00000001, /* Immediate value, Network Protocol ID */
	0x70727463, /* "prtc" */
	0x76657273, /* "vers" */
	0x76000001, /* "v" R 1 */
	0x00000001, /* Immediate value, Network Protocol Version */
	0x70727463, /* "prtc" */
	0x72657673, /* "revs" */
	0x76000001, /* "v" R 1 */
	0x00000001, /* Immediate value, Network Protocol Revision */
	0x70727463, /* "prtc" */
	0x73746e73, /* "stns" */
	0x76000001, /* "v" R 1 */
	0x00000000, /* Immediate value, Network Protocol Settings */
};

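/*
 * A reading aid for the block above (hedged sketch; the real parser is
 * tb_property_parse_dir() and these helpers are hypothetical): each
 * property entry packs an eight-character key into two words ("vend" +
 * "orid"), followed by a metadata word whose top byte is the type
 * character ('v' immediate value, 't' text leaf, 'D' directory) and
 * whose low bits carry the length, so 0x76000001 reads as "v", length 1.
 */
static inline char tb_test_prop_type(u32 meta)
{
	return (meta >> 24) & 0xff;	/* e.g. 0x76000001 -> 'v' */
}

static inline u32 tb_test_prop_len(u32 meta)
{
	return meta & 0xffff;		/* e.g. 0x44000014 -> 20 words */
}

/*
 * The UUID below spells out the same bytes as the four raw directory
 * UUID words above (0xca8961c6, ...) once UUID_INIT()'s field byte
 * order is taken into account.
 */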
static const uuid_t network_dir_uuid =
	UUID_INIT(0xc66189ca, 0x1cce, 0x4195,
		  0xbd, 0xb8, 0x49, 0x59, 0x2e, 0x5f, 0x5a, 0x4f);

static void tb_test_property_parse(struct kunit *test)
{
	struct tb_property_dir *dir, *network_dir;
	struct tb_property *p;

	dir = tb_property_parse_dir(root_directory, ARRAY_SIZE(root_directory));
	KUNIT_ASSERT_TRUE(test, dir != NULL);

	p = tb_property_find(dir, "foo", TB_PROPERTY_TYPE_TEXT);
	KUNIT_ASSERT_TRUE(test, !p);

	p = tb_property_find(dir, "vendorid", TB_PROPERTY_TYPE_TEXT);
	KUNIT_ASSERT_TRUE(test, p != NULL);
	KUNIT_EXPECT_STREQ(test, p->value.text, "Apple Inc.");

	p = tb_property_find(dir, "vendorid", TB_PROPERTY_TYPE_VALUE);
	KUNIT_ASSERT_TRUE(test, p != NULL);
	KUNIT_EXPECT_EQ(test, p->value.immediate, 0xa27);

	p = tb_property_find(dir, "deviceid", TB_PROPERTY_TYPE_TEXT);
	KUNIT_ASSERT_TRUE(test, p != NULL);
	KUNIT_EXPECT_STREQ(test, p->value.text, "Macintosh");

	p = tb_property_find(dir, "deviceid", TB_PROPERTY_TYPE_VALUE);
	KUNIT_ASSERT_TRUE(test, p != NULL);
	KUNIT_EXPECT_EQ(test, p->value.immediate, 0xa);

	p = tb_property_find(dir, "missing", TB_PROPERTY_TYPE_DIRECTORY);
	KUNIT_ASSERT_TRUE(test, !p);

	p = tb_property_find(dir, "network", TB_PROPERTY_TYPE_DIRECTORY);
	KUNIT_ASSERT_TRUE(test, p != NULL);

	network_dir = p->value.dir;
	KUNIT_EXPECT_TRUE(test, uuid_equal(network_dir->uuid, &network_dir_uuid));

	p = tb_property_find(network_dir, "prtcid", TB_PROPERTY_TYPE_VALUE);
	KUNIT_ASSERT_TRUE(test, p != NULL);
	KUNIT_EXPECT_EQ(test, p->value.immediate, 0x1);

	p = tb_property_find(network_dir, "prtcvers", TB_PROPERTY_TYPE_VALUE);
	KUNIT_ASSERT_TRUE(test, p != NULL);
	KUNIT_EXPECT_EQ(test, p->value.immediate, 0x1);

	p = tb_property_find(network_dir, "prtcrevs", TB_PROPERTY_TYPE_VALUE);
	KUNIT_ASSERT_TRUE(test, p != NULL);
	KUNIT_EXPECT_EQ(test, p->value.immediate, 0x1);

	p = tb_property_find(network_dir, "prtcstns", TB_PROPERTY_TYPE_VALUE);
	KUNIT_ASSERT_TRUE(test, p != NULL);
	KUNIT_EXPECT_EQ(test, p->value.immediate, 0x0);

	p = tb_property_find(network_dir, "deviceid", TB_PROPERTY_TYPE_VALUE);
	KUNIT_EXPECT_TRUE(test, !p);
	p = tb_property_find(network_dir, "deviceid", TB_PROPERTY_TYPE_TEXT);
	KUNIT_EXPECT_TRUE(test, !p);

	tb_property_free_dir(dir);
}

static void tb_test_property_format(struct kunit *test)
{
	struct tb_property_dir *dir;
	ssize_t block_len;
	u32 *block;
	int ret, i;

	dir = tb_property_parse_dir(root_directory, ARRAY_SIZE(root_directory));
	KUNIT_ASSERT_TRUE(test, dir != NULL);

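	/* A NULL buffer with zero length queries the required size in dwords */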
	ret = tb_property_format_dir(dir, NULL, 0);
	KUNIT_ASSERT_EQ(test, ret, ARRAY_SIZE(root_directory));

	block_len = ret;

	block = kunit_kzalloc(test, block_len * sizeof(u32), GFP_KERNEL);
	KUNIT_ASSERT_TRUE(test, block != NULL);

	ret = tb_property_format_dir(dir, block, block_len);
	KUNIT_EXPECT_EQ(test, ret, 0);

	for (i = 0; i < ARRAY_SIZE(root_directory); i++)
		KUNIT_EXPECT_EQ(test, root_directory[i], block[i]);

	tb_property_free_dir(dir);
}

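/* Recursively verify that two parsed property directories are identical */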
static void compare_dirs(struct kunit *test, struct tb_property_dir *d1,
			 struct tb_property_dir *d2)
{
	struct tb_property *p1, *p2, *tmp;
	int n1, n2, i;

	if (d1->uuid) {
		KUNIT_ASSERT_TRUE(test, d2->uuid != NULL);
		KUNIT_ASSERT_TRUE(test, uuid_equal(d1->uuid, d2->uuid));
	} else {
		KUNIT_ASSERT_TRUE(test, d2->uuid == NULL);
	}

	n1 = 0;
	tb_property_for_each(d1, tmp)
		n1++;
	KUNIT_ASSERT_NE(test, n1, 0);

	n2 = 0;
	tb_property_for_each(d2, tmp)
		n2++;
	KUNIT_ASSERT_NE(test, n2, 0);

	KUNIT_ASSERT_EQ(test, n1, n2);

	p1 = NULL;
	p2 = NULL;
	for (i = 0; i < n1; i++) {
		p1 = tb_property_get_next(d1, p1);
		KUNIT_ASSERT_TRUE(test, p1 != NULL);
		p2 = tb_property_get_next(d2, p2);
		KUNIT_ASSERT_TRUE(test, p2 != NULL);

		KUNIT_ASSERT_STREQ(test, &p1->key[0], &p2->key[0]);
		KUNIT_ASSERT_EQ(test, p1->type, p2->type);
		KUNIT_ASSERT_EQ(test, p1->length, p2->length);

		switch (p1->type) {
		case TB_PROPERTY_TYPE_DIRECTORY:
			KUNIT_ASSERT_TRUE(test, p1->value.dir != NULL);
			KUNIT_ASSERT_TRUE(test, p2->value.dir != NULL);
			compare_dirs(test, p1->value.dir, p2->value.dir);
			break;

		case TB_PROPERTY_TYPE_DATA:
			KUNIT_ASSERT_TRUE(test, p1->value.data != NULL);
			KUNIT_ASSERT_TRUE(test, p2->value.data != NULL);
			KUNIT_ASSERT_TRUE(test,
				!memcmp(p1->value.data, p2->value.data,
					p1->length * 4)
			);
			break;

		case TB_PROPERTY_TYPE_TEXT:
			KUNIT_ASSERT_TRUE(test, p1->value.text != NULL);
			KUNIT_ASSERT_TRUE(test, p2->value.text != NULL);
			KUNIT_ASSERT_STREQ(test, p1->value.text, p2->value.text);
			break;

		case TB_PROPERTY_TYPE_VALUE:
			KUNIT_ASSERT_EQ(test, p1->value.immediate,
					p2->value.immediate);
			break;
		default:
			KUNIT_FAIL(test, "unexpected property type");
			break;
		}
	}
}

static void tb_test_property_copy(struct kunit *test)
{
	struct tb_property_dir *src, *dst;
	u32 *block;
	int ret, i;

	src = tb_property_parse_dir(root_directory, ARRAY_SIZE(root_directory));
	KUNIT_ASSERT_TRUE(test, src != NULL);

	dst = tb_property_copy_dir(src);
	KUNIT_ASSERT_TRUE(test, dst != NULL);

	/* Compare the structures */
	compare_dirs(test, src, dst);

	/* Compare the resulting property block */
	ret = tb_property_format_dir(dst, NULL, 0);
	KUNIT_ASSERT_EQ(test, ret, ARRAY_SIZE(root_directory));

	block = kunit_kzalloc(test, sizeof(root_directory), GFP_KERNEL);
	KUNIT_ASSERT_TRUE(test, block != NULL);

	ret = tb_property_format_dir(dst, block, ARRAY_SIZE(root_directory));
	KUNIT_EXPECT_TRUE(test, !ret);

	for (i = 0; i < ARRAY_SIZE(root_directory); i++)
		KUNIT_EXPECT_EQ(test, root_directory[i], block[i]);

	tb_property_free_dir(dst);
	tb_property_free_dir(src);
}

static struct kunit_case tb_test_cases[] = {
	KUNIT_CASE(tb_test_path_basic),
	KUNIT_CASE(tb_test_path_not_connected_walk),
	KUNIT_CASE(tb_test_path_single_hop_walk),
	KUNIT_CASE(tb_test_path_daisy_chain_walk),
	KUNIT_CASE(tb_test_path_simple_tree_walk),
	KUNIT_CASE(tb_test_path_complex_tree_walk),
	KUNIT_CASE(tb_test_path_max_length_walk),
	KUNIT_CASE(tb_test_path_not_connected),
	KUNIT_CASE(tb_test_path_not_bonded_lane0),
	KUNIT_CASE(tb_test_path_not_bonded_lane1),
	KUNIT_CASE(tb_test_path_not_bonded_lane1_chain),
	KUNIT_CASE(tb_test_path_not_bonded_lane1_chain_reverse),
	KUNIT_CASE(tb_test_path_mixed_chain),
	KUNIT_CASE(tb_test_path_mixed_chain_reverse),
	KUNIT_CASE(tb_test_tunnel_pcie),
	KUNIT_CASE(tb_test_tunnel_dp),
	KUNIT_CASE(tb_test_tunnel_dp_chain),
	KUNIT_CASE(tb_test_tunnel_dp_tree),
	KUNIT_CASE(tb_test_tunnel_dp_max_length),
	KUNIT_CASE(tb_test_tunnel_port_on_path),
	KUNIT_CASE(tb_test_tunnel_usb3),
	KUNIT_CASE(tb_test_tunnel_dma),
	KUNIT_CASE(tb_test_tunnel_dma_rx),
	KUNIT_CASE(tb_test_tunnel_dma_tx),
	KUNIT_CASE(tb_test_tunnel_dma_chain),
	KUNIT_CASE(tb_test_tunnel_dma_match),
	KUNIT_CASE(tb_test_credit_alloc_legacy_not_bonded),
	KUNIT_CASE(tb_test_credit_alloc_legacy_bonded),
	KUNIT_CASE(tb_test_credit_alloc_pcie),
	KUNIT_CASE(tb_test_credit_alloc_dp),
	KUNIT_CASE(tb_test_credit_alloc_usb3),
	KUNIT_CASE(tb_test_credit_alloc_dma),
	KUNIT_CASE(tb_test_credit_alloc_dma_multiple),
	KUNIT_CASE(tb_test_credit_alloc_all),
	KUNIT_CASE(tb_test_property_parse),
	KUNIT_CASE(tb_test_property_format),
	KUNIT_CASE(tb_test_property_copy),
	{ }
};

static struct kunit_suite tb_test_suite = {
	.name = "thunderbolt",
	.test_cases = tb_test_cases,
};

static struct kunit_suite *tb_test_suites[] = { &tb_test_suite, NULL };

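/*
 * The suite is registered through these explicit entry points rather
 * than the usual kunit_test_suites() macro, presumably so the driver
 * can initialize and tear down the tests from its own module init and
 * exit paths; __kunit_test_suites_init()/exit() are the low-level
 * registration calls the macro would otherwise generate.
 */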
int tb_test_init(void)
{
	return __kunit_test_suites_init(tb_test_suites);
}

void tb_test_exit(void)
{
	return __kunit_test_suites_exit(tb_test_suites);
}