// SPDX-License-Identifier: GPL-2.0
/*
 * Thunderbolt driver - path/tunnel functionality
 *
 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
 * Copyright (C) 2019, Intel Corporation
 */

#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/ktime.h>

#include "tb.h"

static void tb_dump_hop(const struct tb_path_hop *hop, const struct tb_regs_hop *regs)
{
	const struct tb_port *port = hop->in_port;

	tb_port_dbg(port, " In HopID: %d => Out port: %d Out HopID: %d\n",
		    hop->in_hop_index, regs->out_port, regs->next_hop);
	tb_port_dbg(port, " Weight: %d Priority: %d Credits: %d Drop: %d\n",
		    regs->weight, regs->priority,
		    regs->initial_credits, regs->drop_packages);
	tb_port_dbg(port, " Counter enabled: %d Counter index: %d\n",
		    regs->counter_enable, regs->counter);
	tb_port_dbg(port, " Flow Control (In/Eg): %d/%d Shared Buffer (In/Eg): %d/%d\n",
		    regs->ingress_fc, regs->egress_fc,
		    regs->ingress_shared_buffer, regs->egress_shared_buffer);
	tb_port_dbg(port, " Unknown1: %#x Unknown2: %#x Unknown3: %#x\n",
		    regs->unknown1, regs->unknown2, regs->unknown3);
}

static struct tb_port *tb_path_find_dst_port(struct tb_port *src, int src_hopid,
					     int dst_hopid)
{
	struct tb_port *port, *out_port = NULL;
	struct tb_regs_hop hop;
	struct tb_switch *sw;
	int i, ret, hopid;

	hopid = src_hopid;
	port = src;

	for (i = 0; port && i < TB_PATH_MAX_HOPS; i++) {
		sw = port->sw;

		ret = tb_port_read(port, &hop, TB_CFG_HOPS, 2 * hopid, 2);
		if (ret) {
			tb_port_warn(port, "failed to read path at %d\n", hopid);
			return NULL;
		}

		if (!hop.enable)
			return NULL;

		out_port = &sw->ports[hop.out_port];
		hopid = hop.next_hop;
		port = out_port->remote;
	}

	return out_port && hopid == dst_hopid ? out_port : NULL;
}

static int tb_path_find_src_hopid(struct tb_port *src,
				  const struct tb_port *dst, int dst_hopid)
{
	struct tb_port *out;
	int i;

	for (i = TB_PATH_MIN_HOPID; i <= src->config.max_in_hop_id; i++) {
		out = tb_path_find_dst_port(src, i, dst_hopid);
		if (out == dst)
			return i;
	}

	return 0;
}

/**
 * tb_path_discover() - Discover a path
 * @src: First input port of a path
 * @src_hopid: Starting HopID of a path (%-1 if don't care)
 * @dst: Expected destination port of the path (%NULL if don't care)
 * @dst_hopid: HopID to the @dst (%-1 if don't care)
 * @last: Last port is filled here if not %NULL
 * @name: Name of the path
 *
 * Follows a path starting from @src and @src_hopid to the last output
 * port of the path. Allocates HopIDs for the visited ports. Call
 * tb_path_free() to release the path and allocated HopIDs when the path
 * is not needed anymore.
 *
 * Note that this function also discovers incomplete paths, so the caller
 * should check that the @dst port is the expected one. If it is not, the
 * path can be cleaned up by calling tb_path_deactivate() before
 * tb_path_free().
 *
 * Return: Discovered path on success, %NULL in case of failure
 */
struct tb_path *tb_path_discover(struct tb_port *src, int src_hopid,
				 struct tb_port *dst, int dst_hopid,
				 struct tb_port **last, const char *name)
{
	struct tb_port *out_port;
	struct tb_regs_hop hop;
	struct tb_path *path;
	struct tb_switch *sw;
	struct tb_port *p;
	size_t num_hops;
	int ret, i, h;

	if (src_hopid < 0 && dst) {
		/*
		 * For incomplete paths the intermediate HopID can be
		 * different from the one used by the protocol adapter
		 * so in that case find a path that ends on @dst with
		 * matching @dst_hopid. That should give us the correct
		 * HopID for the @src.
		 */
		src_hopid = tb_path_find_src_hopid(src, dst, dst_hopid);
		if (!src_hopid)
			return NULL;
	}

	p = src;
	h = src_hopid;
	num_hops = 0;

	for (i = 0; p && i < TB_PATH_MAX_HOPS; i++) {
		sw = p->sw;

		ret = tb_port_read(p, &hop, TB_CFG_HOPS, 2 * h, 2);
		if (ret) {
			tb_port_warn(p, "failed to read path at %d\n", h);
			return NULL;
		}

		/* If the hop is not enabled we got an incomplete path */
		if (!hop.enable)
			break;

		out_port = &sw->ports[hop.out_port];
		if (last)
			*last = out_port;

		h = hop.next_hop;
		p = out_port->remote;
		num_hops++;
	}

	path = kzalloc(sizeof(*path), GFP_KERNEL);
	if (!path)
		return NULL;

	path->name = name;
	path->tb = src->sw->tb;
	path->path_length = num_hops;
	path->activated = true;

	path->hops = kcalloc(num_hops, sizeof(*path->hops), GFP_KERNEL);
	if (!path->hops) {
		kfree(path);
		return NULL;
	}

	p = src;
	h = src_hopid;

	for (i = 0; i < num_hops; i++) {
		int next_hop;

		sw = p->sw;

		ret = tb_port_read(p, &hop, TB_CFG_HOPS, 2 * h, 2);
		if (ret) {
			tb_port_warn(p, "failed to read path at %d\n", h);
			goto err;
		}

		if (tb_port_alloc_in_hopid(p, h, h) < 0)
			goto err;

		out_port = &sw->ports[hop.out_port];
		next_hop = hop.next_hop;

		if (tb_port_alloc_out_hopid(out_port, next_hop, next_hop) < 0) {
			tb_port_release_in_hopid(p, h);
			goto err;
		}

		path->hops[i].in_port = p;
		path->hops[i].in_hop_index = h;
		path->hops[i].in_counter_index = -1;
		path->hops[i].out_port = out_port;
		path->hops[i].next_hop_index = next_hop;

		h = next_hop;
		p = out_port->remote;
	}

	return path;

err:
	tb_port_warn(src, "failed to discover path starting at HopID %d\n",
		     src_hopid);
	tb_path_free(path);
	return NULL;
}
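
/*
 * Usage sketch (editorial illustration, not part of the driver): a
 * connection manager that finds tunnels set up by boot firmware could
 * discover the underlying path like this. The @down port and HopID 8
 * are hypothetical values chosen only for the example.
 *
 *	struct tb_port *last;
 *	struct tb_path *path;
 *
 *	path = tb_path_discover(down, 8, NULL, -1, &last, "PCIe Down");
 *	if (path) {
 *		// Verify that @last is the expected adapter; if not,
 *		// call tb_path_deactivate() before tb_path_free().
 *		tb_path_free(path);
 *	}
 */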

/**
 * tb_path_alloc() - allocate a Thunderbolt path between two ports
 * @tb: Domain pointer
 * @src: Source port of the path
 * @src_hopid: HopID used for the first ingress port in the path
 * @dst: Destination port of the path
 * @dst_hopid: HopID used for the last egress port in the path
 * @link_nr: Preferred link if there are dual links on the path
 * @name: Name of the path
 *
 * Creates a path between two ports starting with the given @src_hopid.
 * Reserves HopIDs for each port (they can be different from @src_hopid
 * depending on how many HopIDs each port has already reserved). If there
 * are dual links on the path, prioritizes using @link_nr but takes into
 * account that the lanes may be bonded.
 *
 * Return: Returns a tb_path on success or %NULL on failure.
 */
struct tb_path *tb_path_alloc(struct tb *tb, struct tb_port *src, int src_hopid,
			      struct tb_port *dst, int dst_hopid, int link_nr,
			      const char *name)
{
	struct tb_port *in_port, *out_port, *first_port, *last_port;
	int in_hopid, out_hopid;
	struct tb_path *path;
	size_t num_hops;
	int i, ret;

	path = kzalloc(sizeof(*path), GFP_KERNEL);
	if (!path)
		return NULL;

	first_port = last_port = NULL;
	i = 0;
	tb_for_each_port_on_path(src, dst, in_port) {
		if (!first_port)
			first_port = in_port;
		last_port = in_port;
		i++;
	}

	/* Check that src and dst are reachable */
	if (first_port != src || last_port != dst) {
		kfree(path);
		return NULL;
	}

	/* Each hop takes two ports */
	num_hops = i / 2;

	path->hops = kcalloc(num_hops, sizeof(*path->hops), GFP_KERNEL);
	if (!path->hops) {
		kfree(path);
		return NULL;
	}

	in_hopid = src_hopid;
	out_port = NULL;

	for (i = 0; i < num_hops; i++) {
		in_port = tb_next_port_on_path(src, dst, out_port);
		if (!in_port)
			goto err;

		/* When lanes are bonded the primary link must be used */
		if (!in_port->bonded && in_port->dual_link_port &&
		    in_port->link_nr != link_nr)
			in_port = in_port->dual_link_port;

		ret = tb_port_alloc_in_hopid(in_port, in_hopid, in_hopid);
		if (ret < 0)
			goto err;
		in_hopid = ret;

		out_port = tb_next_port_on_path(src, dst, in_port);
		if (!out_port)
			goto err;

		/*
		 * Pick up the right port when going from non-bonded to
		 * bonded or from bonded to non-bonded.
		 */
		if (out_port->dual_link_port) {
			if (!in_port->bonded && out_port->bonded &&
			    out_port->link_nr) {
				/*
				 * Use the primary link when going from
				 * non-bonded to bonded.
				 */
				out_port = out_port->dual_link_port;
			} else if (!out_port->bonded &&
				   out_port->link_nr != link_nr) {
				/*
				 * If the out port is not bonded, follow
				 * link_nr.
				 */
				out_port = out_port->dual_link_port;
			}
		}

		if (i == num_hops - 1)
			ret = tb_port_alloc_out_hopid(out_port, dst_hopid,
						      dst_hopid);
		else
			ret = tb_port_alloc_out_hopid(out_port, -1, -1);

		if (ret < 0)
			goto err;
		out_hopid = ret;

		path->hops[i].in_hop_index = in_hopid;
		path->hops[i].in_port = in_port;
		path->hops[i].in_counter_index = -1;
		path->hops[i].out_port = out_port;
		path->hops[i].next_hop_index = out_hopid;

		in_hopid = out_hopid;
	}

	path->tb = tb;
	path->path_length = num_hops;
	path->name = name;

	return path;

err:
	tb_path_free(path);
	return NULL;
}
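
/*
 * Usage sketch (editorial illustration): allocating one direction of a
 * tunnel between two protocol adapters. The ports, HopID 8 and link 0
 * are hypothetical; real callers derive them from the tunnel type being
 * established.
 *
 *	struct tb_path *path;
 *
 *	path = tb_path_alloc(tb, down, 8, up, 8, 0, "PCIe Down");
 *	if (!path)
 *		return NULL;
 *	// Fill in weight, priority, flow control and per-hop initial
 *	// credits before calling tb_path_activate().
 */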

/**
 * tb_path_free() - free a path
 * @path: Path to free
 *
 * Frees a path. The path does not need to be deactivated.
 */
void tb_path_free(struct tb_path *path)
{
	int i;

	for (i = 0; i < path->path_length; i++) {
		const struct tb_path_hop *hop = &path->hops[i];

		if (hop->in_port)
			tb_port_release_in_hopid(hop->in_port,
						 hop->in_hop_index);
		if (hop->out_port)
			tb_port_release_out_hopid(hop->out_port,
						  hop->next_hop_index);
	}

	kfree(path->hops);
	kfree(path);
}

static void __tb_path_deallocate_nfc(struct tb_path *path, int first_hop)
{
	int i, res;

	for (i = first_hop; i < path->path_length; i++) {
		res = tb_port_add_nfc_credits(path->hops[i].in_port,
					      -path->nfc_credits);
		if (res)
			tb_port_warn(path->hops[i].in_port,
				     "nfc credits deallocation failed for hop %d\n",
				     i);
	}
}

static int __tb_path_deactivate_hop(struct tb_port *port, int hop_index,
				    bool clear_fc)
{
	struct tb_regs_hop hop;
	ktime_t timeout;
	int ret;

	/* Disable the path */
	ret = tb_port_read(port, &hop, TB_CFG_HOPS, 2 * hop_index, 2);
	if (ret)
		return ret;

	/* Already disabled */
	if (!hop.enable)
		return 0;

	hop.enable = 0;

	ret = tb_port_write(port, &hop, TB_CFG_HOPS, 2 * hop_index, 2);
	if (ret)
		return ret;

	/* Wait until it is drained */
	timeout = ktime_add_ms(ktime_get(), 500);
	do {
		ret = tb_port_read(port, &hop, TB_CFG_HOPS, 2 * hop_index, 2);
		if (ret)
			return ret;

		if (!hop.pending) {
			if (clear_fc) {
				/* Clear flow control */
				hop.ingress_fc = 0;
				hop.egress_fc = 0;
				hop.ingress_shared_buffer = 0;
				hop.egress_shared_buffer = 0;

				return tb_port_write(port, &hop, TB_CFG_HOPS,
						     2 * hop_index, 2);
			}

			return 0;
		}

		usleep_range(10, 20);
	} while (ktime_before(ktime_get(), timeout));

	return -ETIMEDOUT;
}

static void __tb_path_deactivate_hops(struct tb_path *path, int first_hop)
{
	int i, res;

	for (i = first_hop; i < path->path_length; i++) {
		res = __tb_path_deactivate_hop(path->hops[i].in_port,
					       path->hops[i].in_hop_index,
					       path->clear_fc);
		if (res && res != -ENODEV)
			tb_port_warn(path->hops[i].in_port,
				     "hop deactivation failed for hop %d, index %d\n",
				     i, path->hops[i].in_hop_index);
	}
}

/**
 * tb_path_deactivate() - deactivate a path
 * @path: Path to deactivate
 *
 * Disables and drains all hops of an activated path and releases the
 * non flow controlled credits reserved for it.
 */
void tb_path_deactivate(struct tb_path *path)
{
	if (!path->activated) {
		tb_WARN(path->tb, "trying to deactivate an inactive path\n");
		return;
	}
	tb_dbg(path->tb,
	       "deactivating %s path from %llx:%x to %llx:%x\n",
	       path->name, tb_route(path->hops[0].in_port->sw),
	       path->hops[0].in_port->port,
	       tb_route(path->hops[path->path_length - 1].out_port->sw),
	       path->hops[path->path_length - 1].out_port->port);
	__tb_path_deactivate_hops(path, 0);
	__tb_path_deallocate_nfc(path, 0);
	path->activated = false;
}

/**
 * tb_path_activate() - activate a path
 * @path: Path to activate
 *
 * Activate a path starting with the last hop and iterating backwards. The
 * caller must fill path->hops before calling tb_path_activate().
 *
 * Return: Returns 0 on success or an error code on failure.
 */
int tb_path_activate(struct tb_path *path)
{
	int i, res;
	enum tb_path_port out_mask, in_mask;

	if (path->activated) {
		tb_WARN(path->tb, "trying to activate already activated path\n");
		return -EINVAL;
	}

	tb_dbg(path->tb,
	       "activating %s path from %llx:%x to %llx:%x\n",
	       path->name, tb_route(path->hops[0].in_port->sw),
	       path->hops[0].in_port->port,
	       tb_route(path->hops[path->path_length - 1].out_port->sw),
	       path->hops[path->path_length - 1].out_port->port);

	/* Clear counters. */
	for (i = path->path_length - 1; i >= 0; i--) {
		if (path->hops[i].in_counter_index == -1)
			continue;
		res = tb_port_clear_counter(path->hops[i].in_port,
					    path->hops[i].in_counter_index);
		if (res)
			goto err;
	}

	/* Add non flow controlled credits. */
	for (i = path->path_length - 1; i >= 0; i--) {
		res = tb_port_add_nfc_credits(path->hops[i].in_port,
					      path->nfc_credits);
		if (res) {
			__tb_path_deallocate_nfc(path, i);
			goto err;
		}
	}

	/* Activate hops. */
	for (i = path->path_length - 1; i >= 0; i--) {
		struct tb_regs_hop hop = { 0 };

		/* If it is left active deactivate it first */
		__tb_path_deactivate_hop(path->hops[i].in_port,
				path->hops[i].in_hop_index, path->clear_fc);

		/* dword 0 */
		hop.next_hop = path->hops[i].next_hop_index;
		hop.out_port = path->hops[i].out_port->port;
		hop.initial_credits = path->hops[i].initial_credits;
		hop.unknown1 = 0;
		hop.enable = 1;

		/* dword 1 */
		out_mask = (i == path->path_length - 1) ?
				TB_PATH_DESTINATION : TB_PATH_INTERNAL;
		in_mask = (i == 0) ? TB_PATH_SOURCE : TB_PATH_INTERNAL;
		hop.weight = path->weight;
		hop.unknown2 = 0;
		hop.priority = path->priority;
		hop.drop_packages = path->drop_packages;
		hop.counter = path->hops[i].in_counter_index;
		hop.counter_enable = path->hops[i].in_counter_index != -1;
		hop.ingress_fc = path->ingress_fc_enable & in_mask;
		hop.egress_fc = path->egress_fc_enable & out_mask;
		hop.ingress_shared_buffer = path->ingress_shared_buffer
					    & in_mask;
		hop.egress_shared_buffer = path->egress_shared_buffer
					    & out_mask;
		hop.unknown3 = 0;

		tb_port_dbg(path->hops[i].in_port, "Writing hop %d\n", i);
		tb_dump_hop(&path->hops[i], &hop);
		res = tb_port_write(path->hops[i].in_port, &hop, TB_CFG_HOPS,
				    2 * path->hops[i].in_hop_index, 2);
		if (res) {
			__tb_path_deactivate_hops(path, i);
			__tb_path_deallocate_nfc(path, 0);
			goto err;
		}
	}
	path->activated = true;
	tb_dbg(path->tb, "path activation complete\n");
	return 0;
err:
	tb_WARN(path->tb, "path activation failed\n");
	return res;
}
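
/*
 * Typical lifecycle (editorial sketch based on the functions in this
 * file; the property values below are illustrative only):
 *
 *	path = tb_path_alloc(tb, src, hopid, dst, hopid, 0, "DP Video");
 *	if (!path)
 *		return NULL;
 *	path->ingress_fc_enable = TB_PATH_ALL;
 *	path->egress_fc_enable = TB_PATH_ALL;
 *	path->priority = 1;
 *	path->weight = 1;
 *	if (tb_path_activate(path))
 *		goto err_free;
 *	...
 *	tb_path_deactivate(path);
 *	tb_path_free(path);
 */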

/**
 * tb_path_is_invalid() - check whether any ports on the path are invalid
 * @path: Path to check
 *
 * Return: Returns true if the path is invalid, false otherwise.
 */
bool tb_path_is_invalid(struct tb_path *path)
{
	int i;

	for (i = 0; i < path->path_length; i++) {
		if (path->hops[i].in_port->sw->is_unplugged)
			return true;
		if (path->hops[i].out_port->sw->is_unplugged)
			return true;
	}
	return false;
}

/**
 * tb_path_port_on_path() - Does the path go through a certain port
 * @path: Path to check
 * @port: Port to check
 *
 * Goes over all hops on the path and checks if @port is any of them.
 * Direction does not matter.
 *
 * Return: Returns %true if @port is on @path, %false otherwise.
 */
bool tb_path_port_on_path(const struct tb_path *path, const struct tb_port *port)
{
	int i;

	for (i = 0; i < path->path_length; i++) {
		if (path->hops[i].in_port == port ||
		    path->hops[i].out_port == port)
			return true;
	}

	return false;
}
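
/*
 * Usage sketch (editorial illustration): when a port goes away, a
 * connection manager can use this helper to pick out the tunnels that
 * must be torn down.
 *
 *	if (tb_path_port_on_path(path, port))
 *		// @path runs through @port in one direction or the other
 */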