// SPDX-License-Identifier: GPL-2.0
/*
 * Thunderbolt Cactus Ridge driver - path/tunnel functionality
 *
 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
 */

#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/ktime.h>

#include "tb.h"


static void tb_dump_hop(struct tb_port *port, struct tb_regs_hop *hop)
{
	tb_port_dbg(port, " Hop through port %d to hop %d (%s)\n",
		    hop->out_port, hop->next_hop,
		    hop->enable ? "enabled" : "disabled");
	tb_port_dbg(port, " Weight: %d Priority: %d Credits: %d Drop: %d\n",
		    hop->weight, hop->priority,
		    hop->initial_credits, hop->drop_packages);
	tb_port_dbg(port, " Counter enabled: %d Counter index: %d\n",
		    hop->counter_enable, hop->counter);
	tb_port_dbg(port, " Flow Control (In/Eg): %d/%d Shared Buffer (In/Eg): %d/%d\n",
		    hop->ingress_fc, hop->egress_fc,
		    hop->ingress_shared_buffer, hop->egress_shared_buffer);
	tb_port_dbg(port, " Unknown1: %#x Unknown2: %#x Unknown3: %#x\n",
		    hop->unknown1, hop->unknown2, hop->unknown3);
}

/**
 * tb_path_alloc() - allocate a thunderbolt path between two ports
 * @tb: Domain pointer
 * @src: Source port of the path
 * @src_hopid: HopID used for the first ingress port in the path
 * @dst: Destination port of the path
 * @dst_hopid: HopID used for the last egress port in the path
 * @link_nr: Preferred link if there are dual links on the path
 * @name: Name of the path
 *
 * Creates a path between two ports starting with given @src_hopid. Reserves
 * HopIDs for each port (they can be different from @src_hopid depending on
 * how many HopIDs each port already has reserved). If there are dual
 * links on the path, prioritizes using @link_nr.
 *
 * Return: Returns a tb_path on success or NULL on failure.
 */
struct tb_path *tb_path_alloc(struct tb *tb, struct tb_port *src, int src_hopid,
			      struct tb_port *dst, int dst_hopid, int link_nr,
			      const char *name)
{
	struct tb_port *in_port, *out_port;
	int in_hopid, out_hopid;
	struct tb_path *path;
	size_t num_hops;
	int i, ret;

	path = kzalloc(sizeof(*path), GFP_KERNEL);
	if (!path)
		return NULL;

	/*
	 * Number of hops on a path is the distance between the two
	 * switches plus the source adapter port.
	 */
	num_hops = abs(tb_route_length(tb_route(src->sw)) -
		       tb_route_length(tb_route(dst->sw))) + 1;

	path->hops = kcalloc(num_hops, sizeof(*path->hops), GFP_KERNEL);
	if (!path->hops) {
		kfree(path);
		return NULL;
	}

	in_hopid = src_hopid;
	out_port = NULL;

	for (i = 0; i < num_hops; i++) {
		in_port = tb_next_port_on_path(src, dst, out_port);
		if (!in_port)
			goto err;

		if (in_port->dual_link_port && in_port->link_nr != link_nr)
			in_port = in_port->dual_link_port;

		ret = tb_port_alloc_in_hopid(in_port, in_hopid, in_hopid);
		if (ret < 0)
			goto err;
		in_hopid = ret;

		out_port = tb_next_port_on_path(src, dst, in_port);
		if (!out_port)
			goto err;

		if (out_port->dual_link_port && out_port->link_nr != link_nr)
			out_port = out_port->dual_link_port;

		if (i == num_hops - 1)
			ret = tb_port_alloc_out_hopid(out_port, dst_hopid,
						      dst_hopid);
		else
			ret = tb_port_alloc_out_hopid(out_port, -1, -1);

		if (ret < 0)
			goto err;
		out_hopid = ret;

		path->hops[i].in_hop_index = in_hopid;
		path->hops[i].in_port = in_port;
		path->hops[i].in_counter_index = -1;
		path->hops[i].out_port = out_port;
		path->hops[i].next_hop_index = out_hopid;

		in_hopid = out_hopid;
	}

	path->tb = tb;
	path->path_length = num_hops;
	path->name = name;

	return path;

err:
	tb_path_free(path);
	return NULL;
}
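
/*
 * Example usage (a minimal sketch, not taken from the in-tree callers): a
 * caller such as the tunnel code would typically allocate a path, fill in
 * the flow control, priority and weight fields and then activate it. The
 * ports "down"/"up", the HopID 8 and the "PCIe Down" name below are
 * illustrative only.
 *
 *	struct tb_path *path;
 *
 *	path = tb_path_alloc(tb, down, 8, up, 8, 0, "PCIe Down");
 *	if (!path)
 *		return -ENOMEM;
 *
 *	path->egress_fc_enable = TB_PATH_SOURCE | TB_PATH_INTERNAL;
 *	path->ingress_fc_enable = TB_PATH_SOURCE | TB_PATH_INTERNAL |
 *				  TB_PATH_DESTINATION;
 *	path->priority = 3;
 *	path->weight = 1;
 *
 *	if (tb_path_activate(path)) {
 *		tb_path_free(path);
 *		return -EIO;
 *	}
 *
 *	...
 *
 *	tb_path_deactivate(path);
 *	tb_path_free(path);
 */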

/**
 * tb_path_free() - free a deactivated path
 * @path: Path to free
 */
void tb_path_free(struct tb_path *path)
{
	int i;

	if (path->activated) {
		tb_WARN(path->tb, "trying to free an activated path\n");
		return;
	}

	for (i = 0; i < path->path_length; i++) {
		const struct tb_path_hop *hop = &path->hops[i];

		if (hop->in_port)
			tb_port_release_in_hopid(hop->in_port,
						 hop->in_hop_index);
		if (hop->out_port)
			tb_port_release_out_hopid(hop->out_port,
						  hop->next_hop_index);
	}

	kfree(path->hops);
	kfree(path);
}

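/*
 * Undo the non flow controlled credit reservations made by
 * tb_path_activate(), starting from hop @first_hop.
 */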
static void __tb_path_deallocate_nfc(struct tb_path *path, int first_hop)
{
	int i, res;

	for (i = first_hop; i < path->path_length; i++) {
		res = tb_port_add_nfc_credits(path->hops[i].in_port,
					      -path->nfc_credits);
		if (res)
			tb_port_warn(path->hops[i].in_port,
				     "nfc credits deallocation failed for hop %d\n",
				     i);
	}
}

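/*
 * Disable the hop entry at @hop_index in the HopID table of @port and wait
 * up to 500 ms for any packets still in flight on that hop to drain.
 */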
static int __tb_path_deactivate_hop(struct tb_port *port, int hop_index)
{
	struct tb_regs_hop hop;
	ktime_t timeout;
	int ret;

	/* Disable the path */
	ret = tb_port_read(port, &hop, TB_CFG_HOPS, 2 * hop_index, 2);
	if (ret)
		return ret;

	/* Already disabled */
	if (!hop.enable)
		return 0;

	hop.enable = 0;

	ret = tb_port_write(port, &hop, TB_CFG_HOPS, 2 * hop_index, 2);
	if (ret)
		return ret;

	/* Wait until it is drained */
	timeout = ktime_add_ms(ktime_get(), 500);
	do {
		ret = tb_port_read(port, &hop, TB_CFG_HOPS, 2 * hop_index, 2);
		if (ret)
			return ret;

		if (!hop.pending)
			return 0;

		usleep_range(10, 20);
	} while (ktime_before(ktime_get(), timeout));

	return -ETIMEDOUT;
}

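/* Deactivate all hops of @path starting from hop @first_hop. */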
static void __tb_path_deactivate_hops(struct tb_path *path, int first_hop)
{
	int i, res;

	for (i = first_hop; i < path->path_length; i++) {
		res = __tb_path_deactivate_hop(path->hops[i].in_port,
					       path->hops[i].in_hop_index);
		if (res && res != -ENODEV)
			tb_port_warn(path->hops[i].in_port,
				     "hop deactivation failed for hop %d, index %d\n",
				     i, path->hops[i].in_hop_index);
	}
}

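/**
 * tb_path_deactivate() - deactivate a path
 * @path: Path to deactivate
 *
 * Disables all hops of an activated path and releases the non flow
 * controlled credits reserved for it.
 */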
void tb_path_deactivate(struct tb_path *path)
{
	if (!path->activated) {
		tb_WARN(path->tb, "trying to deactivate an inactive path\n");
		return;
	}
	tb_dbg(path->tb,
	       "deactivating %s path from %llx:%x to %llx:%x\n",
	       path->name, tb_route(path->hops[0].in_port->sw),
	       path->hops[0].in_port->port,
	       tb_route(path->hops[path->path_length - 1].out_port->sw),
	       path->hops[path->path_length - 1].out_port->port);
	__tb_path_deactivate_hops(path, 0);
	__tb_path_deallocate_nfc(path, 0);
	path->activated = false;
}

/**
 * tb_path_activate() - activate a path
 * @path: Path to activate
 *
 * Activate a path starting with the last hop and iterating backwards. The
 * caller must fill path->hops before calling tb_path_activate().
 *
 * Return: Returns 0 on success or an error code on failure.
 */
int tb_path_activate(struct tb_path *path)
{
	int i, res;
	enum tb_path_port out_mask, in_mask;

	if (path->activated) {
		tb_WARN(path->tb, "trying to activate already activated path\n");
		return -EINVAL;
	}

	tb_dbg(path->tb,
	       "activating %s path from %llx:%x to %llx:%x\n",
	       path->name, tb_route(path->hops[0].in_port->sw),
	       path->hops[0].in_port->port,
	       tb_route(path->hops[path->path_length - 1].out_port->sw),
	       path->hops[path->path_length - 1].out_port->port);

	/* Clear counters. */
	for (i = path->path_length - 1; i >= 0; i--) {
		if (path->hops[i].in_counter_index == -1)
			continue;
		res = tb_port_clear_counter(path->hops[i].in_port,
					    path->hops[i].in_counter_index);
		if (res)
			goto err;
	}

	/* Add non flow controlled credits. */
	for (i = path->path_length - 1; i >= 0; i--) {
		res = tb_port_add_nfc_credits(path->hops[i].in_port,
					      path->nfc_credits);
		if (res) {
			__tb_path_deallocate_nfc(path, i);
			goto err;
		}
	}

	/* Activate hops. */
	for (i = path->path_length - 1; i >= 0; i--) {
		struct tb_regs_hop hop = { 0 };

		/*
		 * We do (currently) not tear down paths set up by the
		 * firmware. If a firmware device is unplugged and plugged in
		 * again then it can happen that we reuse some of the hops
		 * from the (now defunct) firmware path. This causes the
		 * hotplug operation to fail (the PCI device does not show
		 * up). Clearing the hop before overwriting it fixes the
		 * problem.
		 *
		 * Should be removed once we discover and tear down firmware
		 * paths.
		 */
		res = tb_port_write(path->hops[i].in_port, &hop, TB_CFG_HOPS,
				    2 * path->hops[i].in_hop_index, 2);
		if (res) {
			__tb_path_deactivate_hops(path, i);
			__tb_path_deallocate_nfc(path, 0);
			goto err;
		}

		/* dword 0 */
		hop.next_hop = path->hops[i].next_hop_index;
		hop.out_port = path->hops[i].out_port->port;
		/* TODO: figure out why these are good values */
		hop.initial_credits = (i == path->path_length - 1) ? 16 : 7;
		hop.unknown1 = 0;
		hop.enable = 1;

		/* dword 1 */
		out_mask = (i == path->path_length - 1) ?
				TB_PATH_DESTINATION : TB_PATH_INTERNAL;
		in_mask = (i == 0) ? TB_PATH_SOURCE : TB_PATH_INTERNAL;
		hop.weight = path->weight;
		hop.unknown2 = 0;
		hop.priority = path->priority;
		hop.drop_packages = path->drop_packages;
		hop.counter = path->hops[i].in_counter_index;
		hop.counter_enable = path->hops[i].in_counter_index != -1;
		hop.ingress_fc = path->ingress_fc_enable & in_mask;
		hop.egress_fc = path->egress_fc_enable & out_mask;
		hop.ingress_shared_buffer = path->ingress_shared_buffer
					    & in_mask;
		hop.egress_shared_buffer = path->egress_shared_buffer
					    & out_mask;
		hop.unknown3 = 0;

		tb_port_info(path->hops[i].in_port, "Writing hop %d, index %d\n",
			     i, path->hops[i].in_hop_index);
		tb_dump_hop(path->hops[i].in_port, &hop);
		res = tb_port_write(path->hops[i].in_port, &hop, TB_CFG_HOPS,
				    2 * path->hops[i].in_hop_index, 2);
		if (res) {
			__tb_path_deactivate_hops(path, i);
			__tb_path_deallocate_nfc(path, 0);
			goto err;
		}
	}
	path->activated = true;
	tb_info(path->tb, "path activation complete\n");
	return 0;
err:
	tb_WARN(path->tb, "path activation failed\n");
	return res;
}

/**
 * tb_path_is_invalid() - check whether any ports on the path are invalid
 * @path: Path to check
 *
 * Return: Returns true if the path is invalid, false otherwise.
 */
bool tb_path_is_invalid(struct tb_path *path)
{
	int i = 0;

	for (i = 0; i < path->path_length; i++) {
		if (path->hops[i].in_port->sw->is_unplugged)
			return true;
		if (path->hops[i].out_port->sw->is_unplugged)
			return true;
	}
	return false;
}