// SPDX-License-Identifier: GPL-2.0
/*
 * Thunderbolt driver - Tunneling support
 *
 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
 * Copyright (C) 2019, Intel Corporation
 */

#include <linux/slab.h>
#include <linux/list.h>

#include "tunnel.h"
#include "tb.h"

#define TB_PCI_PATH_DOWN 0
#define TB_PCI_PATH_UP 1

#define __TB_TUNNEL_PRINT(level, tunnel, fmt, arg...)                  \
	do {                                                           \
		struct tb_tunnel *__tunnel = (tunnel);                 \
		level(__tunnel->tb, "%llx:%x <-> %llx:%x (PCI): " fmt, \
		      tb_route(__tunnel->src_port->sw),                \
		      __tunnel->src_port->port,                        \
		      tb_route(__tunnel->dst_port->sw),                \
		      __tunnel->dst_port->port,                        \
		      ## arg);                                         \
	} while (0)

#define tb_tunnel_WARN(tunnel, fmt, arg...) \
	__TB_TUNNEL_PRINT(tb_WARN, tunnel, fmt, ##arg)
#define tb_tunnel_warn(tunnel, fmt, arg...) \
	__TB_TUNNEL_PRINT(tb_warn, tunnel, fmt, ##arg)
#define tb_tunnel_info(tunnel, fmt, arg...) \
	__TB_TUNNEL_PRINT(tb_info, tunnel, fmt, ##arg)

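/*
 * Example (illustrative only): the logging macros above prefix every message
 * with the route and adapter port numbers of both ends of the tunnel, so a
 * call such as
 *
 *	tb_tunnel_info(tunnel, "activating\n");
 *
 * is logged roughly as (route/port values made up here)
 *
 *	... 0:3 <-> 301:1 (PCI): activating
 *
 * which makes it easy to tell which tunnel a message refers to.
 */
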
static struct tb_tunnel *tb_tunnel_alloc(struct tb *tb, size_t npaths)
{
	struct tb_tunnel *tunnel;

	tunnel = kzalloc(sizeof(*tunnel), GFP_KERNEL);
	if (!tunnel)
		return NULL;

	tunnel->paths = kcalloc(npaths, sizeof(tunnel->paths[0]), GFP_KERNEL);
	if (!tunnel->paths) {
		tb_tunnel_free(tunnel);
		return NULL;
	}

	INIT_LIST_HEAD(&tunnel->list);
	tunnel->tb = tb;
	tunnel->npaths = npaths;

	return tunnel;
}

static int tb_pci_activate(struct tb_tunnel *tunnel, bool activate)
{
	int res;

	res = tb_pci_port_enable(tunnel->src_port, activate);
	if (res)
		return res;

	return tb_pci_port_enable(tunnel->dst_port, activate);
}

static void tb_pci_init_path(struct tb_path *path)
{
	path->egress_fc_enable = TB_PATH_SOURCE | TB_PATH_INTERNAL;
	path->egress_shared_buffer = TB_PATH_NONE;
	path->ingress_fc_enable = TB_PATH_ALL;
	path->ingress_shared_buffer = TB_PATH_NONE;
	path->priority = 3;
	path->weight = 1;
	path->drop_packages = 0;
	path->nfc_credits = 0;
}

/**
 * tb_tunnel_alloc_pci() - allocate a PCI tunnel
 * @tb: Pointer to the domain structure
 * @up: PCIe upstream adapter port
 * @down: PCIe downstream adapter port
 *
 * Allocate a PCI tunnel. The ports must be of type TB_TYPE_PCIE_UP and
 * TB_TYPE_PCIE_DOWN.
 *
 * Currently only paths consisting of two hops are supported (that is, the
 * ports must be on "adjacent" switches).
 *
 * The paths are hard-coded to use hop 8 (the only working hop id available
 * on my thunderbolt devices). Therefore at most ONE path per device may be
 * activated.
 *
 * Return: A tb_tunnel on success or NULL on failure.
 */
struct tb_tunnel *tb_tunnel_alloc_pci(struct tb *tb, struct tb_port *up,
				      struct tb_port *down)
{
	struct tb_path *path_to_up;
	struct tb_path *path_to_down;
	struct tb_tunnel *tunnel;

	tunnel = tb_tunnel_alloc(tb, 2);
	if (!tunnel)
		return NULL;

	tunnel->activate = tb_pci_activate;
	tunnel->src_port = down;
	tunnel->dst_port = up;

	path_to_up = tb_path_alloc(tb, 2);
	if (!path_to_up) {
		tb_tunnel_free(tunnel);
		return NULL;
	}
	tunnel->paths[TB_PCI_PATH_UP] = path_to_up;

	path_to_down = tb_path_alloc(tb, 2);
	if (!path_to_down) {
		tb_tunnel_free(tunnel);
		return NULL;
	}
	tunnel->paths[TB_PCI_PATH_DOWN] = path_to_down;

	tb_pci_init_path(path_to_up);
	tb_pci_init_path(path_to_down);

	path_to_up->hops[0].in_port = down;
	path_to_up->hops[0].in_hop_index = 8;
	path_to_up->hops[0].in_counter_index = -1;
	path_to_up->hops[0].out_port = tb_upstream_port(up->sw)->remote;
	path_to_up->hops[0].next_hop_index = 8;

	path_to_up->hops[1].in_port = tb_upstream_port(up->sw);
	path_to_up->hops[1].in_hop_index = 8;
	path_to_up->hops[1].in_counter_index = -1;
	path_to_up->hops[1].out_port = up;
	path_to_up->hops[1].next_hop_index = 8;

	path_to_down->hops[0].in_port = up;
	path_to_down->hops[0].in_hop_index = 8;
	path_to_down->hops[0].in_counter_index = -1;
	path_to_down->hops[0].out_port = tb_upstream_port(up->sw);
	path_to_down->hops[0].next_hop_index = 8;

	path_to_down->hops[1].in_port = tb_upstream_port(up->sw)->remote;
	path_to_down->hops[1].in_hop_index = 8;
	path_to_down->hops[1].in_counter_index = -1;
	path_to_down->hops[1].out_port = down;
	path_to_down->hops[1].next_hop_index = 8;

	return tunnel;
}
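
/*
 * Usage sketch (illustrative, not part of this file): a typical caller pairs
 * the PCIe downstream adapter of the parent switch with the PCIe upstream
 * adapter of the newly plugged device, allocates the tunnel and activates it,
 * freeing it again if activation fails. The variable names, the tunnel_list
 * head and the error codes below are placeholders.
 *
 *	struct tb_tunnel *tunnel;
 *
 *	tunnel = tb_tunnel_alloc_pci(tb, up_port, down_port);
 *	if (!tunnel)
 *		return -ENOMEM;
 *
 *	if (tb_tunnel_activate(tunnel)) {
 *		tb_tunnel_free(tunnel);
 *		return -EIO;
 *	}
 *	list_add(&tunnel->list, &tunnel_list);
 */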

/**
 * tb_tunnel_free() - free a tunnel
 * @tunnel: Tunnel to be freed
 *
 * The tunnel must have been deactivated.
 */
void tb_tunnel_free(struct tb_tunnel *tunnel)
{
	int i;

	if (!tunnel)
		return;

	for (i = 0; i < tunnel->npaths; i++) {
		if (tunnel->paths[i] && tunnel->paths[i]->activated) {
			tb_tunnel_WARN(tunnel,
				       "trying to free an activated tunnel\n");
			return;
		}
	}

	for (i = 0; i < tunnel->npaths; i++) {
		if (tunnel->paths[i])
			tb_path_free(tunnel->paths[i]);
	}

	kfree(tunnel->paths);
	kfree(tunnel);
}

/**
 * tb_tunnel_is_invalid() - check whether an activated tunnel is still valid
 * @tunnel: Tunnel to check
 */
bool tb_tunnel_is_invalid(struct tb_tunnel *tunnel)
{
	int i;

	for (i = 0; i < tunnel->npaths; i++) {
		WARN_ON(!tunnel->paths[i]->activated);
		if (tb_path_is_invalid(tunnel->paths[i]))
			return true;
	}

	return false;
}
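
/*
 * Usage sketch (illustrative, not part of this file): on a hot-unplug event a
 * connection manager might walk its tunnels and tear down the ones whose
 * paths have become invalid. The tunnel_list head below is a placeholder.
 *
 *	struct tb_tunnel *tunnel, *n;
 *
 *	list_for_each_entry_safe(tunnel, n, &tunnel_list, list) {
 *		if (tb_tunnel_is_invalid(tunnel)) {
 *			tb_tunnel_deactivate(tunnel);
 *			list_del(&tunnel->list);
 *			tb_tunnel_free(tunnel);
 *		}
 *	}
 */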

/**
 * tb_tunnel_restart() - activate a tunnel after a hardware reset
 * @tunnel: Tunnel to restart
 *
 * Return: 0 on success and negative errno in case of failure
 */
int tb_tunnel_restart(struct tb_tunnel *tunnel)
{
	int res, i;

	tb_tunnel_info(tunnel, "activating\n");

	for (i = 0; i < tunnel->npaths; i++) {
		tunnel->paths[i]->activated = false;
		res = tb_path_activate(tunnel->paths[i]);
		if (res)
			goto err;
	}

	if (tunnel->activate) {
		res = tunnel->activate(tunnel, true);
		if (res)
			goto err;
	}

	return 0;

err:
	tb_tunnel_warn(tunnel, "activation failed\n");
	tb_tunnel_deactivate(tunnel);
	return res;
}

/**
 * tb_tunnel_activate() - activate a tunnel
 * @tunnel: Tunnel to activate
 *
 * Return: 0 on success or an error code on failure.
 */
int tb_tunnel_activate(struct tb_tunnel *tunnel)
{
	int i;

	tb_tunnel_info(tunnel, "activating\n");

	for (i = 0; i < tunnel->npaths; i++) {
		if (tunnel->paths[i]->activated) {
			tb_tunnel_WARN(tunnel,
				       "trying to activate an already activated tunnel\n");
			return -EINVAL;
		}
	}

	return tb_tunnel_restart(tunnel);
}

/**
 * tb_tunnel_deactivate() - deactivate a tunnel
 * @tunnel: Tunnel to deactivate
 */
void tb_tunnel_deactivate(struct tb_tunnel *tunnel)
{
	int i;

	tb_tunnel_info(tunnel, "deactivating\n");

	if (tunnel->activate)
		tunnel->activate(tunnel, false);

	for (i = 0; i < tunnel->npaths; i++) {
		if (tunnel->paths[i]->activated)
			tb_path_deactivate(tunnel->paths[i]);
	}
}
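
/*
 * Usage sketch (illustrative, not part of this file): after the domain comes
 * back from a hardware reset or resume, a connection manager can try to
 * re-establish its existing tunnels with tb_tunnel_restart() and discard the
 * ones that no longer come up. The tunnel_list head below is a placeholder.
 *
 *	struct tb_tunnel *tunnel, *n;
 *
 *	list_for_each_entry_safe(tunnel, n, &tunnel_list, list) {
 *		if (tb_tunnel_restart(tunnel)) {
 *			tb_tunnel_deactivate(tunnel);
 *			list_del(&tunnel->list);
 *			tb_tunnel_free(tunnel);
 *		}
 *	}
 */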
277}