// SPDX-License-Identifier: GPL-2.0
/*
 * Thunderbolt link controller support
 *
 * Copyright (C) 2019, Intel Corporation
 * Author: Mika Westerberg <mika.westerberg@linux.intel.com>
 */

#include "tb.h"

/**
 * tb_lc_read_uuid() - Read switch UUID from link controller common register
 * @sw: Switch whose UUID is read
 * @uuid: UUID is placed here
 */
int tb_lc_read_uuid(struct tb_switch *sw, u32 *uuid)
{
	if (!sw->cap_lc)
		return -EINVAL;
	return tb_sw_read(sw, uuid, TB_CFG_SWITCH, sw->cap_lc + TB_LC_FUSE, 4);
}
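
/*
 * Example (illustrative sketch, not part of the driver): the read above
 * transfers four dwords, so the caller must provide room for a full
 * 128-bit UUID. A hypothetical caller:
 *
 *	u32 uuid[4];
 *
 *	if (!tb_lc_read_uuid(sw, uuid))
 *		tb_sw_dbg(sw, "UUID: %08x%08x%08x%08x\n",
 *			  uuid[0], uuid[1], uuid[2], uuid[3]);
 */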

static int read_lc_desc(struct tb_switch *sw, u32 *desc)
{
	if (!sw->cap_lc)
		return -EINVAL;
	return tb_sw_read(sw, desc, TB_CFG_SWITCH, sw->cap_lc + TB_LC_DESC, 1);
}

static int find_port_lc_cap(struct tb_port *port)
{
	struct tb_switch *sw = port->sw;
	int start, phys, ret, size;
	u32 desc;

	ret = read_lc_desc(sw, &desc);
	if (ret)
		return ret;

	/* Start of port LC registers */
	start = (desc & TB_LC_DESC_SIZE_MASK) >> TB_LC_DESC_SIZE_SHIFT;
	size = (desc & TB_LC_DESC_PORT_SIZE_MASK) >> TB_LC_DESC_PORT_SIZE_SHIFT;
	phys = tb_phy_port_from_link(port->port);

	return sw->cap_lc + start + phys * size;
}
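
/*
 * Worked example (values assumed purely for illustration): if the LC
 * descriptor says the per-port register blocks start 0x10 dwords into
 * the LC capability and each block is 0x20 dwords long, then for an
 * adapter on physical port 1 the offset resolves to
 *
 *	sw->cap_lc + 0x10 + 1 * 0x20
 *
 * that is, the second per-port block after the common LC registers.
 */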

static int tb_lc_set_port_configured(struct tb_port *port, bool configured)
{
	bool upstream = tb_is_upstream_port(port);
	struct tb_switch *sw = port->sw;
	u32 ctrl, lane;
	int cap, ret;

	if (sw->generation < 2)
		return 0;

	cap = find_port_lc_cap(port);
	if (cap < 0)
		return cap;

	ret = tb_sw_read(sw, &ctrl, TB_CFG_SWITCH, cap + TB_LC_SX_CTRL, 1);
	if (ret)
		return ret;

	/* Resolve correct lane */
	if (port->port % 2)
		lane = TB_LC_SX_CTRL_L1C;
	else
		lane = TB_LC_SX_CTRL_L2C;

	if (configured) {
		ctrl |= lane;
		if (upstream)
			ctrl |= TB_LC_SX_CTRL_UPSTREAM;
	} else {
		ctrl &= ~lane;
		if (upstream)
			ctrl &= ~TB_LC_SX_CTRL_UPSTREAM;
	}

	return tb_sw_write(sw, &ctrl, TB_CFG_SWITCH, cap + TB_LC_SX_CTRL, 1);
}

/**
 * tb_lc_configure_port() - Let LC know about configured port
 * @port: Port that is set as configured
 *
 * Sets the port configured for power management purposes.
 */
int tb_lc_configure_port(struct tb_port *port)
{
	return tb_lc_set_port_configured(port, true);
}

/**
 * tb_lc_unconfigure_port() - Let LC know about unconfigured port
 * @port: Port that is set as unconfigured
 *
 * Sets the port unconfigured for power management purposes.
 */
void tb_lc_unconfigure_port(struct tb_port *port)
{
	tb_lc_set_port_configured(port, false);
}
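
/*
 * Usage sketch (illustrative, not part of the driver): a connection
 * manager marks a lane adapter configured once the link is set up and
 * unconfigures it again on unplug, for example:
 *
 *	ret = tb_lc_configure_port(port);
 *	if (ret)
 *		return ret;
 *	...
 *	tb_lc_unconfigure_port(port);
 */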

static int tb_lc_set_xdomain_configured(struct tb_port *port, bool configure)
{
	struct tb_switch *sw = port->sw;
	u32 ctrl, lane;
	int cap, ret;

	if (sw->generation < 2)
		return 0;

	cap = find_port_lc_cap(port);
	if (cap < 0)
		return cap;

	ret = tb_sw_read(sw, &ctrl, TB_CFG_SWITCH, cap + TB_LC_SX_CTRL, 1);
	if (ret)
		return ret;

	/* Resolve correct lane */
	if (port->port % 2)
		lane = TB_LC_SX_CTRL_L1D;
	else
		lane = TB_LC_SX_CTRL_L2D;

	if (configure)
		ctrl |= lane;
	else
		ctrl &= ~lane;

	return tb_sw_write(sw, &ctrl, TB_CFG_SWITCH, cap + TB_LC_SX_CTRL, 1);
}

/**
 * tb_lc_configure_xdomain() - Inform LC that the link is XDomain
 * @port: Switch downstream port connected to another host
 *
 * Sets the lane configured for XDomain accordingly so that the LC knows
 * about this. Returns %0 on success and negative errno on failure.
 */
int tb_lc_configure_xdomain(struct tb_port *port)
{
	return tb_lc_set_xdomain_configured(port, true);
}

/**
 * tb_lc_unconfigure_xdomain() - Unconfigure XDomain from port
 * @port: Switch downstream port that was connected to another host
 *
 * Unsets the lane XDomain configuration.
 */
void tb_lc_unconfigure_xdomain(struct tb_port *port)
{
	tb_lc_set_xdomain_configured(port, false);
}

/**
 * tb_lc_start_lane_initialization() - Start lane initialization
 * @port: Device router lane 0 adapter
 *
 * Starts lane initialization for @port after the router resumed from
 * sleep. Should be called for those downstream lane adapters that were
 * not connected (tb_lc_configure_port() was not called) before sleep.
 *
 * Returns %0 on success and negative errno in case of failure.
 */
int tb_lc_start_lane_initialization(struct tb_port *port)
{
	struct tb_switch *sw = port->sw;
	int ret, cap;
	u32 ctrl;

	if (!tb_route(sw))
		return 0;

	if (sw->generation < 2)
		return 0;

	cap = find_port_lc_cap(port);
	if (cap < 0)
		return cap;

	ret = tb_sw_read(sw, &ctrl, TB_CFG_SWITCH, cap + TB_LC_SX_CTRL, 1);
	if (ret)
		return ret;

	ctrl |= TB_LC_SX_CTRL_SLI;

	return tb_sw_write(sw, &ctrl, TB_CFG_SWITCH, cap + TB_LC_SX_CTRL, 1);
}
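
/*
 * Usage sketch (illustrative, assuming the resume path iterates the
 * router's adapters): after wakeup a caller would kick initialization
 * only on lane adapters that have no neighbor, e.g.
 *
 *	tb_switch_for_each_port(sw, port) {
 *		if (tb_port_is_null(port) && !port->remote)
 *			tb_lc_start_lane_initialization(port);
 *	}
 */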

/**
 * tb_lc_is_clx_supported() - Check whether CLx is supported by the lane adapter
 * @port: Lane adapter
 *
 * TB_LC_LINK_ATTR_CPS bit reflects if the link supports CLx including
 * active cables (if connected on the link).
 */
bool tb_lc_is_clx_supported(struct tb_port *port)
{
	struct tb_switch *sw = port->sw;
	int cap, ret;
	u32 val;

	cap = find_port_lc_cap(port);
	if (cap < 0)
		return false;

	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, cap + TB_LC_LINK_ATTR, 1);
	if (ret)
		return false;

	return !!(val & TB_LC_LINK_ATTR_CPS);
}
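
/*
 * Usage sketch (illustrative, not part of the driver): low power link
 * states only make sense when both ends support them, so a caller would
 * typically check both lane adapters of the link first:
 *
 *	if (tb_lc_is_clx_supported(up) && tb_lc_is_clx_supported(down))
 *		clx_possible = true;	// hypothetical flag
 */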

static int tb_lc_set_wake_one(struct tb_switch *sw, unsigned int offset,
			      unsigned int flags)
{
	u32 ctrl;
	int ret;

	/*
	 * Update the wake bits according to @flags. Wake on USB4 means
	 * wake coming from another router over the link.
	 */
	ret = tb_sw_read(sw, &ctrl, TB_CFG_SWITCH,
			 offset + TB_LC_SX_CTRL, 1);
	if (ret)
		return ret;

	ctrl &= ~(TB_LC_SX_CTRL_WOC | TB_LC_SX_CTRL_WOD | TB_LC_SX_CTRL_WODPC |
		  TB_LC_SX_CTRL_WODPD | TB_LC_SX_CTRL_WOP | TB_LC_SX_CTRL_WOU4);

	if (flags & TB_WAKE_ON_CONNECT)
		ctrl |= TB_LC_SX_CTRL_WOC | TB_LC_SX_CTRL_WOD;
	if (flags & TB_WAKE_ON_USB4)
		ctrl |= TB_LC_SX_CTRL_WOU4;
	if (flags & TB_WAKE_ON_PCIE)
		ctrl |= TB_LC_SX_CTRL_WOP;
	if (flags & TB_WAKE_ON_DP)
		ctrl |= TB_LC_SX_CTRL_WODPC | TB_LC_SX_CTRL_WODPD;

	return tb_sw_write(sw, &ctrl, TB_CFG_SWITCH, offset + TB_LC_SX_CTRL, 1);
}

/**
 * tb_lc_set_wake() - Enable/disable wake
 * @sw: Switch whose wakes to configure
 * @flags: Wakeup flags (%0 to disable)
 *
 * For each LC sets wake bits accordingly.
 */
int tb_lc_set_wake(struct tb_switch *sw, unsigned int flags)
{
	int start, size, nlc, ret, i;
	u32 desc;

	if (sw->generation < 2)
		return 0;

	if (!tb_route(sw))
		return 0;

	ret = read_lc_desc(sw, &desc);
	if (ret)
		return ret;

	/* Figure out number of link controllers */
	nlc = desc & TB_LC_DESC_NLC_MASK;
	start = (desc & TB_LC_DESC_SIZE_MASK) >> TB_LC_DESC_SIZE_SHIFT;
	size = (desc & TB_LC_DESC_PORT_SIZE_MASK) >> TB_LC_DESC_PORT_SIZE_SHIFT;

	/* For each link controller set the wake bits */
	for (i = 0; i < nlc; i++) {
		unsigned int offset = sw->cap_lc + start + i * size;

		ret = tb_lc_set_wake_one(sw, offset, flags);
		if (ret)
			return ret;
	}

	return 0;
}
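
/*
 * Usage sketch (illustrative, not part of the driver): before system
 * sleep a connection manager would build the flags from its wakeup
 * policy and program every router on the way, e.g.
 *
 *	unsigned int flags = TB_WAKE_ON_USB4 | TB_WAKE_ON_PCIE;
 *
 *	if (wake_on_dp)			// hypothetical policy knob
 *		flags |= TB_WAKE_ON_DP;
 *	ret = tb_lc_set_wake(sw, flags);
 *
 * Passing %0 clears all the LC wake bits again.
 */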

/**
 * tb_lc_set_sleep() - Inform LC that the switch is going to sleep
 * @sw: Switch to set sleep
 *
 * Let the switch link controllers know that the switch is going to
 * sleep.
 */
int tb_lc_set_sleep(struct tb_switch *sw)
{
	int start, size, nlc, ret, i;
	u32 desc;

	if (sw->generation < 2)
		return 0;

	ret = read_lc_desc(sw, &desc);
	if (ret)
		return ret;

	/* Figure out number of link controllers */
	nlc = desc & TB_LC_DESC_NLC_MASK;
	start = (desc & TB_LC_DESC_SIZE_MASK) >> TB_LC_DESC_SIZE_SHIFT;
	size = (desc & TB_LC_DESC_PORT_SIZE_MASK) >> TB_LC_DESC_PORT_SIZE_SHIFT;

	/* For each link controller set sleep bit */
	for (i = 0; i < nlc; i++) {
		unsigned int offset = sw->cap_lc + start + i * size;
		u32 ctrl;

		ret = tb_sw_read(sw, &ctrl, TB_CFG_SWITCH,
				 offset + TB_LC_SX_CTRL, 1);
		if (ret)
			return ret;

		ctrl |= TB_LC_SX_CTRL_SLP;
		ret = tb_sw_write(sw, &ctrl, TB_CFG_SWITCH,
				  offset + TB_LC_SX_CTRL, 1);
		if (ret)
			return ret;
	}

	return 0;
}

/**
 * tb_lc_lane_bonding_possible() - Is lane bonding possible towards switch
 * @sw: Switch to check
 *
 * Checks whether the conditions for bonding the lanes from the parent
 * to @sw are met.
 */
bool tb_lc_lane_bonding_possible(struct tb_switch *sw)
{
	struct tb_port *up;
	int cap, ret;
	u32 val;

	if (sw->generation < 2)
		return false;

	up = tb_upstream_port(sw);
	cap = find_port_lc_cap(up);
	if (cap < 0)
		return false;

	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, cap + TB_LC_PORT_ATTR, 1);
	if (ret)
		return false;

	return !!(val & TB_LC_PORT_ATTR_BE);
}

static int tb_lc_dp_sink_from_port(const struct tb_switch *sw,
				   struct tb_port *in)
{
	struct tb_port *port;

	/* The first DP IN port is sink 0 and second is sink 1 */
	tb_switch_for_each_port(sw, port) {
		if (tb_port_is_dpin(port))
			return in != port;
	}

	return -EINVAL;
}

static int tb_lc_dp_sink_available(struct tb_switch *sw, int sink)
{
	u32 val, alloc;
	int ret;

	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH,
			 sw->cap_lc + TB_LC_SNK_ALLOCATION, 1);
	if (ret)
		return ret;

	/*
	 * Sink is available for CM/SW to use if the allocation value is
	 * either 0 or 1.
	 */
	if (!sink) {
		alloc = val & TB_LC_SNK_ALLOCATION_SNK0_MASK;
		if (!alloc || alloc == TB_LC_SNK_ALLOCATION_SNK0_CM)
			return 0;
	} else {
		alloc = (val & TB_LC_SNK_ALLOCATION_SNK1_MASK) >>
			TB_LC_SNK_ALLOCATION_SNK1_SHIFT;
		if (!alloc || alloc == TB_LC_SNK_ALLOCATION_SNK1_CM)
			return 0;
	}

	return -EBUSY;
}
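
/*
 * Illustrative note (field width assumed for the example only): the
 * SNK_ALLOCATION register carries one allocation field per sink, sink 0
 * in the low bits and sink 1 shifted above it. Assuming 4-bit fields:
 *
 *	val = 0x01	sink 0 owned by the CM, sink 1 free
 *	val = 0x10	sink 0 free, sink 1 owned by the CM
 */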

/**
 * tb_lc_dp_sink_query() - Is DP sink available for DP IN port
 * @sw: Switch whose DP sink is queried
 * @in: DP IN port to check
 *
 * Queries through LC SNK_ALLOCATION registers whether DP sink is available
 * for the given DP IN port or not.
 */
bool tb_lc_dp_sink_query(struct tb_switch *sw, struct tb_port *in)
{
	int sink;

	/*
	 * For older generations sink is always available as there is no
	 * allocation mechanism.
	 */
	if (sw->generation < 3)
		return true;

	sink = tb_lc_dp_sink_from_port(sw, in);
	if (sink < 0)
		return false;

	return !tb_lc_dp_sink_available(sw, sink);
}

/**
 * tb_lc_dp_sink_alloc() - Allocate DP sink
 * @sw: Switch whose DP sink is allocated
 * @in: DP IN port the DP sink is allocated for
 *
 * Allocate DP sink for @in via LC SNK_ALLOCATION registers. If the
 * resource is available and allocation is successful returns %0. In all
 * other cases returns negative errno. In particular %-EBUSY is returned if
 * the resource was not available.
 */
int tb_lc_dp_sink_alloc(struct tb_switch *sw, struct tb_port *in)
{
	int ret, sink;
	u32 val;

	if (sw->generation < 3)
		return 0;

	sink = tb_lc_dp_sink_from_port(sw, in);
	if (sink < 0)
		return sink;

	ret = tb_lc_dp_sink_available(sw, sink);
	if (ret)
		return ret;

	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH,
			 sw->cap_lc + TB_LC_SNK_ALLOCATION, 1);
	if (ret)
		return ret;

	if (!sink) {
		val &= ~TB_LC_SNK_ALLOCATION_SNK0_MASK;
		val |= TB_LC_SNK_ALLOCATION_SNK0_CM;
	} else {
		val &= ~TB_LC_SNK_ALLOCATION_SNK1_MASK;
		val |= TB_LC_SNK_ALLOCATION_SNK1_CM <<
			TB_LC_SNK_ALLOCATION_SNK1_SHIFT;
	}

	ret = tb_sw_write(sw, &val, TB_CFG_SWITCH,
			  sw->cap_lc + TB_LC_SNK_ALLOCATION, 1);
	if (ret)
		return ret;

	tb_port_dbg(in, "sink %d allocated\n", sink);
	return 0;
}

/**
 * tb_lc_dp_sink_dealloc() - De-allocate DP sink
 * @sw: Switch whose DP sink is de-allocated
 * @in: DP IN port whose DP sink is de-allocated
 *
 * De-allocate DP sink from @in using LC SNK_ALLOCATION registers.
 */
int tb_lc_dp_sink_dealloc(struct tb_switch *sw, struct tb_port *in)
{
	int ret, sink;
	u32 val;

	if (sw->generation < 3)
		return 0;

	sink = tb_lc_dp_sink_from_port(sw, in);
	if (sink < 0)
		return sink;

	/* Needs to be owned by CM/SW */
	ret = tb_lc_dp_sink_available(sw, sink);
	if (ret)
		return ret;

	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH,
			 sw->cap_lc + TB_LC_SNK_ALLOCATION, 1);
	if (ret)
		return ret;

	if (!sink)
		val &= ~TB_LC_SNK_ALLOCATION_SNK0_MASK;
	else
		val &= ~TB_LC_SNK_ALLOCATION_SNK1_MASK;

	ret = tb_sw_write(sw, &val, TB_CFG_SWITCH,
			  sw->cap_lc + TB_LC_SNK_ALLOCATION, 1);
	if (ret)
		return ret;

	tb_port_dbg(in, "sink %d de-allocated\n", sink);
	return 0;
}
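
/*
 * Usage sketch (illustrative, not part of the driver): the CM pairs the
 * calls above as query -> alloc -> (tunnel lifetime) -> dealloc:
 *
 *	if (tb_lc_dp_sink_query(sw, in)) {
 *		ret = tb_lc_dp_sink_alloc(sw, in);
 *		if (ret)
 *			return ret;
 *		// ... set up the DP tunnel, later tear it down ...
 *		tb_lc_dp_sink_dealloc(sw, in);
 *	}
 */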

/**
 * tb_lc_force_power() - Forces LC to be powered on
 * @sw: Thunderbolt switch
 *
 * This is useful to let authentication cycle pass even without
 * a Thunderbolt link present.
 */
int tb_lc_force_power(struct tb_switch *sw)
{
	u32 in = 0xffff;

	return tb_sw_write(sw, &in, TB_CFG_SWITCH, TB_LC_POWER, 1);
}