// SPDX-License-Identifier: GPL-2.0
/*
 * USB4 specific functionality
 *
 * Copyright (C) 2019, Intel Corporation
 * Authors: Mika Westerberg <mika.westerberg@linux.intel.com>
 *	    Rajmohan Mani <rajmohan.mani@intel.com>
 */

#include <linux/delay.h>
#include <linux/ktime.h>

#include "sb_regs.h"
#include "tb.h"

#define USB4_DATA_RETRIES		3

enum usb4_sb_target {
	USB4_SB_TARGET_ROUTER,
	USB4_SB_TARGET_PARTNER,
	USB4_SB_TARGET_RETIMER,
};

#define USB4_NVM_READ_OFFSET_MASK	GENMASK(23, 2)
#define USB4_NVM_READ_OFFSET_SHIFT	2
#define USB4_NVM_READ_LENGTH_MASK	GENMASK(27, 24)
#define USB4_NVM_READ_LENGTH_SHIFT	24

#define USB4_NVM_SET_OFFSET_MASK	USB4_NVM_READ_OFFSET_MASK
#define USB4_NVM_SET_OFFSET_SHIFT	USB4_NVM_READ_OFFSET_SHIFT

#define USB4_DROM_ADDRESS_MASK		GENMASK(14, 2)
#define USB4_DROM_ADDRESS_SHIFT		2
#define USB4_DROM_SIZE_MASK		GENMASK(19, 15)
#define USB4_DROM_SIZE_SHIFT		15

#define USB4_NVM_SECTOR_SIZE_MASK	GENMASK(23, 0)

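/*
 * Illustrative sketch (not used by the driver): an NVM read of 8 dwords
 * starting at dword address 0x100 would pack its router operation
 * metadata as
 *
 *	metadata = (8 << USB4_NVM_READ_LENGTH_SHIFT) &
 *		   USB4_NVM_READ_LENGTH_MASK;
 *	metadata |= (0x100 << USB4_NVM_READ_OFFSET_SHIFT) &
 *		    USB4_NVM_READ_OFFSET_MASK;
 *
 * which yields (8 << 24) | (0x100 << 2) == 0x08000400. The same packing
 * is done by usb4_switch_nvm_read_block() and, with the DROM masks, by
 * usb4_switch_drom_read_block() below.
 */
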
static int usb4_switch_wait_for_bit(struct tb_switch *sw, u32 offset, u32 bit,
				    u32 value, int timeout_msec)
{
	ktime_t timeout = ktime_add_ms(ktime_get(), timeout_msec);

	do {
		u32 val;
		int ret;

		ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, offset, 1);
		if (ret)
			return ret;

		if ((val & bit) == value)
			return 0;

		usleep_range(50, 100);
	} while (ktime_before(ktime_get(), timeout));

	return -ETIMEDOUT;
}

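/*
 * Executes a router operation natively, i.e. by writing the router
 * config space registers directly: metadata goes to ROUTER_CS_25 and
 * input data to ROUTER_CS_9, then the opcode is written to ROUTER_CS_26
 * with the OV bit set. The operation has completed once the router
 * clears OV, after which ONS and the status field tell how it went, and
 * output metadata/data can be read back from the same registers.
 */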
static int usb4_native_switch_op(struct tb_switch *sw, u16 opcode,
				 u32 *metadata, u8 *status,
				 const void *tx_data, size_t tx_dwords,
				 void *rx_data, size_t rx_dwords)
{
	u32 val;
	int ret;

	if (metadata) {
		ret = tb_sw_write(sw, metadata, TB_CFG_SWITCH, ROUTER_CS_25, 1);
		if (ret)
			return ret;
	}
	if (tx_dwords) {
		ret = tb_sw_write(sw, tx_data, TB_CFG_SWITCH, ROUTER_CS_9,
				  tx_dwords);
		if (ret)
			return ret;
	}

	val = opcode | ROUTER_CS_26_OV;
	ret = tb_sw_write(sw, &val, TB_CFG_SWITCH, ROUTER_CS_26, 1);
	if (ret)
		return ret;

	ret = usb4_switch_wait_for_bit(sw, ROUTER_CS_26, ROUTER_CS_26_OV, 0, 500);
	if (ret)
		return ret;

	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_26, 1);
	if (ret)
		return ret;

	if (val & ROUTER_CS_26_ONS)
		return -EOPNOTSUPP;

	if (status)
		*status = (val & ROUTER_CS_26_STATUS_MASK) >>
			ROUTER_CS_26_STATUS_SHIFT;

	if (metadata) {
		ret = tb_sw_read(sw, metadata, TB_CFG_SWITCH, ROUTER_CS_25, 1);
		if (ret)
			return ret;
	}
	if (rx_dwords) {
		ret = tb_sw_read(sw, rx_data, TB_CFG_SWITCH, ROUTER_CS_9,
				 rx_dwords);
		if (ret)
			return ret;
	}

	return 0;
}

static int __usb4_switch_op(struct tb_switch *sw, u16 opcode, u32 *metadata,
			    u8 *status, const void *tx_data, size_t tx_dwords,
			    void *rx_data, size_t rx_dwords)
{
	const struct tb_cm_ops *cm_ops = sw->tb->cm_ops;

	if (tx_dwords > NVM_DATA_DWORDS || rx_dwords > NVM_DATA_DWORDS)
		return -EINVAL;

	/*
	 * If the connection manager implementation provides USB4 router
	 * operation proxy callback, call it here instead of running the
	 * operation natively.
	 */
	if (cm_ops->usb4_switch_op) {
		int ret;

		ret = cm_ops->usb4_switch_op(sw, opcode, metadata, status,
					     tx_data, tx_dwords, rx_data,
					     rx_dwords);
		if (ret != -EOPNOTSUPP)
			return ret;

		/*
		 * If the proxy was not supported then run the native
		 * router operation instead.
		 */
	}

	return usb4_native_switch_op(sw, opcode, metadata, status, tx_data,
				     tx_dwords, rx_data, rx_dwords);
}

static inline int usb4_switch_op(struct tb_switch *sw, u16 opcode,
				 u32 *metadata, u8 *status)
{
	return __usb4_switch_op(sw, opcode, metadata, status, NULL, 0, NULL, 0);
}

static inline int usb4_switch_op_data(struct tb_switch *sw, u16 opcode,
				      u32 *metadata, u8 *status,
				      const void *tx_data, size_t tx_dwords,
				      void *rx_data, size_t rx_dwords)
{
	return __usb4_switch_op(sw, opcode, metadata, status, tx_data,
				tx_dwords, rx_data, rx_dwords);
}

static void usb4_switch_check_wakes(struct tb_switch *sw)
{
	struct tb_port *port;
	bool wakeup = false;
	u32 val;

	if (!device_may_wakeup(&sw->dev))
		return;

	if (tb_route(sw)) {
		if (tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_6, 1))
			return;

		tb_sw_dbg(sw, "PCIe wake: %s, USB3 wake: %s\n",
			  (val & ROUTER_CS_6_WOPS) ? "yes" : "no",
			  (val & ROUTER_CS_6_WOUS) ? "yes" : "no");

		wakeup = val & (ROUTER_CS_6_WOPS | ROUTER_CS_6_WOUS);
	}

	/* Check for any connected downstream ports for USB4 wake */
	tb_switch_for_each_port(sw, port) {
		if (!tb_port_has_remote(port))
			continue;

		if (tb_port_read(port, &val, TB_CFG_PORT,
				 port->cap_usb4 + PORT_CS_18, 1))
			break;

		tb_port_dbg(port, "USB4 wake: %s\n",
			    (val & PORT_CS_18_WOU4S) ? "yes" : "no");

		if (val & PORT_CS_18_WOU4S)
			wakeup = true;
	}

	if (wakeup)
		pm_wakeup_event(&sw->dev, 0);
}

static bool link_is_usb4(struct tb_port *port)
{
	u32 val;

	if (!port->cap_usb4)
		return false;

	if (tb_port_read(port, &val, TB_CFG_PORT,
			 port->cap_usb4 + PORT_CS_18, 1))
		return false;

	return !(val & PORT_CS_18_TCM);
}

/**
 * usb4_switch_setup() - Additional setup for USB4 device
 * @sw: USB4 router to setup
 *
 * USB4 routers need additional settings in order to enable all the
 * tunneling. This function enables USB and PCIe tunneling if it can be
 * enabled (e.g. the parent switch also supports them). If USB tunneling
 * is not available for some reason (for instance there is a Thunderbolt 3
 * switch upstream) then the internal xHCI controller is enabled instead.
 */
int usb4_switch_setup(struct tb_switch *sw)
{
	struct tb_port *downstream_port;
	struct tb_switch *parent;
	bool tbt3, xhci;
	u32 val = 0;
	int ret;

	usb4_switch_check_wakes(sw);

	if (!tb_route(sw))
		return 0;

	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_6, 1);
	if (ret)
		return ret;

	parent = tb_switch_parent(sw);
	downstream_port = tb_port_at(tb_route(sw), parent);
	sw->link_usb4 = link_is_usb4(downstream_port);
	tb_sw_dbg(sw, "link: %s\n", sw->link_usb4 ? "USB4" : "TBT3");

	xhci = val & ROUTER_CS_6_HCI;
	tbt3 = !(val & ROUTER_CS_6_TNS);

	tb_sw_dbg(sw, "TBT3 support: %s, xHCI: %s\n",
		  tbt3 ? "yes" : "no", xhci ? "yes" : "no");

	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1);
	if (ret)
		return ret;

	if (tb_acpi_may_tunnel_usb3() && sw->link_usb4 &&
	    tb_switch_find_port(parent, TB_TYPE_USB3_DOWN)) {
		val |= ROUTER_CS_5_UTO;
		xhci = false;
	}

	/*
	 * Only enable PCIe tunneling if the parent router supports it
	 * and it is not disabled.
	 */
	if (tb_acpi_may_tunnel_pcie() &&
	    tb_switch_find_port(parent, TB_TYPE_PCIE_DOWN)) {
		val |= ROUTER_CS_5_PTO;
		/*
		 * xHCI can be enabled if PCIe tunneling is supported
		 * and the parent does not have any USB3 downstream
		 * adapters (so we cannot do USB 3.x tunneling).
		 */
		if (xhci)
			val |= ROUTER_CS_5_HCO;
	}

	/* TBT3 supported by the CM */
	val |= ROUTER_CS_5_C3S;
	/* Tunneling configuration is ready now */
	val |= ROUTER_CS_5_CV;

	ret = tb_sw_write(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1);
	if (ret)
		return ret;

	return usb4_switch_wait_for_bit(sw, ROUTER_CS_6, ROUTER_CS_6_CR,
					ROUTER_CS_6_CR, 50);
}

/**
 * usb4_switch_read_uid() - Read UID from USB4 router
 * @sw: USB4 router
 * @uid: UID is stored here
 *
 * Reads 64-bit UID from USB4 router config space.
 */
int usb4_switch_read_uid(struct tb_switch *sw, u64 *uid)
{
	return tb_sw_read(sw, uid, TB_CFG_SWITCH, ROUTER_CS_7, 2);
}

static int usb4_switch_drom_read_block(void *data, unsigned int dwaddress,
				       void *buf, size_t dwords)
{
	struct tb_switch *sw = data;
	u8 status = 0;
	u32 metadata;
	int ret;

	metadata = (dwords << USB4_DROM_SIZE_SHIFT) & USB4_DROM_SIZE_MASK;
	metadata |= (dwaddress << USB4_DROM_ADDRESS_SHIFT) &
		USB4_DROM_ADDRESS_MASK;

	ret = usb4_switch_op_data(sw, USB4_SWITCH_OP_DROM_READ, &metadata,
				  &status, NULL, 0, buf, dwords);
	if (ret)
		return ret;

	return status ? -EIO : 0;
}

/**
 * usb4_switch_drom_read() - Read arbitrary bytes from USB4 router DROM
 * @sw: USB4 router
 * @address: Byte address inside DROM to start reading
 * @buf: Buffer where the DROM content is stored
 * @size: Number of bytes to read from DROM
 *
 * Uses USB4 router operations to read the router DROM. For devices this
 * should always work but for hosts it may return %-EOPNOTSUPP in which
 * case the host router does not have a DROM.
 */
int usb4_switch_drom_read(struct tb_switch *sw, unsigned int address, void *buf,
			  size_t size)
{
	return tb_nvm_read_data(address, buf, size, USB4_DATA_RETRIES,
				usb4_switch_drom_read_block, sw);
}
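
/*
 * tb_nvm_read_data() (in nvm.c) hides the chunking and retry logic from
 * the block readers used in this file. A simplified sketch of the loop
 * it implements (illustrative only, not the exact implementation):
 *
 *	while (size > 0) {
 *		size_t nbytes = min_t(size_t, size, NVM_DATA_DWORDS * 4);
 *
 *		ret = read_block(read_block_data, address / 4, buf,
 *				 ALIGN(nbytes, 4) / 4);
 *		if (ret) {
 *			if (ret != -ENODEV && retries--)
 *				continue;
 *			return ret;
 *		}
 *		address += nbytes;
 *		buf += nbytes;
 *		size -= nbytes;
 *	}
 *
 * The real helper also copes with reads that do not start or end on a
 * dword boundary, and tb_nvm_write_data() follows the same pattern for
 * writes.
 */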
346
Mika Westerbergb0407982019-12-17 15:33:40 +0300347/**
348 * usb4_switch_lane_bonding_possible() - Are conditions met for lane bonding
349 * @sw: USB4 router
350 *
351 * Checks whether conditions are met so that lane bonding can be
352 * established with the upstream router. Call only for device routers.
353 */
354bool usb4_switch_lane_bonding_possible(struct tb_switch *sw)
355{
356 struct tb_port *up;
357 int ret;
358 u32 val;
359
360 up = tb_upstream_port(sw);
361 ret = tb_port_read(up, &val, TB_CFG_PORT, up->cap_usb4 + PORT_CS_18, 1);
362 if (ret)
363 return false;
364
365 return !!(val & PORT_CS_18_BE);
366}
367
/**
 * usb4_switch_set_wake() - Enable/disable wake
 * @sw: USB4 router
 * @flags: Wakeup flags (%0 to disable)
 *
 * Enables/disables the router to wake up from sleep.
 */
int usb4_switch_set_wake(struct tb_switch *sw, unsigned int flags)
{
	struct tb_port *port;
	u64 route = tb_route(sw);
	u32 val;
	int ret;

	/*
	 * Enable wakes coming from all USB4 downstream ports (from
	 * child routers). For device routers do this also for the
	 * upstream USB4 port.
	 */
	tb_switch_for_each_port(sw, port) {
		if (!tb_port_is_null(port))
			continue;
		if (!route && tb_is_upstream_port(port))
			continue;
		if (!port->cap_usb4)
			continue;

		ret = tb_port_read(port, &val, TB_CFG_PORT,
				   port->cap_usb4 + PORT_CS_19, 1);
		if (ret)
			return ret;

		val &= ~(PORT_CS_19_WOC | PORT_CS_19_WOD | PORT_CS_19_WOU4);

		if (tb_is_upstream_port(port)) {
			val |= PORT_CS_19_WOU4;
		} else {
			bool configured = val & PORT_CS_19_PC;

			if ((flags & TB_WAKE_ON_CONNECT) && !configured)
				val |= PORT_CS_19_WOC;
			if ((flags & TB_WAKE_ON_DISCONNECT) && configured)
				val |= PORT_CS_19_WOD;
			if ((flags & TB_WAKE_ON_USB4) && configured)
				val |= PORT_CS_19_WOU4;
		}

		ret = tb_port_write(port, &val, TB_CFG_PORT,
				    port->cap_usb4 + PORT_CS_19, 1);
		if (ret)
			return ret;
	}

	/*
	 * Enable wakes from PCIe, USB 3.x and DP on this router. Only
	 * needed for device routers.
	 */
	if (route) {
		ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1);
		if (ret)
			return ret;

		val &= ~(ROUTER_CS_5_WOP | ROUTER_CS_5_WOU | ROUTER_CS_5_WOD);
		if (flags & TB_WAKE_ON_USB3)
			val |= ROUTER_CS_5_WOU;
		if (flags & TB_WAKE_ON_PCIE)
			val |= ROUTER_CS_5_WOP;
		if (flags & TB_WAKE_ON_DP)
			val |= ROUTER_CS_5_WOD;

		ret = tb_sw_write(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1);
		if (ret)
			return ret;
	}

	return 0;
}

/**
 * usb4_switch_set_sleep() - Prepare the router to enter sleep
 * @sw: USB4 router
 *
 * Sets sleep bit for the router. Returns when the router sleep ready
 * bit has been asserted.
 */
int usb4_switch_set_sleep(struct tb_switch *sw)
{
	int ret;
	u32 val;

	/* Set sleep bit and wait for sleep ready to be asserted */
	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1);
	if (ret)
		return ret;

	val |= ROUTER_CS_5_SLP;

	ret = tb_sw_write(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1);
	if (ret)
		return ret;

	return usb4_switch_wait_for_bit(sw, ROUTER_CS_6, ROUTER_CS_6_SLPR,
					ROUTER_CS_6_SLPR, 500);
}

/**
 * usb4_switch_nvm_sector_size() - Return router NVM sector size
 * @sw: USB4 router
 *
 * If the router supports NVM operations this function returns the NVM
 * sector size in bytes. If NVM operations are not supported returns
 * %-EOPNOTSUPP.
 */
int usb4_switch_nvm_sector_size(struct tb_switch *sw)
{
	u32 metadata;
	u8 status;
	int ret;

	ret = usb4_switch_op(sw, USB4_SWITCH_OP_NVM_SECTOR_SIZE, &metadata,
			     &status);
	if (ret)
		return ret;

	if (status)
		return status == 0x2 ? -EOPNOTSUPP : -EIO;

	return metadata & USB4_NVM_SECTOR_SIZE_MASK;
}

static int usb4_switch_nvm_read_block(void *data, unsigned int dwaddress,
				      void *buf, size_t dwords)
{
	struct tb_switch *sw = data;
	u8 status = 0;
	u32 metadata;
	int ret;

	metadata = (dwords << USB4_NVM_READ_LENGTH_SHIFT) &
		   USB4_NVM_READ_LENGTH_MASK;
	metadata |= (dwaddress << USB4_NVM_READ_OFFSET_SHIFT) &
		    USB4_NVM_READ_OFFSET_MASK;

	ret = usb4_switch_op_data(sw, USB4_SWITCH_OP_NVM_READ, &metadata,
				  &status, NULL, 0, buf, dwords);
	if (ret)
		return ret;

	return status ? -EIO : 0;
}

/**
 * usb4_switch_nvm_read() - Read arbitrary bytes from router NVM
 * @sw: USB4 router
 * @address: Starting address in bytes
 * @buf: Read data is placed here
 * @size: How many bytes to read
 *
 * Reads NVM contents of the router. If NVM is not supported returns
 * %-EOPNOTSUPP.
 */
int usb4_switch_nvm_read(struct tb_switch *sw, unsigned int address, void *buf,
			 size_t size)
{
	return tb_nvm_read_data(address, buf, size, USB4_DATA_RETRIES,
				usb4_switch_nvm_read_block, sw);
}

static int usb4_switch_nvm_set_offset(struct tb_switch *sw,
				      unsigned int address)
{
	u32 metadata, dwaddress;
	u8 status = 0;
	int ret;

	dwaddress = address / 4;
	metadata = (dwaddress << USB4_NVM_SET_OFFSET_SHIFT) &
		   USB4_NVM_SET_OFFSET_MASK;

	ret = usb4_switch_op(sw, USB4_SWITCH_OP_NVM_SET_OFFSET, &metadata,
			     &status);
	if (ret)
		return ret;

	return status ? -EIO : 0;
}

static int usb4_switch_nvm_write_next_block(void *data, unsigned int dwaddress,
					    const void *buf, size_t dwords)
{
	struct tb_switch *sw = data;
	u8 status;
	int ret;

	ret = usb4_switch_op_data(sw, USB4_SWITCH_OP_NVM_WRITE, NULL, &status,
				  buf, dwords, NULL, 0);
	if (ret)
		return ret;

	return status ? -EIO : 0;
}

/**
 * usb4_switch_nvm_write() - Write to the router NVM
 * @sw: USB4 router
 * @address: Start address where to write in bytes
 * @buf: Pointer to the data to write
 * @size: Size of @buf in bytes
 *
 * Writes @buf to the router NVM using USB4 router operations. If NVM
 * write is not supported returns %-EOPNOTSUPP.
 */
int usb4_switch_nvm_write(struct tb_switch *sw, unsigned int address,
			  const void *buf, size_t size)
{
	int ret;

	ret = usb4_switch_nvm_set_offset(sw, address);
	if (ret)
		return ret;

	return tb_nvm_write_data(address, buf, size, USB4_DATA_RETRIES,
				 usb4_switch_nvm_write_next_block, sw);
}

/**
 * usb4_switch_nvm_authenticate() - Authenticate new NVM
 * @sw: USB4 router
 *
 * After the new NVM has been written via usb4_switch_nvm_write(), this
 * function triggers the NVM authentication process. The router gets
 * power cycled and if the authentication is successful the new NVM
 * starts running. In case of failure returns negative errno.
 *
 * The caller should call usb4_switch_nvm_authenticate_status() to read
 * the status of the authentication after power cycle. It should be the
 * first router operation to avoid the status being lost.
 */
int usb4_switch_nvm_authenticate(struct tb_switch *sw)
{
	int ret;

	ret = usb4_switch_op(sw, USB4_SWITCH_OP_NVM_AUTH, NULL, NULL);
	switch (ret) {
	/*
	 * The router is power cycled once NVM_AUTH is started so it is
	 * expected to get any of the following errors back.
	 */
	case -EACCES:
	case -ENOTCONN:
	case -ETIMEDOUT:
		return 0;

	default:
		return ret;
	}
}

/**
 * usb4_switch_nvm_authenticate_status() - Read status of last NVM authenticate
 * @sw: USB4 router
 * @status: Status code of the operation
 *
 * The function checks if there is status available from the last NVM
 * authenticate router operation. If there is status then %0 is returned
 * and the status code is placed in @status. Returns negative errno in case
 * of failure.
 *
 * Must be called before any other router operation.
 */
int usb4_switch_nvm_authenticate_status(struct tb_switch *sw, u32 *status)
{
	const struct tb_cm_ops *cm_ops = sw->tb->cm_ops;
	u16 opcode;
	u32 val;
	int ret;

	if (cm_ops->usb4_switch_nvm_authenticate_status) {
		ret = cm_ops->usb4_switch_nvm_authenticate_status(sw, status);
		if (ret != -EOPNOTSUPP)
			return ret;
	}

	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_26, 1);
	if (ret)
		return ret;

	/* Check that the opcode is correct */
	opcode = val & ROUTER_CS_26_OPCODE_MASK;
	if (opcode == USB4_SWITCH_OP_NVM_AUTH) {
		if (val & ROUTER_CS_26_OV)
			return -EBUSY;
		if (val & ROUTER_CS_26_ONS)
			return -EOPNOTSUPP;

		*status = (val & ROUTER_CS_26_STATUS_MASK) >>
			ROUTER_CS_26_STATUS_SHIFT;
	} else {
		*status = 0;
	}

	return 0;
}
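
/*
 * Putting the router NVM helpers together, the upgrade sequence a
 * caller (the NVM code in switch.c) is expected to follow looks roughly
 * like this sketch, error handling omitted:
 *
 *	usb4_switch_nvm_write(sw, 0, image, image_size);
 *	usb4_switch_nvm_authenticate(sw);
 *	(the router power cycles and the domain re-enumerates)
 *	usb4_switch_nvm_authenticate_status(sw, &status);
 *
 * The status read has to be the first router operation after the power
 * cycle because any subsequent operation overwrites ROUTER_CS_26.
 */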

/**
 * usb4_switch_query_dp_resource() - Query availability of DP IN resource
 * @sw: USB4 router
 * @in: DP IN adapter
 *
 * For DP tunneling this function can be used to query availability of
 * DP IN resource. Returns true if the resource is available for DP
 * tunneling, false otherwise.
 */
bool usb4_switch_query_dp_resource(struct tb_switch *sw, struct tb_port *in)
{
	u32 metadata = in->port;
	u8 status;
	int ret;

	ret = usb4_switch_op(sw, USB4_SWITCH_OP_QUERY_DP_RESOURCE, &metadata,
			     &status);
	/*
	 * If DP resource allocation is not supported assume it is
	 * always available.
	 */
	if (ret == -EOPNOTSUPP)
		return true;
	else if (ret)
		return false;

	return !status;
}

/**
 * usb4_switch_alloc_dp_resource() - Allocate DP IN resource
 * @sw: USB4 router
 * @in: DP IN adapter
 *
 * Allocates DP IN resource for DP tunneling using USB4 router
 * operations. If the resource was allocated returns %0. Otherwise
 * returns negative errno, in particular %-EBUSY if the resource is
 * already allocated.
 */
int usb4_switch_alloc_dp_resource(struct tb_switch *sw, struct tb_port *in)
{
	u32 metadata = in->port;
	u8 status;
	int ret;

	ret = usb4_switch_op(sw, USB4_SWITCH_OP_ALLOC_DP_RESOURCE, &metadata,
			     &status);
	if (ret == -EOPNOTSUPP)
		return 0;
	else if (ret)
		return ret;

	return status ? -EBUSY : 0;
}

/**
 * usb4_switch_dealloc_dp_resource() - Releases allocated DP IN resource
 * @sw: USB4 router
 * @in: DP IN adapter
 *
 * Releases the previously allocated DP IN resource.
 */
int usb4_switch_dealloc_dp_resource(struct tb_switch *sw, struct tb_port *in)
{
	u32 metadata = in->port;
	u8 status;
	int ret;

	ret = usb4_switch_op(sw, USB4_SWITCH_OP_DEALLOC_DP_RESOURCE, &metadata,
			     &status);
	if (ret == -EOPNOTSUPP)
		return 0;
	else if (ret)
		return ret;

	return status ? -EIO : 0;
}

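/*
 * Returns the zero-based index of @port among the lane 0 adapters of
 * @sw, skipping the upstream port. For example, on a router where ports
 * 1 and 2 are the two lanes of the upstream link and ports 3 and 5 are
 * lane 0 of two downstream links, the index of port 3 is 0 and the
 * index of port 5 is 1. The mapping helpers below rely on the protocol
 * adapters appearing in this same order.
 */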
static int usb4_port_idx(const struct tb_switch *sw, const struct tb_port *port)
{
	struct tb_port *p;
	int usb4_idx = 0;

	/* Assume port is primary */
	tb_switch_for_each_port(sw, p) {
		if (!tb_port_is_null(p))
			continue;
		if (tb_is_upstream_port(p))
			continue;
		if (!p->link_nr) {
			if (p == port)
				break;
			usb4_idx++;
		}
	}

	return usb4_idx;
}

/**
 * usb4_switch_map_pcie_down() - Map USB4 port to a PCIe downstream adapter
 * @sw: USB4 router
 * @port: USB4 port
 *
 * USB4 routers have direct mapping between USB4 ports and PCIe
 * downstream adapters where the PCIe topology is extended. This
 * function returns the corresponding downstream PCIe adapter or %NULL
 * if no such mapping was possible.
 */
struct tb_port *usb4_switch_map_pcie_down(struct tb_switch *sw,
					  const struct tb_port *port)
{
	int usb4_idx = usb4_port_idx(sw, port);
	struct tb_port *p;
	int pcie_idx = 0;

	/* Find PCIe down port matching usb4_port */
	tb_switch_for_each_port(sw, p) {
		if (!tb_port_is_pcie_down(p))
			continue;

		if (pcie_idx == usb4_idx)
			return p;

		pcie_idx++;
	}

	return NULL;
}

/**
 * usb4_switch_map_usb3_down() - Map USB4 port to a USB3 downstream adapter
 * @sw: USB4 router
 * @port: USB4 port
 *
 * USB4 routers have direct mapping between USB4 ports and USB 3.x
 * downstream adapters where the USB 3.x topology is extended. This
 * function returns the corresponding downstream USB 3.x adapter or
 * %NULL if no such mapping was possible.
 */
struct tb_port *usb4_switch_map_usb3_down(struct tb_switch *sw,
					  const struct tb_port *port)
{
	int usb4_idx = usb4_port_idx(sw, port);
	struct tb_port *p;
	int usb_idx = 0;

	/* Find USB3 down port matching usb4_port */
	tb_switch_for_each_port(sw, p) {
		if (!tb_port_is_usb3_down(p))
			continue;

		if (usb_idx == usb4_idx)
			return p;

		usb_idx++;
	}

	return NULL;
}

/**
 * usb4_port_unlock() - Unlock USB4 downstream port
 * @port: USB4 port to unlock
 *
 * Unlocks USB4 downstream port so that the connection manager can
 * access the router below this port.
 */
int usb4_port_unlock(struct tb_port *port)
{
	int ret;
	u32 val;

	ret = tb_port_read(port, &val, TB_CFG_PORT, ADP_CS_4, 1);
	if (ret)
		return ret;

	val &= ~ADP_CS_4_LCK;
	return tb_port_write(port, &val, TB_CFG_PORT, ADP_CS_4, 1);
}

static int usb4_port_set_configured(struct tb_port *port, bool configured)
{
	int ret;
	u32 val;

	if (!port->cap_usb4)
		return -EINVAL;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_usb4 + PORT_CS_19, 1);
	if (ret)
		return ret;

	if (configured)
		val |= PORT_CS_19_PC;
	else
		val &= ~PORT_CS_19_PC;

	return tb_port_write(port, &val, TB_CFG_PORT,
			     port->cap_usb4 + PORT_CS_19, 1);
}

/**
 * usb4_port_configure() - Set USB4 port configured
 * @port: USB4 port
 *
 * Sets the USB4 link to be configured for power management purposes.
 */
int usb4_port_configure(struct tb_port *port)
{
	return usb4_port_set_configured(port, true);
}

/**
 * usb4_port_unconfigure() - Set USB4 port unconfigured
 * @port: USB4 port
 *
 * Sets the USB4 link to be unconfigured for power management purposes.
 */
void usb4_port_unconfigure(struct tb_port *port)
{
	usb4_port_set_configured(port, false);
}

static int usb4_set_xdomain_configured(struct tb_port *port, bool configured)
{
	int ret;
	u32 val;

	if (!port->cap_usb4)
		return -EINVAL;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_usb4 + PORT_CS_19, 1);
	if (ret)
		return ret;

	if (configured)
		val |= PORT_CS_19_PID;
	else
		val &= ~PORT_CS_19_PID;

	return tb_port_write(port, &val, TB_CFG_PORT,
			     port->cap_usb4 + PORT_CS_19, 1);
}

/**
 * usb4_port_configure_xdomain() - Configure port for XDomain
 * @port: USB4 port connected to another host
 *
 * Marks the USB4 port as being connected to another host. Returns %0 in
 * success and negative errno in failure.
 */
int usb4_port_configure_xdomain(struct tb_port *port)
{
	return usb4_set_xdomain_configured(port, true);
}

/**
 * usb4_port_unconfigure_xdomain() - Unconfigure port for XDomain
 * @port: USB4 port that was connected to another host
 *
 * Clears USB4 port from being marked as XDomain.
 */
void usb4_port_unconfigure_xdomain(struct tb_port *port)
{
	usb4_set_xdomain_configured(port, false);
}

static int usb4_port_wait_for_bit(struct tb_port *port, u32 offset, u32 bit,
				  u32 value, int timeout_msec)
{
	ktime_t timeout = ktime_add_ms(ktime_get(), timeout_msec);

	do {
		u32 val;
		int ret;

		ret = tb_port_read(port, &val, TB_CFG_PORT, offset, 1);
		if (ret)
			return ret;

		if ((val & bit) == value)
			return 0;

		usleep_range(50, 100);
	} while (ktime_before(ktime_get(), timeout));

	return -ETIMEDOUT;
}

static int usb4_port_read_data(struct tb_port *port, void *data, size_t dwords)
{
	if (dwords > NVM_DATA_DWORDS)
		return -EINVAL;

	return tb_port_read(port, data, TB_CFG_PORT, port->cap_usb4 + PORT_CS_2,
			    dwords);
}

static int usb4_port_write_data(struct tb_port *port, const void *data,
				size_t dwords)
{
	if (dwords > NVM_DATA_DWORDS)
		return -EINVAL;

	return tb_port_write(port, data, TB_CFG_PORT, port->cap_usb4 + PORT_CS_2,
			     dwords);
}

static int usb4_port_sb_read(struct tb_port *port, enum usb4_sb_target target,
			     u8 index, u8 reg, void *buf, u8 size)
{
	size_t dwords = DIV_ROUND_UP(size, 4);
	int ret;
	u32 val;

	if (!port->cap_usb4)
		return -EINVAL;

	val = reg;
	val |= size << PORT_CS_1_LENGTH_SHIFT;
	val |= (target << PORT_CS_1_TARGET_SHIFT) & PORT_CS_1_TARGET_MASK;
	if (target == USB4_SB_TARGET_RETIMER)
		val |= (index << PORT_CS_1_RETIMER_INDEX_SHIFT);
	val |= PORT_CS_1_PND;

	ret = tb_port_write(port, &val, TB_CFG_PORT,
			    port->cap_usb4 + PORT_CS_1, 1);
	if (ret)
		return ret;

	ret = usb4_port_wait_for_bit(port, port->cap_usb4 + PORT_CS_1,
				     PORT_CS_1_PND, 0, 500);
	if (ret)
		return ret;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_usb4 + PORT_CS_1, 1);
	if (ret)
		return ret;

	if (val & PORT_CS_1_NR)
		return -ENODEV;
	if (val & PORT_CS_1_RC)
		return -EIO;

	return buf ? usb4_port_read_data(port, buf, dwords) : 0;
}

static int usb4_port_sb_write(struct tb_port *port, enum usb4_sb_target target,
			      u8 index, u8 reg, const void *buf, u8 size)
{
	size_t dwords = DIV_ROUND_UP(size, 4);
	int ret;
	u32 val;

	if (!port->cap_usb4)
		return -EINVAL;

	if (buf) {
		ret = usb4_port_write_data(port, buf, dwords);
		if (ret)
			return ret;
	}

	val = reg;
	val |= size << PORT_CS_1_LENGTH_SHIFT;
	val |= PORT_CS_1_WNR_WRITE;
	val |= (target << PORT_CS_1_TARGET_SHIFT) & PORT_CS_1_TARGET_MASK;
	if (target == USB4_SB_TARGET_RETIMER)
		val |= (index << PORT_CS_1_RETIMER_INDEX_SHIFT);
	val |= PORT_CS_1_PND;

	ret = tb_port_write(port, &val, TB_CFG_PORT,
			    port->cap_usb4 + PORT_CS_1, 1);
	if (ret)
		return ret;

	ret = usb4_port_wait_for_bit(port, port->cap_usb4 + PORT_CS_1,
				     PORT_CS_1_PND, 0, 500);
	if (ret)
		return ret;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_usb4 + PORT_CS_1, 1);
	if (ret)
		return ret;

	if (val & PORT_CS_1_NR)
		return -ENODEV;
	if (val & PORT_CS_1_RC)
		return -EIO;

	return 0;
}

static int usb4_port_sb_op(struct tb_port *port, enum usb4_sb_target target,
			   u8 index, enum usb4_sb_opcode opcode, int timeout_msec)
{
	ktime_t timeout;
	u32 val;
	int ret;

	val = opcode;
	ret = usb4_port_sb_write(port, target, index, USB4_SB_OPCODE, &val,
				 sizeof(val));
	if (ret)
		return ret;

	timeout = ktime_add_ms(ktime_get(), timeout_msec);

	do {
		/* Check results */
		ret = usb4_port_sb_read(port, target, index, USB4_SB_OPCODE,
					&val, sizeof(val));
		if (ret)
			return ret;

		switch (val) {
		case 0:
			return 0;

		case USB4_SB_OPCODE_ERR:
			return -EAGAIN;

		case USB4_SB_OPCODE_ONS:
			return -EOPNOTSUPP;

		default:
			if (val != opcode)
				return -EIO;
			break;
		}
	} while (ktime_before(ktime_get(), timeout));

	return -ETIMEDOUT;
}

/**
 * usb4_port_enumerate_retimers() - Send RT broadcast transaction
 * @port: USB4 port
 *
 * This forces the USB4 port to send a broadcast RT transaction which
 * makes the retimers on the link assign an index to themselves. Returns
 * %0 in case of success and negative errno if there was an error.
 */
int usb4_port_enumerate_retimers(struct tb_port *port)
{
	u32 val;

	val = USB4_SB_OPCODE_ENUMERATE_RETIMERS;
	return usb4_port_sb_write(port, USB4_SB_TARGET_ROUTER, 0,
				  USB4_SB_OPCODE, &val, sizeof(val));
}

static inline int usb4_port_retimer_op(struct tb_port *port, u8 index,
				       enum usb4_sb_opcode opcode,
				       int timeout_msec)
{
	return usb4_port_sb_op(port, USB4_SB_TARGET_RETIMER, index, opcode,
			       timeout_msec);
}

/**
 * usb4_port_retimer_read() - Read from retimer sideband registers
 * @port: USB4 port
 * @index: Retimer index
 * @reg: Sideband register to read
 * @buf: Data from @reg is stored here
 * @size: Number of bytes to read
 *
 * Function reads retimer sideband registers starting from @reg. The
 * retimer is connected to @port at @index. Returns %0 in case of
 * success, and read data is copied to @buf. If there is no retimer
 * present at given @index returns %-ENODEV. In any other failure
 * returns negative errno.
 */
int usb4_port_retimer_read(struct tb_port *port, u8 index, u8 reg, void *buf,
			   u8 size)
{
	return usb4_port_sb_read(port, USB4_SB_TARGET_RETIMER, index, reg, buf,
				 size);
}

/**
 * usb4_port_retimer_write() - Write to retimer sideband registers
 * @port: USB4 port
 * @index: Retimer index
 * @reg: Sideband register to write
 * @buf: Data that is written starting from @reg
 * @size: Number of bytes to write
 *
 * Writes retimer sideband registers starting from @reg. The retimer is
 * connected to @port at @index. Returns %0 in case of success. If there
 * is no retimer present at given @index returns %-ENODEV. In any other
 * failure returns negative errno.
 */
int usb4_port_retimer_write(struct tb_port *port, u8 index, u8 reg,
			    const void *buf, u8 size)
{
	return usb4_port_sb_write(port, USB4_SB_TARGET_RETIMER, index, reg, buf,
				  size);
}

/**
 * usb4_port_retimer_is_last() - Is the retimer last on-board retimer
 * @port: USB4 port
 * @index: Retimer index
 *
 * If the retimer at @index is the last one (connected directly to the
 * Type-C port) this function returns %1. If it is not, returns %0. If
 * the retimer is not present returns %-ENODEV. Otherwise returns
 * negative errno.
 */
int usb4_port_retimer_is_last(struct tb_port *port, u8 index)
{
	u32 metadata;
	int ret;

	ret = usb4_port_retimer_op(port, index, USB4_SB_OPCODE_QUERY_LAST_RETIMER,
				   500);
	if (ret)
		return ret;

	ret = usb4_port_retimer_read(port, index, USB4_SB_METADATA, &metadata,
				     sizeof(metadata));
	return ret ? ret : metadata & 1;
}

/**
 * usb4_port_retimer_nvm_sector_size() - Read retimer NVM sector size
 * @port: USB4 port
 * @index: Retimer index
 *
 * Reads NVM sector size (in bytes) of a retimer at @index. This
 * operation can be used to determine whether the retimer supports NVM
 * upgrade for example. Returns sector size in bytes or negative errno
 * in case of error. Specifically returns %-ENODEV if there is no
 * retimer at @index.
 */
int usb4_port_retimer_nvm_sector_size(struct tb_port *port, u8 index)
{
	u32 metadata;
	int ret;

	ret = usb4_port_retimer_op(port, index, USB4_SB_OPCODE_GET_NVM_SECTOR_SIZE,
				   500);
	if (ret)
		return ret;

	ret = usb4_port_retimer_read(port, index, USB4_SB_METADATA, &metadata,
				     sizeof(metadata));
	return ret ? ret : metadata & USB4_NVM_SECTOR_SIZE_MASK;
}

static int usb4_port_retimer_nvm_set_offset(struct tb_port *port, u8 index,
					    unsigned int address)
{
	u32 metadata, dwaddress;
	int ret;

	dwaddress = address / 4;
	metadata = (dwaddress << USB4_NVM_SET_OFFSET_SHIFT) &
		   USB4_NVM_SET_OFFSET_MASK;

	ret = usb4_port_retimer_write(port, index, USB4_SB_METADATA, &metadata,
				      sizeof(metadata));
	if (ret)
		return ret;

	return usb4_port_retimer_op(port, index, USB4_SB_OPCODE_NVM_SET_OFFSET,
				    500);
}

struct retimer_info {
	struct tb_port *port;
	u8 index;
};

static int usb4_port_retimer_nvm_write_next_block(void *data,
		unsigned int dwaddress, const void *buf, size_t dwords)
{
	const struct retimer_info *info = data;
	struct tb_port *port = info->port;
	u8 index = info->index;
	int ret;

	ret = usb4_port_retimer_write(port, index, USB4_SB_DATA,
				      buf, dwords * 4);
	if (ret)
		return ret;

	return usb4_port_retimer_op(port, index,
				    USB4_SB_OPCODE_NVM_BLOCK_WRITE, 1000);
}

/**
 * usb4_port_retimer_nvm_write() - Write to retimer NVM
 * @port: USB4 port
 * @index: Retimer index
 * @address: Byte address where to start the write
 * @buf: Data to write
 * @size: Size in bytes how much to write
 *
 * Writes @size bytes from @buf to the retimer NVM. Used for NVM
 * upgrade. Returns %0 if the data was written successfully and negative
 * errno in case of failure. Specifically returns %-ENODEV if there is
 * no retimer at @index.
 */
int usb4_port_retimer_nvm_write(struct tb_port *port, u8 index, unsigned int address,
				const void *buf, size_t size)
{
	struct retimer_info info = { .port = port, .index = index };
	int ret;

	ret = usb4_port_retimer_nvm_set_offset(port, index, address);
	if (ret)
		return ret;

	return tb_nvm_write_data(address, buf, size, USB4_DATA_RETRIES,
				 usb4_port_retimer_nvm_write_next_block, &info);
}

/**
 * usb4_port_retimer_nvm_authenticate() - Start retimer NVM upgrade
 * @port: USB4 port
 * @index: Retimer index
 *
 * After the new NVM image has been written via usb4_port_retimer_nvm_write()
 * this function can be used to trigger the NVM upgrade process. If
 * successful the retimer restarts with the new NVM and may not have the
 * index set so one needs to call usb4_port_enumerate_retimers() to
 * force index to be assigned.
 */
int usb4_port_retimer_nvm_authenticate(struct tb_port *port, u8 index)
{
	u32 val;

	/*
	 * We need to use the raw operation here because once the
	 * authentication completes the retimer index is not set anymore
	 * so we do not get back the status now.
	 */
	val = USB4_SB_OPCODE_NVM_AUTH_WRITE;
	return usb4_port_sb_write(port, USB4_SB_TARGET_RETIMER, index,
				  USB4_SB_OPCODE, &val, sizeof(val));
}

/**
 * usb4_port_retimer_nvm_authenticate_status() - Read status of NVM upgrade
 * @port: USB4 port
 * @index: Retimer index
 * @status: Raw status code read from metadata
 *
 * This can be called after usb4_port_retimer_nvm_authenticate() and
 * usb4_port_enumerate_retimers() to fetch status of the NVM upgrade.
 *
 * Returns %0 if the authentication status was successfully read. The
 * completion metadata (the result) is then stored into @status. If
 * reading the status fails, returns negative errno.
 */
int usb4_port_retimer_nvm_authenticate_status(struct tb_port *port, u8 index,
					      u32 *status)
{
	u32 metadata, val;
	int ret;

	ret = usb4_port_retimer_read(port, index, USB4_SB_OPCODE, &val,
				     sizeof(val));
	if (ret)
		return ret;

	switch (val) {
	case 0:
		*status = 0;
		return 0;

	case USB4_SB_OPCODE_ERR:
		ret = usb4_port_retimer_read(port, index, USB4_SB_METADATA,
					     &metadata, sizeof(metadata));
		if (ret)
			return ret;

		*status = metadata & USB4_SB_METADATA_NVM_AUTH_WRITE_MASK;
		return 0;

	case USB4_SB_OPCODE_ONS:
		return -EOPNOTSUPP;

	default:
		return -EIO;
	}
}
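
/*
 * Putting the retimer NVM helpers together, the upgrade flow a caller
 * (the retimer code in retimer.c) is expected to follow looks roughly
 * like this sketch, error handling omitted:
 *
 *	usb4_port_retimer_nvm_write(port, index, 0, image, image_size);
 *	usb4_port_retimer_nvm_authenticate(port, index);
 *	(the retimer restarts with the new NVM and loses its index)
 *	usb4_port_enumerate_retimers(port);
 *	usb4_port_retimer_nvm_authenticate_status(port, index, &status);
 */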

static int usb4_port_retimer_nvm_read_block(void *data, unsigned int dwaddress,
					    void *buf, size_t dwords)
{
	const struct retimer_info *info = data;
	struct tb_port *port = info->port;
	u8 index = info->index;
	u32 metadata;
	int ret;

	metadata = dwaddress << USB4_NVM_READ_OFFSET_SHIFT;
	if (dwords < NVM_DATA_DWORDS)
		metadata |= dwords << USB4_NVM_READ_LENGTH_SHIFT;

	ret = usb4_port_retimer_write(port, index, USB4_SB_METADATA, &metadata,
				      sizeof(metadata));
	if (ret)
		return ret;

	ret = usb4_port_retimer_op(port, index, USB4_SB_OPCODE_NVM_READ, 500);
	if (ret)
		return ret;

	return usb4_port_retimer_read(port, index, USB4_SB_DATA, buf,
				      dwords * 4);
}

/**
 * usb4_port_retimer_nvm_read() - Read contents of retimer NVM
 * @port: USB4 port
 * @index: Retimer index
 * @address: NVM address (in bytes) to start reading
 * @buf: Data read from NVM is stored here
 * @size: Number of bytes to read
 *
 * Reads retimer NVM and copies the contents to @buf. Returns %0 if the
 * read was successful and negative errno in case of failure.
 * Specifically returns %-ENODEV if there is no retimer at @index.
 */
int usb4_port_retimer_nvm_read(struct tb_port *port, u8 index,
			       unsigned int address, void *buf, size_t size)
{
	struct retimer_info info = { .port = port, .index = index };

	return tb_nvm_read_data(address, buf, size, USB4_DATA_RETRIES,
				usb4_port_retimer_nvm_read_block, &info);
}

/**
 * usb4_usb3_port_max_link_rate() - Maximum supported USB3 link rate
 * @port: USB3 adapter port
 *
 * Return maximum supported link rate of a USB3 adapter in Mb/s.
 * Negative errno in case of error.
 */
int usb4_usb3_port_max_link_rate(struct tb_port *port)
{
	int ret, lr;
	u32 val;

	if (!tb_port_is_usb3_down(port) && !tb_port_is_usb3_up(port))
		return -EINVAL;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_adap + ADP_USB3_CS_4, 1);
	if (ret)
		return ret;

	lr = (val & ADP_USB3_CS_4_MSLR_MASK) >> ADP_USB3_CS_4_MSLR_SHIFT;
	return lr == ADP_USB3_CS_4_MSLR_20G ? 20000 : 10000;
}

/**
 * usb4_usb3_port_actual_link_rate() - Established USB3 link rate
 * @port: USB3 adapter port
 *
 * Return actual established link rate of a USB3 adapter in Mb/s. If the
 * link is not up returns %0, and negative errno in case of failure.
 */
int usb4_usb3_port_actual_link_rate(struct tb_port *port)
{
	int ret, lr;
	u32 val;

	if (!tb_port_is_usb3_down(port) && !tb_port_is_usb3_up(port))
		return -EINVAL;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_adap + ADP_USB3_CS_4, 1);
	if (ret)
		return ret;

	if (!(val & ADP_USB3_CS_4_ULV))
		return 0;

	lr = val & ADP_USB3_CS_4_ALR_MASK;
	return lr == ADP_USB3_CS_4_ALR_20G ? 20000 : 10000;
}

static int usb4_usb3_port_cm_request(struct tb_port *port, bool request)
{
	int ret;
	u32 val;

	if (!tb_port_is_usb3_down(port))
		return -EINVAL;
	if (tb_route(port->sw))
		return -EINVAL;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_adap + ADP_USB3_CS_2, 1);
	if (ret)
		return ret;

	if (request)
		val |= ADP_USB3_CS_2_CMR;
	else
		val &= ~ADP_USB3_CS_2_CMR;

	ret = tb_port_write(port, &val, TB_CFG_PORT,
			    port->cap_adap + ADP_USB3_CS_2, 1);
	if (ret)
		return ret;

	/*
	 * We can use val here directly as the CMR bit is in the same place
	 * as HCA. Just mask out others.
	 */
	val &= ADP_USB3_CS_2_CMR;
	return usb4_port_wait_for_bit(port, port->cap_adap + ADP_USB3_CS_1,
				      ADP_USB3_CS_1_HCA, val, 1500);
}

static inline int usb4_usb3_port_set_cm_request(struct tb_port *port)
{
	return usb4_usb3_port_cm_request(port, true);
}

static inline int usb4_usb3_port_clear_cm_request(struct tb_port *port)
{
	return usb4_usb3_port_cm_request(port, false);
}

static unsigned int usb3_bw_to_mbps(u32 bw, u8 scale)
{
	unsigned long uframes;

	uframes = bw * 512UL << scale;
	return DIV_ROUND_CLOSEST(uframes * 8000, 1000 * 1000);
}

static u32 mbps_to_usb3_bw(unsigned int mbps, u8 scale)
{
	unsigned long uframes;

	/* 1 uframe is 1/8 ms (125 us) -> 1 / 8000 s */
	uframes = ((unsigned long)mbps * 1000 * 1000) / 8000;
	return DIV_ROUND_UP(uframes, 512UL << scale);
}
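
/*
 * Per the conversions above, the adapter expresses bandwidth in units
 * of 512 << scale bits per microframe, with 8000 microframes (125 us
 * each) per second. Worked example with scale == 0: requesting
 * 10000 Mb/s gives mbps_to_usb3_bw(10000, 0) ==
 * DIV_ROUND_UP(10000 * 1000 * 1000 / 8000, 512) == 2442, and reading
 * that field back gives usb3_bw_to_mbps(2442, 0) ==
 * 2442 * 512 * 8000 / 1000000 == ~10002 Mb/s.
 */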

static int usb4_usb3_port_read_allocated_bandwidth(struct tb_port *port,
						   int *upstream_bw,
						   int *downstream_bw)
{
	u32 val, bw, scale;
	int ret;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_adap + ADP_USB3_CS_2, 1);
	if (ret)
		return ret;

	ret = tb_port_read(port, &scale, TB_CFG_PORT,
			   port->cap_adap + ADP_USB3_CS_3, 1);
	if (ret)
		return ret;

	scale &= ADP_USB3_CS_3_SCALE_MASK;

	bw = val & ADP_USB3_CS_2_AUBW_MASK;
	*upstream_bw = usb3_bw_to_mbps(bw, scale);

	bw = (val & ADP_USB3_CS_2_ADBW_MASK) >> ADP_USB3_CS_2_ADBW_SHIFT;
	*downstream_bw = usb3_bw_to_mbps(bw, scale);

	return 0;
}

/**
 * usb4_usb3_port_allocated_bandwidth() - Bandwidth allocated for USB3
 * @port: USB3 adapter port
 * @upstream_bw: Allocated upstream bandwidth is stored here
 * @downstream_bw: Allocated downstream bandwidth is stored here
 *
 * Stores currently allocated USB3 bandwidth into @upstream_bw and
 * @downstream_bw in Mb/s. Returns %0 in case of success and negative
 * errno in failure.
 */
int usb4_usb3_port_allocated_bandwidth(struct tb_port *port, int *upstream_bw,
				       int *downstream_bw)
{
	int ret;

	ret = usb4_usb3_port_set_cm_request(port);
	if (ret)
		return ret;

	ret = usb4_usb3_port_read_allocated_bandwidth(port, upstream_bw,
						      downstream_bw);
	usb4_usb3_port_clear_cm_request(port);

	return ret;
}

static int usb4_usb3_port_read_consumed_bandwidth(struct tb_port *port,
						  int *upstream_bw,
						  int *downstream_bw)
{
	u32 val, bw, scale;
	int ret;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_adap + ADP_USB3_CS_1, 1);
	if (ret)
		return ret;

	ret = tb_port_read(port, &scale, TB_CFG_PORT,
			   port->cap_adap + ADP_USB3_CS_3, 1);
	if (ret)
		return ret;

	scale &= ADP_USB3_CS_3_SCALE_MASK;

	bw = val & ADP_USB3_CS_1_CUBW_MASK;
	*upstream_bw = usb3_bw_to_mbps(bw, scale);

	bw = (val & ADP_USB3_CS_1_CDBW_MASK) >> ADP_USB3_CS_1_CDBW_SHIFT;
	*downstream_bw = usb3_bw_to_mbps(bw, scale);

	return 0;
}

static int usb4_usb3_port_write_allocated_bandwidth(struct tb_port *port,
						    int upstream_bw,
						    int downstream_bw)
{
	u32 val, ubw, dbw, scale;
	int ret;

	/* Read the used scale, hardware default is 0 */
	ret = tb_port_read(port, &scale, TB_CFG_PORT,
			   port->cap_adap + ADP_USB3_CS_3, 1);
	if (ret)
		return ret;

	scale &= ADP_USB3_CS_3_SCALE_MASK;
	ubw = mbps_to_usb3_bw(upstream_bw, scale);
	dbw = mbps_to_usb3_bw(downstream_bw, scale);

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_adap + ADP_USB3_CS_2, 1);
	if (ret)
		return ret;

	val &= ~(ADP_USB3_CS_2_AUBW_MASK | ADP_USB3_CS_2_ADBW_MASK);
	val |= dbw << ADP_USB3_CS_2_ADBW_SHIFT;
	val |= ubw;

	return tb_port_write(port, &val, TB_CFG_PORT,
			     port->cap_adap + ADP_USB3_CS_2, 1);
}

/**
 * usb4_usb3_port_allocate_bandwidth() - Allocate bandwidth for USB3
 * @port: USB3 adapter port
 * @upstream_bw: New upstream bandwidth
 * @downstream_bw: New downstream bandwidth
 *
 * This can be used to set how much bandwidth is allocated for the USB3
 * tunneled isochronous traffic. @upstream_bw and @downstream_bw are the
 * new values programmed to the USB3 adapter allocation registers. If
 * the values are lower than what is currently consumed the allocation
 * is set to what is currently consumed instead (consumed bandwidth
 * cannot be taken away by the CM). The actual new values are returned
 * in @upstream_bw and @downstream_bw.
 *
 * Returns %0 in case of success and negative errno if there was a
 * failure.
 */
int usb4_usb3_port_allocate_bandwidth(struct tb_port *port, int *upstream_bw,
				      int *downstream_bw)
{
	int ret, consumed_up, consumed_down, allocate_up, allocate_down;

	ret = usb4_usb3_port_set_cm_request(port);
	if (ret)
		return ret;

	ret = usb4_usb3_port_read_consumed_bandwidth(port, &consumed_up,
						     &consumed_down);
	if (ret)
		goto err_request;

	/* Don't allow it to go lower than what is consumed */
	allocate_up = max(*upstream_bw, consumed_up);
	allocate_down = max(*downstream_bw, consumed_down);

	ret = usb4_usb3_port_write_allocated_bandwidth(port, allocate_up,
						       allocate_down);
	if (ret)
		goto err_request;

	*upstream_bw = allocate_up;
	*downstream_bw = allocate_down;

err_request:
	usb4_usb3_port_clear_cm_request(port);
	return ret;
}

/**
 * usb4_usb3_port_release_bandwidth() - Release allocated USB3 bandwidth
 * @port: USB3 adapter port
 * @upstream_bw: New allocated upstream bandwidth
 * @downstream_bw: New allocated downstream bandwidth
 *
 * Releases USB3 allocated bandwidth down to what is actually consumed.
 * The new bandwidth is returned in @upstream_bw and @downstream_bw.
 *
 * Returns %0 in case of success and negative errno in case of failure.
 */
int usb4_usb3_port_release_bandwidth(struct tb_port *port, int *upstream_bw,
				     int *downstream_bw)
{
	int ret, consumed_up, consumed_down;

	ret = usb4_usb3_port_set_cm_request(port);
	if (ret)
		return ret;

	ret = usb4_usb3_port_read_consumed_bandwidth(port, &consumed_up,
						     &consumed_down);
	if (ret)
		goto err_request;

	/*
	 * Always keep 1000 Mb/s to make sure xHCI has at least some
	 * bandwidth available for isochronous traffic.
	 */
	if (consumed_up < 1000)
		consumed_up = 1000;
	if (consumed_down < 1000)
		consumed_down = 1000;

	ret = usb4_usb3_port_write_allocated_bandwidth(port, consumed_up,
						       consumed_down);
	if (ret)
		goto err_request;

	*upstream_bw = consumed_up;
	*downstream_bw = consumed_down;

err_request:
	usb4_usb3_port_clear_cm_request(port);
	return ret;
}