blob: 2b8355e6b65f4d6825cbafbcbd910f9196e8f590 [file] [log] [blame]
Mika Westerbergb0407982019-12-17 15:33:40 +03001// SPDX-License-Identifier: GPL-2.0
2/*
3 * USB4 specific functionality
4 *
5 * Copyright (C) 2019, Intel Corporation
6 * Authors: Mika Westerberg <mika.westerberg@linux.intel.com>
7 * Rajmohan Mani <rajmohan.mani@intel.com>
8 */
9
10#include <linux/delay.h>
11#include <linux/ktime.h>
12
Rajmohan Mani02d12852020-03-05 16:33:46 +020013#include "sb_regs.h"
Mika Westerbergb0407982019-12-17 15:33:40 +030014#include "tb.h"
15
16#define USB4_DATA_DWORDS 16
17#define USB4_DATA_RETRIES 3
18
/* Router operation opcodes written to ROUTER_CS_26 (see usb4_switch_op()) */
enum usb4_switch_op {
	USB4_SWITCH_OP_QUERY_DP_RESOURCE = 0x10,
	USB4_SWITCH_OP_ALLOC_DP_RESOURCE = 0x11,
	USB4_SWITCH_OP_DEALLOC_DP_RESOURCE = 0x12,
	USB4_SWITCH_OP_NVM_WRITE = 0x20,
	USB4_SWITCH_OP_NVM_AUTH = 0x21,
	USB4_SWITCH_OP_NVM_READ = 0x22,
	USB4_SWITCH_OP_NVM_SET_OFFSET = 0x23,
	USB4_SWITCH_OP_DROM_READ = 0x24,
	USB4_SWITCH_OP_NVM_SECTOR_SIZE = 0x25,
};
30
/* Target of a sideband register access (encoded into PORT_CS_1) */
enum usb4_sb_target {
	USB4_SB_TARGET_ROUTER,		/* the router itself */
	USB4_SB_TARGET_PARTNER,		/* link partner */
	USB4_SB_TARGET_RETIMER,		/* on-board retimer, selected by index */
};
36
Mika Westerbergb0407982019-12-17 15:33:40 +030037#define USB4_NVM_READ_OFFSET_MASK GENMASK(23, 2)
38#define USB4_NVM_READ_OFFSET_SHIFT 2
39#define USB4_NVM_READ_LENGTH_MASK GENMASK(27, 24)
40#define USB4_NVM_READ_LENGTH_SHIFT 24
41
42#define USB4_NVM_SET_OFFSET_MASK USB4_NVM_READ_OFFSET_MASK
43#define USB4_NVM_SET_OFFSET_SHIFT USB4_NVM_READ_OFFSET_SHIFT
44
45#define USB4_DROM_ADDRESS_MASK GENMASK(14, 2)
46#define USB4_DROM_ADDRESS_SHIFT 2
47#define USB4_DROM_SIZE_MASK GENMASK(19, 15)
48#define USB4_DROM_SIZE_SHIFT 15
49
50#define USB4_NVM_SECTOR_SIZE_MASK GENMASK(23, 0)
51
Mika Westerberg7e728462020-02-14 19:23:03 +020052typedef int (*read_block_fn)(void *, unsigned int, void *, size_t);
53typedef int (*write_block_fn)(void *, const void *, size_t);
Mika Westerbergb0407982019-12-17 15:33:40 +030054
55static int usb4_switch_wait_for_bit(struct tb_switch *sw, u32 offset, u32 bit,
56 u32 value, int timeout_msec)
57{
58 ktime_t timeout = ktime_add_ms(ktime_get(), timeout_msec);
59
60 do {
61 u32 val;
62 int ret;
63
64 ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, offset, 1);
65 if (ret)
66 return ret;
67
68 if ((val & bit) == value)
69 return 0;
70
71 usleep_range(50, 100);
72 } while (ktime_before(ktime_get(), timeout));
73
74 return -ETIMEDOUT;
75}
76
77static int usb4_switch_op_read_data(struct tb_switch *sw, void *data,
78 size_t dwords)
79{
80 if (dwords > USB4_DATA_DWORDS)
81 return -EINVAL;
82
83 return tb_sw_read(sw, data, TB_CFG_SWITCH, ROUTER_CS_9, dwords);
84}
85
86static int usb4_switch_op_write_data(struct tb_switch *sw, const void *data,
87 size_t dwords)
88{
89 if (dwords > USB4_DATA_DWORDS)
90 return -EINVAL;
91
92 return tb_sw_write(sw, data, TB_CFG_SWITCH, ROUTER_CS_9, dwords);
93}
94
/* Read the router operation metadata dword (ROUTER_CS_25) */
static int usb4_switch_op_read_metadata(struct tb_switch *sw, u32 *metadata)
{
	return tb_sw_read(sw, metadata, TB_CFG_SWITCH, ROUTER_CS_25, 1);
}
99
/* Write the router operation metadata dword (ROUTER_CS_25) */
static int usb4_switch_op_write_metadata(struct tb_switch *sw, u32 metadata)
{
	return tb_sw_write(sw, &metadata, TB_CFG_SWITCH, ROUTER_CS_25, 1);
}
104
Mika Westerberg7e728462020-02-14 19:23:03 +0200105static int usb4_do_read_data(u16 address, void *buf, size_t size,
106 read_block_fn read_block, void *read_block_data)
Mika Westerbergb0407982019-12-17 15:33:40 +0300107{
108 unsigned int retries = USB4_DATA_RETRIES;
109 unsigned int offset;
110
111 offset = address & 3;
112 address = address & ~3;
113
114 do {
115 size_t nbytes = min_t(size_t, size, USB4_DATA_DWORDS * 4);
116 unsigned int dwaddress, dwords;
117 u8 data[USB4_DATA_DWORDS * 4];
118 int ret;
119
120 dwaddress = address / 4;
121 dwords = ALIGN(nbytes, 4) / 4;
122
Mika Westerberg7e728462020-02-14 19:23:03 +0200123 ret = read_block(read_block_data, dwaddress, data, dwords);
Mika Westerbergb0407982019-12-17 15:33:40 +0300124 if (ret) {
Mika Westerberg6bfe3342020-02-14 19:25:34 +0200125 if (ret != -ENODEV && retries--)
126 continue;
Mika Westerbergb0407982019-12-17 15:33:40 +0300127 return ret;
128 }
129
130 memcpy(buf, data + offset, nbytes);
131
132 size -= nbytes;
133 address += nbytes;
134 buf += nbytes;
135 } while (size > 0);
136
137 return 0;
138}
139
/*
 * Write @size bytes from @buf starting at byte @address using the
 * dword-granular @write_next_block callback. Timeouts from the callback
 * are retried up to USB4_DATA_RETRIES times and then reported as %-EIO.
 *
 * NOTE(review): if @address is not dword aligned, the first @offset
 * bytes of the bounce buffer are uninitialized and nbytes / 4 rounds
 * down — callers appear to always write from a dword-aligned offset
 * (set via the NVM set-offset operations); confirm before reusing this
 * for unaligned writes.
 */
static int usb4_do_write_data(unsigned int address, const void *buf, size_t size,
			      write_block_fn write_next_block, void *write_block_data)
{
	unsigned int retries = USB4_DATA_RETRIES;
	unsigned int offset;

	offset = address & 3;
	address = address & ~3;

	do {
		u32 nbytes = min_t(u32, size, USB4_DATA_DWORDS * 4);
		u8 data[USB4_DATA_DWORDS * 4];
		int ret;

		/* Stage the chunk into a bounce buffer at the sub-dword offset */
		memcpy(data + offset, buf, nbytes);

		ret = write_next_block(write_block_data, data, nbytes / 4);
		if (ret) {
			/* Retry timeouts; after the retries are gone report -EIO */
			if (ret == -ETIMEDOUT) {
				if (retries--)
					continue;
				ret = -EIO;
			}
			return ret;
		}

		size -= nbytes;
		address += nbytes;
		buf += nbytes;
	} while (size > 0);

	return 0;
}
173
174static int usb4_switch_op(struct tb_switch *sw, u16 opcode, u8 *status)
175{
176 u32 val;
177 int ret;
178
179 val = opcode | ROUTER_CS_26_OV;
180 ret = tb_sw_write(sw, &val, TB_CFG_SWITCH, ROUTER_CS_26, 1);
181 if (ret)
182 return ret;
183
184 ret = usb4_switch_wait_for_bit(sw, ROUTER_CS_26, ROUTER_CS_26_OV, 0, 500);
185 if (ret)
186 return ret;
187
188 ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_26, 1);
Mika Westerbergc3bf9932020-04-09 10:18:10 +0300189 if (ret)
190 return ret;
191
Mika Westerbergb0407982019-12-17 15:33:40 +0300192 if (val & ROUTER_CS_26_ONS)
193 return -EOPNOTSUPP;
194
195 *status = (val & ROUTER_CS_26_STATUS_MASK) >> ROUTER_CS_26_STATUS_SHIFT;
196 return 0;
197}
198
Mika Westerbergbbcf40b2020-03-04 17:09:14 +0200199static bool link_is_usb4(struct tb_port *port)
200{
201 u32 val;
202
203 if (!port->cap_usb4)
204 return false;
205
206 if (tb_port_read(port, &val, TB_CFG_PORT,
207 port->cap_usb4 + PORT_CS_18, 1))
208 return false;
209
210 return !(val & PORT_CS_18_TCM);
211}
212
/**
 * usb4_switch_setup() - Additional setup for USB4 device
 * @sw: USB4 router to setup
 *
 * USB4 routers need additional settings in order to enable all the
 * tunneling. This function enables USB and PCIe tunneling if it can be
 * enabled (e.g the parent switch also supports them). If USB tunneling
 * is not available for some reason (like that there is Thunderbolt 3
 * switch upstream) then the internal xHCI controller is enabled
 * instead.
 */
int usb4_switch_setup(struct tb_switch *sw)
{
	struct tb_port *downstream_port;
	struct tb_switch *parent;
	bool tbt3, xhci;
	u32 val = 0;
	int ret;

	/* Nothing to configure for the host router itself */
	if (!tb_route(sw))
		return 0;

	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_6, 1);
	if (ret)
		return ret;

	/* Cache whether the upstream link runs in USB4 or TBT3 mode */
	parent = tb_switch_parent(sw);
	downstream_port = tb_port_at(tb_route(sw), parent);
	sw->link_usb4 = link_is_usb4(downstream_port);
	tb_sw_dbg(sw, "link: %s\n", sw->link_usb4 ? "USB4" : "TBT3");

	xhci = val & ROUTER_CS_6_HCI;
	tbt3 = !(val & ROUTER_CS_6_TNS);

	tb_sw_dbg(sw, "TBT3 support: %s, xHCI: %s\n",
		  tbt3 ? "yes" : "no", xhci ? "yes" : "no");

	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1);
	if (ret)
		return ret;

	/* Prefer USB 3.x tunneling over the internal xHCI when possible */
	if (sw->link_usb4 && tb_switch_find_port(parent, TB_TYPE_USB3_DOWN)) {
		val |= ROUTER_CS_5_UTO;
		xhci = false;
	}

	/* Only enable PCIe tunneling if the parent router supports it */
	if (tb_switch_find_port(parent, TB_TYPE_PCIE_DOWN)) {
		val |= ROUTER_CS_5_PTO;
		/*
		 * xHCI can be enabled if PCIe tunneling is supported
		 * and the parent does not have any USB3 downstream
		 * adapters (so we cannot do USB 3.x tunneling).
		 */
		if (xhci)
			val |= ROUTER_CS_5_HCO;
	}

	/* TBT3 supported by the CM */
	val |= ROUTER_CS_5_C3S;
	/* Tunneling configuration is ready now */
	val |= ROUTER_CS_5_CV;

	ret = tb_sw_write(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1);
	if (ret)
		return ret;

	/* Wait until the router acknowledges the new configuration */
	return usb4_switch_wait_for_bit(sw, ROUTER_CS_6, ROUTER_CS_6_CR,
					ROUTER_CS_6_CR, 50);
}
283
/**
 * usb4_switch_read_uid() - Read UID from USB4 router
 * @sw: USB4 router
 * @uid: UID is stored here
 *
 * Reads 64-bit UID from USB4 router config space (ROUTER_CS_7 holds the
 * low dword, ROUTER_CS_8 the high dword). Returns %0 on success or
 * negative errno on config space access failure.
 */
int usb4_switch_read_uid(struct tb_switch *sw, u64 *uid)
{
	return tb_sw_read(sw, uid, TB_CFG_SWITCH, ROUTER_CS_7, 2);
}
295
Mika Westerberg7e728462020-02-14 19:23:03 +0200296static int usb4_switch_drom_read_block(void *data,
Mika Westerbergb0407982019-12-17 15:33:40 +0300297 unsigned int dwaddress, void *buf,
298 size_t dwords)
299{
Mika Westerberg7e728462020-02-14 19:23:03 +0200300 struct tb_switch *sw = data;
Mika Westerbergb0407982019-12-17 15:33:40 +0300301 u8 status = 0;
302 u32 metadata;
303 int ret;
304
305 metadata = (dwords << USB4_DROM_SIZE_SHIFT) & USB4_DROM_SIZE_MASK;
306 metadata |= (dwaddress << USB4_DROM_ADDRESS_SHIFT) &
307 USB4_DROM_ADDRESS_MASK;
308
309 ret = usb4_switch_op_write_metadata(sw, metadata);
310 if (ret)
311 return ret;
312
313 ret = usb4_switch_op(sw, USB4_SWITCH_OP_DROM_READ, &status);
314 if (ret)
315 return ret;
316
317 if (status)
318 return -EIO;
319
320 return usb4_switch_op_read_data(sw, buf, dwords);
321}
322
/**
 * usb4_switch_drom_read() - Read arbitrary bytes from USB4 router DROM
 * @sw: USB4 router
 * @address: Byte address inside DROM to start reading
 * @buf: Buffer where the DROM content is stored
 * @size: Number of bytes to read from DROM
 *
 * Uses USB4 router operations to read router DROM. For devices this
 * should always work but for hosts it may return %-EOPNOTSUPP in which
 * case the host router does not have DROM.
 */
int usb4_switch_drom_read(struct tb_switch *sw, unsigned int address, void *buf,
			  size_t size)
{
	/* Chunking and retries are handled by the common helper */
	return usb4_do_read_data(address, buf, size,
				 usb4_switch_drom_read_block, sw);
}
340
341static int usb4_set_port_configured(struct tb_port *port, bool configured)
342{
343 int ret;
344 u32 val;
345
346 ret = tb_port_read(port, &val, TB_CFG_PORT,
347 port->cap_usb4 + PORT_CS_19, 1);
348 if (ret)
349 return ret;
350
351 if (configured)
352 val |= PORT_CS_19_PC;
353 else
354 val &= ~PORT_CS_19_PC;
355
356 return tb_port_write(port, &val, TB_CFG_PORT,
357 port->cap_usb4 + PORT_CS_19, 1);
358}
359
360/**
361 * usb4_switch_configure_link() - Set upstream USB4 link configured
362 * @sw: USB4 router
363 *
364 * Sets the upstream USB4 link to be configured for power management
365 * purposes.
366 */
367int usb4_switch_configure_link(struct tb_switch *sw)
368{
369 struct tb_port *up;
370
371 if (!tb_route(sw))
372 return 0;
373
374 up = tb_upstream_port(sw);
375 return usb4_set_port_configured(up, true);
376}
377
378/**
379 * usb4_switch_unconfigure_link() - Un-set upstream USB4 link configuration
380 * @sw: USB4 router
381 *
382 * Reverse of usb4_switch_configure_link().
383 */
384void usb4_switch_unconfigure_link(struct tb_switch *sw)
385{
386 struct tb_port *up;
387
388 if (sw->is_unplugged || !tb_route(sw))
389 return;
390
391 up = tb_upstream_port(sw);
392 usb4_set_port_configured(up, false);
393}
394
395/**
396 * usb4_switch_lane_bonding_possible() - Are conditions met for lane bonding
397 * @sw: USB4 router
398 *
399 * Checks whether conditions are met so that lane bonding can be
400 * established with the upstream router. Call only for device routers.
401 */
402bool usb4_switch_lane_bonding_possible(struct tb_switch *sw)
403{
404 struct tb_port *up;
405 int ret;
406 u32 val;
407
408 up = tb_upstream_port(sw);
409 ret = tb_port_read(up, &val, TB_CFG_PORT, up->cap_usb4 + PORT_CS_18, 1);
410 if (ret)
411 return false;
412
413 return !!(val & PORT_CS_18_BE);
414}
415
/**
 * usb4_switch_set_sleep() - Prepare the router to enter sleep
 * @sw: USB4 router
 *
 * Enables wakes and sets sleep bit for the router. Returns when the
 * router sleep ready bit has been asserted.
 */
int usb4_switch_set_sleep(struct tb_switch *sw)
{
	int ret;
	u32 val;

	/* Set sleep bit and wait for sleep ready to be asserted */
	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1);
	if (ret)
		return ret;

	val |= ROUTER_CS_5_SLP;

	ret = tb_sw_write(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1);
	if (ret)
		return ret;

	/* Poll ROUTER_CS_6 for sleep-ready for up to 500 ms */
	return usb4_switch_wait_for_bit(sw, ROUTER_CS_6, ROUTER_CS_6_SLPR,
					ROUTER_CS_6_SLPR, 500);
}
442
443/**
444 * usb4_switch_nvm_sector_size() - Return router NVM sector size
445 * @sw: USB4 router
446 *
447 * If the router supports NVM operations this function returns the NVM
448 * sector size in bytes. If NVM operations are not supported returns
449 * %-EOPNOTSUPP.
450 */
451int usb4_switch_nvm_sector_size(struct tb_switch *sw)
452{
453 u32 metadata;
454 u8 status;
455 int ret;
456
457 ret = usb4_switch_op(sw, USB4_SWITCH_OP_NVM_SECTOR_SIZE, &status);
458 if (ret)
459 return ret;
460
461 if (status)
462 return status == 0x2 ? -EOPNOTSUPP : -EIO;
463
464 ret = usb4_switch_op_read_metadata(sw, &metadata);
465 if (ret)
466 return ret;
467
468 return metadata & USB4_NVM_SECTOR_SIZE_MASK;
469}
470
Mika Westerberg7e728462020-02-14 19:23:03 +0200471static int usb4_switch_nvm_read_block(void *data,
Mika Westerbergb0407982019-12-17 15:33:40 +0300472 unsigned int dwaddress, void *buf, size_t dwords)
473{
Mika Westerberg7e728462020-02-14 19:23:03 +0200474 struct tb_switch *sw = data;
Mika Westerbergb0407982019-12-17 15:33:40 +0300475 u8 status = 0;
476 u32 metadata;
477 int ret;
478
479 metadata = (dwords << USB4_NVM_READ_LENGTH_SHIFT) &
480 USB4_NVM_READ_LENGTH_MASK;
481 metadata |= (dwaddress << USB4_NVM_READ_OFFSET_SHIFT) &
482 USB4_NVM_READ_OFFSET_MASK;
483
484 ret = usb4_switch_op_write_metadata(sw, metadata);
485 if (ret)
486 return ret;
487
488 ret = usb4_switch_op(sw, USB4_SWITCH_OP_NVM_READ, &status);
489 if (ret)
490 return ret;
491
492 if (status)
493 return -EIO;
494
495 return usb4_switch_op_read_data(sw, buf, dwords);
496}
497
/**
 * usb4_switch_nvm_read() - Read arbitrary bytes from router NVM
 * @sw: USB4 router
 * @address: Starting address in bytes
 * @buf: Read data is placed here
 * @size: How many bytes to read
 *
 * Reads NVM contents of the router. If NVM is not supported returns
 * %-EOPNOTSUPP.
 */
int usb4_switch_nvm_read(struct tb_switch *sw, unsigned int address, void *buf,
			 size_t size)
{
	/* Chunking and retries are handled by the common helper */
	return usb4_do_read_data(address, buf, size,
				 usb4_switch_nvm_read_block, sw);
}
514
515static int usb4_switch_nvm_set_offset(struct tb_switch *sw,
516 unsigned int address)
517{
518 u32 metadata, dwaddress;
519 u8 status = 0;
520 int ret;
521
522 dwaddress = address / 4;
523 metadata = (dwaddress << USB4_NVM_SET_OFFSET_SHIFT) &
524 USB4_NVM_SET_OFFSET_MASK;
525
526 ret = usb4_switch_op_write_metadata(sw, metadata);
527 if (ret)
528 return ret;
529
530 ret = usb4_switch_op(sw, USB4_SWITCH_OP_NVM_SET_OFFSET, &status);
531 if (ret)
532 return ret;
533
534 return status ? -EIO : 0;
535}
536
Mika Westerberg7e728462020-02-14 19:23:03 +0200537static int usb4_switch_nvm_write_next_block(void *data, const void *buf,
538 size_t dwords)
Mika Westerbergb0407982019-12-17 15:33:40 +0300539{
Mika Westerberg7e728462020-02-14 19:23:03 +0200540 struct tb_switch *sw = data;
Mika Westerbergb0407982019-12-17 15:33:40 +0300541 u8 status;
542 int ret;
543
544 ret = usb4_switch_op_write_data(sw, buf, dwords);
545 if (ret)
546 return ret;
547
548 ret = usb4_switch_op(sw, USB4_SWITCH_OP_NVM_WRITE, &status);
549 if (ret)
550 return ret;
551
552 return status ? -EIO : 0;
553}
554
/**
 * usb4_switch_nvm_write() - Write to the router NVM
 * @sw: USB4 router
 * @address: Start address where to write in bytes
 * @buf: Pointer to the data to write
 * @size: Size of @buf in bytes
 *
 * Writes @buf to the router NVM using USB4 router operations. If NVM
 * write is not supported returns %-EOPNOTSUPP.
 */
int usb4_switch_nvm_write(struct tb_switch *sw, unsigned int address,
			  const void *buf, size_t size)
{
	int ret;

	/* Program the start offset before streaming the data blocks */
	ret = usb4_switch_nvm_set_offset(sw, address);
	if (ret)
		return ret;

	return usb4_do_write_data(address, buf, size,
				  usb4_switch_nvm_write_next_block, sw);
}
577
578/**
579 * usb4_switch_nvm_authenticate() - Authenticate new NVM
580 * @sw: USB4 router
581 *
582 * After the new NVM has been written via usb4_switch_nvm_write(), this
583 * function triggers NVM authentication process. If the authentication
584 * is successful the router is power cycled and the new NVM starts
585 * running. In case of failure returns negative errno.
586 */
587int usb4_switch_nvm_authenticate(struct tb_switch *sw)
588{
589 u8 status = 0;
590 int ret;
591
592 ret = usb4_switch_op(sw, USB4_SWITCH_OP_NVM_AUTH, &status);
593 if (ret)
594 return ret;
595
596 switch (status) {
597 case 0x0:
598 tb_sw_dbg(sw, "NVM authentication successful\n");
599 return 0;
600 case 0x1:
601 return -EINVAL;
602 case 0x2:
603 return -EAGAIN;
604 case 0x3:
605 return -EOPNOTSUPP;
606 default:
607 return -EIO;
608 }
609}
610
611/**
612 * usb4_switch_query_dp_resource() - Query availability of DP IN resource
613 * @sw: USB4 router
614 * @in: DP IN adapter
615 *
616 * For DP tunneling this function can be used to query availability of
617 * DP IN resource. Returns true if the resource is available for DP
618 * tunneling, false otherwise.
619 */
620bool usb4_switch_query_dp_resource(struct tb_switch *sw, struct tb_port *in)
621{
622 u8 status;
623 int ret;
624
625 ret = usb4_switch_op_write_metadata(sw, in->port);
626 if (ret)
627 return false;
628
629 ret = usb4_switch_op(sw, USB4_SWITCH_OP_QUERY_DP_RESOURCE, &status);
630 /*
631 * If DP resource allocation is not supported assume it is
632 * always available.
633 */
634 if (ret == -EOPNOTSUPP)
635 return true;
636 else if (ret)
637 return false;
638
639 return !status;
640}
641
642/**
643 * usb4_switch_alloc_dp_resource() - Allocate DP IN resource
644 * @sw: USB4 router
645 * @in: DP IN adapter
646 *
647 * Allocates DP IN resource for DP tunneling using USB4 router
648 * operations. If the resource was allocated returns %0. Otherwise
649 * returns negative errno, in particular %-EBUSY if the resource is
650 * already allocated.
651 */
652int usb4_switch_alloc_dp_resource(struct tb_switch *sw, struct tb_port *in)
653{
654 u8 status;
655 int ret;
656
657 ret = usb4_switch_op_write_metadata(sw, in->port);
658 if (ret)
659 return ret;
660
661 ret = usb4_switch_op(sw, USB4_SWITCH_OP_ALLOC_DP_RESOURCE, &status);
662 if (ret == -EOPNOTSUPP)
663 return 0;
664 else if (ret)
665 return ret;
666
667 return status ? -EBUSY : 0;
668}
669
670/**
671 * usb4_switch_dealloc_dp_resource() - Releases allocated DP IN resource
672 * @sw: USB4 router
673 * @in: DP IN adapter
674 *
675 * Releases the previously allocated DP IN resource.
676 */
677int usb4_switch_dealloc_dp_resource(struct tb_switch *sw, struct tb_port *in)
678{
679 u8 status;
680 int ret;
681
682 ret = usb4_switch_op_write_metadata(sw, in->port);
683 if (ret)
684 return ret;
685
686 ret = usb4_switch_op(sw, USB4_SWITCH_OP_DEALLOC_DP_RESOURCE, &status);
687 if (ret == -EOPNOTSUPP)
688 return 0;
689 else if (ret)
690 return ret;
691
692 return status ? -EIO : 0;
693}
694
/*
 * Return the zero-based USB4 port index of @port on @sw: downstream
 * lane adapters are counted in port order, only the primary lane of
 * each (bonded) link (link_nr == 0) contributes to the index.
 */
static int usb4_port_idx(const struct tb_switch *sw, const struct tb_port *port)
{
	struct tb_port *p;
	int usb4_idx = 0;

	/* Assume port is primary */
	tb_switch_for_each_port(sw, p) {
		/* Only lane adapters count as USB4 ports */
		if (!tb_port_is_null(p))
			continue;
		/* The upstream lane is not a downstream USB4 port */
		if (tb_is_upstream_port(p))
			continue;
		if (!p->link_nr) {
			if (p == port)
				break;
			usb4_idx++;
		}
	}

	return usb4_idx;
}
715
716/**
717 * usb4_switch_map_pcie_down() - Map USB4 port to a PCIe downstream adapter
718 * @sw: USB4 router
719 * @port: USB4 port
720 *
721 * USB4 routers have direct mapping between USB4 ports and PCIe
722 * downstream adapters where the PCIe topology is extended. This
723 * function returns the corresponding downstream PCIe adapter or %NULL
724 * if no such mapping was possible.
725 */
726struct tb_port *usb4_switch_map_pcie_down(struct tb_switch *sw,
727 const struct tb_port *port)
728{
729 int usb4_idx = usb4_port_idx(sw, port);
730 struct tb_port *p;
731 int pcie_idx = 0;
732
733 /* Find PCIe down port matching usb4_port */
734 tb_switch_for_each_port(sw, p) {
735 if (!tb_port_is_pcie_down(p))
736 continue;
737
Mika Westerberg9cac51a2020-03-11 16:12:50 +0300738 if (pcie_idx == usb4_idx)
Mika Westerbergb0407982019-12-17 15:33:40 +0300739 return p;
740
741 pcie_idx++;
742 }
743
744 return NULL;
745}
746
747/**
Rajmohan Manie6f81852019-12-17 15:33:44 +0300748 * usb4_switch_map_usb3_down() - Map USB4 port to a USB3 downstream adapter
749 * @sw: USB4 router
750 * @port: USB4 port
751 *
752 * USB4 routers have direct mapping between USB4 ports and USB 3.x
753 * downstream adapters where the USB 3.x topology is extended. This
754 * function returns the corresponding downstream USB 3.x adapter or
755 * %NULL if no such mapping was possible.
756 */
757struct tb_port *usb4_switch_map_usb3_down(struct tb_switch *sw,
758 const struct tb_port *port)
759{
760 int usb4_idx = usb4_port_idx(sw, port);
761 struct tb_port *p;
762 int usb_idx = 0;
763
764 /* Find USB3 down port matching usb4_port */
765 tb_switch_for_each_port(sw, p) {
766 if (!tb_port_is_usb3_down(p))
767 continue;
768
Mika Westerberg77cfa402020-03-11 16:00:46 +0300769 if (usb_idx == usb4_idx)
Rajmohan Manie6f81852019-12-17 15:33:44 +0300770 return p;
771
772 usb_idx++;
773 }
774
775 return NULL;
776}
777
778/**
Mika Westerbergb0407982019-12-17 15:33:40 +0300779 * usb4_port_unlock() - Unlock USB4 downstream port
780 * @port: USB4 port to unlock
781 *
782 * Unlocks USB4 downstream port so that the connection manager can
783 * access the router below this port.
784 */
785int usb4_port_unlock(struct tb_port *port)
786{
787 int ret;
788 u32 val;
789
790 ret = tb_port_read(port, &val, TB_CFG_PORT, ADP_CS_4, 1);
791 if (ret)
792 return ret;
793
794 val &= ~ADP_CS_4_LCK;
795 return tb_port_write(port, &val, TB_CFG_PORT, ADP_CS_4, 1);
796}
Mika Westerberg3b1d8d52020-02-21 23:14:41 +0200797
798static int usb4_port_wait_for_bit(struct tb_port *port, u32 offset, u32 bit,
799 u32 value, int timeout_msec)
800{
801 ktime_t timeout = ktime_add_ms(ktime_get(), timeout_msec);
802
803 do {
804 u32 val;
805 int ret;
806
807 ret = tb_port_read(port, &val, TB_CFG_PORT, offset, 1);
808 if (ret)
809 return ret;
810
811 if ((val & bit) == value)
812 return 0;
813
814 usleep_range(50, 100);
815 } while (ktime_before(ktime_get(), timeout));
816
817 return -ETIMEDOUT;
818}
819
Rajmohan Mani02d12852020-03-05 16:33:46 +0200820static int usb4_port_read_data(struct tb_port *port, void *data, size_t dwords)
821{
822 if (dwords > USB4_DATA_DWORDS)
823 return -EINVAL;
824
825 return tb_port_read(port, data, TB_CFG_PORT, port->cap_usb4 + PORT_CS_2,
826 dwords);
827}
828
829static int usb4_port_write_data(struct tb_port *port, const void *data,
830 size_t dwords)
831{
832 if (dwords > USB4_DATA_DWORDS)
833 return -EINVAL;
834
835 return tb_port_write(port, data, TB_CFG_PORT, port->cap_usb4 + PORT_CS_2,
836 dwords);
837}
838
/*
 * Perform a sideband register read of @size bytes from @reg of @target
 * (retimers are addressed by @index). Issues the request through
 * PORT_CS_1, waits for the pending bit to clear and then fetches the
 * data from the port data registers into @buf (pass %NULL to skip the
 * data fetch). Returns %-ENODEV if the target did not respond, %-EIO
 * if it completed with an error.
 */
static int usb4_port_sb_read(struct tb_port *port, enum usb4_sb_target target,
			     u8 index, u8 reg, void *buf, u8 size)
{
	size_t dwords = DIV_ROUND_UP(size, 4);
	int ret;
	u32 val;

	if (!port->cap_usb4)
		return -EINVAL;

	/* Build the request: register, length, target and pending bit */
	val = reg;
	val |= size << PORT_CS_1_LENGTH_SHIFT;
	val |= (target << PORT_CS_1_TARGET_SHIFT) & PORT_CS_1_TARGET_MASK;
	if (target == USB4_SB_TARGET_RETIMER)
		val |= (index << PORT_CS_1_RETIMER_INDEX_SHIFT);
	val |= PORT_CS_1_PND;

	ret = tb_port_write(port, &val, TB_CFG_PORT,
			    port->cap_usb4 + PORT_CS_1, 1);
	if (ret)
		return ret;

	/* The port clears the pending bit once the transaction is done */
	ret = usb4_port_wait_for_bit(port, port->cap_usb4 + PORT_CS_1,
				     PORT_CS_1_PND, 0, 500);
	if (ret)
		return ret;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			    port->cap_usb4 + PORT_CS_1, 1);
	if (ret)
		return ret;

	if (val & PORT_CS_1_NR)
		return -ENODEV;
	if (val & PORT_CS_1_RC)
		return -EIO;

	return buf ? usb4_port_read_data(port, buf, dwords) : 0;
}
878
/*
 * Perform a sideband register write of @size bytes from @buf to @reg of
 * @target (retimers are addressed by @index). Data is staged in the
 * port data registers first (pass %NULL @buf to send a data-less
 * request), then the write request is issued through PORT_CS_1 and the
 * completion is awaited. Returns %-ENODEV if the target did not
 * respond, %-EIO if it completed with an error.
 */
static int usb4_port_sb_write(struct tb_port *port, enum usb4_sb_target target,
			      u8 index, u8 reg, const void *buf, u8 size)
{
	size_t dwords = DIV_ROUND_UP(size, 4);
	int ret;
	u32 val;

	if (!port->cap_usb4)
		return -EINVAL;

	/* Stage the payload before issuing the request */
	if (buf) {
		ret = usb4_port_write_data(port, buf, dwords);
		if (ret)
			return ret;
	}

	/* Build the request: register, length, write, target, pending */
	val = reg;
	val |= size << PORT_CS_1_LENGTH_SHIFT;
	val |= PORT_CS_1_WNR_WRITE;
	val |= (target << PORT_CS_1_TARGET_SHIFT) & PORT_CS_1_TARGET_MASK;
	if (target == USB4_SB_TARGET_RETIMER)
		val |= (index << PORT_CS_1_RETIMER_INDEX_SHIFT);
	val |= PORT_CS_1_PND;

	ret = tb_port_write(port, &val, TB_CFG_PORT,
			    port->cap_usb4 + PORT_CS_1, 1);
	if (ret)
		return ret;

	/* The port clears the pending bit once the transaction is done */
	ret = usb4_port_wait_for_bit(port, port->cap_usb4 + PORT_CS_1,
				     PORT_CS_1_PND, 0, 500);
	if (ret)
		return ret;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			    port->cap_usb4 + PORT_CS_1, 1);
	if (ret)
		return ret;

	if (val & PORT_CS_1_NR)
		return -ENODEV;
	if (val & PORT_CS_1_RC)
		return -EIO;

	return 0;
}
925
/*
 * Run a sideband operation: write @opcode to the target's USB4_SB_OPCODE
 * register, then poll the same register until the target reports the
 * result. The target overwrites the opcode with %0 on success,
 * USB4_SB_OPCODE_ERR on error (mapped to %-EAGAIN) or USB4_SB_OPCODE_ONS
 * when the opcode is not supported (%-EOPNOTSUPP); any other value than
 * the echoed opcode is treated as %-EIO. Gives up after @timeout_msec.
 */
static int usb4_port_sb_op(struct tb_port *port, enum usb4_sb_target target,
			   u8 index, enum usb4_sb_opcode opcode, int timeout_msec)
{
	ktime_t timeout;
	u32 val;
	int ret;

	val = opcode;
	ret = usb4_port_sb_write(port, target, index, USB4_SB_OPCODE, &val,
				 sizeof(val));
	if (ret)
		return ret;

	timeout = ktime_add_ms(ktime_get(), timeout_msec);

	do {
		/* Check results */
		ret = usb4_port_sb_read(port, target, index, USB4_SB_OPCODE,
					&val, sizeof(val));
		if (ret)
			return ret;

		switch (val) {
		case 0:
			return 0;

		case USB4_SB_OPCODE_ERR:
			return -EAGAIN;

		case USB4_SB_OPCODE_ONS:
			return -EOPNOTSUPP;

		default:
			/* Still echoing the opcode means still in progress */
			if (val != opcode)
				return -EIO;
			break;
		}
	} while (ktime_before(ktime_get(), timeout));

	return -ETIMEDOUT;
}
967
968/**
969 * usb4_port_enumerate_retimers() - Send RT broadcast transaction
970 * @port: USB4 port
971 *
972 * This forces the USB4 port to send broadcast RT transaction which
973 * makes the retimers on the link to assign index to themselves. Returns
974 * %0 in case of success and negative errno if there was an error.
975 */
976int usb4_port_enumerate_retimers(struct tb_port *port)
977{
978 u32 val;
979
980 val = USB4_SB_OPCODE_ENUMERATE_RETIMERS;
981 return usb4_port_sb_write(port, USB4_SB_TARGET_ROUTER, 0,
982 USB4_SB_OPCODE, &val, sizeof(val));
983}
984
/* Convenience wrapper running a sideband operation on a retimer target */
static inline int usb4_port_retimer_op(struct tb_port *port, u8 index,
				       enum usb4_sb_opcode opcode,
				       int timeout_msec)
{
	return usb4_port_sb_op(port, USB4_SB_TARGET_RETIMER, index, opcode,
			       timeout_msec);
}
992
/**
 * usb4_port_retimer_read() - Read from retimer sideband registers
 * @port: USB4 port
 * @index: Retimer index
 * @reg: Sideband register to read
 * @buf: Data from @reg is stored here
 * @size: Number of bytes to read
 *
 * Function reads retimer sideband registers starting from @reg. The
 * retimer is connected to @port at @index. Returns %0 in case of
 * success, and read data is copied to @buf. If there is no retimer
 * present at given @index returns %-ENODEV. In any other failure
 * returns negative errno.
 */
int usb4_port_retimer_read(struct tb_port *port, u8 index, u8 reg, void *buf,
			   u8 size)
{
	return usb4_port_sb_read(port, USB4_SB_TARGET_RETIMER, index, reg, buf,
				 size);
}
1013
/**
 * usb4_port_retimer_write() - Write to retimer sideband registers
 * @port: USB4 port
 * @index: Retimer index
 * @reg: Sideband register to write
 * @buf: Data that is written starting from @reg
 * @size: Number of bytes to write
 *
 * Writes retimer sideband registers starting from @reg. The retimer is
 * connected to @port at @index. Returns %0 in case of success. If there
 * is no retimer present at given @index returns %-ENODEV. In any other
 * failure returns negative errno.
 */
int usb4_port_retimer_write(struct tb_port *port, u8 index, u8 reg,
			    const void *buf, u8 size)
{
	return usb4_port_sb_write(port, USB4_SB_TARGET_RETIMER, index, reg, buf,
				  size);
}
1033
1034/**
1035 * usb4_port_retimer_is_last() - Is the retimer last on-board retimer
1036 * @port: USB4 port
1037 * @index: Retimer index
1038 *
1039 * If the retimer at @index is last one (connected directly to the
1040 * Type-C port) this function returns %1. If it is not returns %0. If
1041 * the retimer is not present returns %-ENODEV. Otherwise returns
1042 * negative errno.
1043 */
1044int usb4_port_retimer_is_last(struct tb_port *port, u8 index)
1045{
1046 u32 metadata;
1047 int ret;
1048
1049 ret = usb4_port_retimer_op(port, index, USB4_SB_OPCODE_QUERY_LAST_RETIMER,
1050 500);
1051 if (ret)
1052 return ret;
1053
1054 ret = usb4_port_retimer_read(port, index, USB4_SB_METADATA, &metadata,
1055 sizeof(metadata));
1056 return ret ? ret : metadata & 1;
1057}
1058
1059/**
1060 * usb4_port_retimer_nvm_sector_size() - Read retimer NVM sector size
1061 * @port: USB4 port
1062 * @index: Retimer index
1063 *
1064 * Reads NVM sector size (in bytes) of a retimer at @index. This
1065 * operation can be used to determine whether the retimer supports NVM
1066 * upgrade for example. Returns sector size in bytes or negative errno
1067 * in case of error. Specifically returns %-ENODEV if there is no
1068 * retimer at @index.
1069 */
1070int usb4_port_retimer_nvm_sector_size(struct tb_port *port, u8 index)
1071{
1072 u32 metadata;
1073 int ret;
1074
1075 ret = usb4_port_retimer_op(port, index, USB4_SB_OPCODE_GET_NVM_SECTOR_SIZE,
1076 500);
1077 if (ret)
1078 return ret;
1079
1080 ret = usb4_port_retimer_read(port, index, USB4_SB_METADATA, &metadata,
1081 sizeof(metadata));
1082 return ret ? ret : metadata & USB4_NVM_SECTOR_SIZE_MASK;
1083}
1084
1085static int usb4_port_retimer_nvm_set_offset(struct tb_port *port, u8 index,
1086 unsigned int address)
1087{
1088 u32 metadata, dwaddress;
1089 int ret;
1090
1091 dwaddress = address / 4;
1092 metadata = (dwaddress << USB4_NVM_SET_OFFSET_SHIFT) &
1093 USB4_NVM_SET_OFFSET_MASK;
1094
1095 ret = usb4_port_retimer_write(port, index, USB4_SB_METADATA, &metadata,
1096 sizeof(metadata));
1097 if (ret)
1098 return ret;
1099
1100 return usb4_port_retimer_op(port, index, USB4_SB_OPCODE_NVM_SET_OFFSET,
1101 500);
1102}
1103
/* Callback context for the retimer NVM block read/write helpers */
struct retimer_info {
	struct tb_port *port;	/* USB4 port the retimer is reached through */
	u8 index;		/* Index of the retimer on that port */
};
1108
/*
 * Write one block of NVM data: stage up to USB4_DATA_DWORDS dwords into
 * the retimer sideband data registers and then issue the block write
 * operation to commit them. Used as the write_block_fn callback for
 * usb4_do_write_data().
 */
static int usb4_port_retimer_nvm_write_next_block(void *data, const void *buf,
						  size_t dwords)

{
	const struct retimer_info *info = data;
	struct tb_port *port = info->port;
	u8 index = info->index;
	int ret;

	/* Stage the data dwords into the sideband data registers first */
	ret = usb4_port_retimer_write(port, index, USB4_SB_DATA,
				      buf, dwords * 4);
	if (ret)
		return ret;

	/* Then tell the retimer to write the staged block (1 s timeout) */
	return usb4_port_retimer_op(port, index,
				    USB4_SB_OPCODE_NVM_BLOCK_WRITE, 1000);
}
1126
1127/**
1128 * usb4_port_retimer_nvm_write() - Write to retimer NVM
1129 * @port: USB4 port
1130 * @index: Retimer index
1131 * @address: Byte address where to start the write
1132 * @buf: Data to write
1133 * @size: Size in bytes how much to write
1134 *
1135 * Writes @size bytes from @buf to the retimer NVM. Used for NVM
1136 * upgrade. Returns %0 if the data was written successfully and negative
1137 * errno in case of failure. Specifically returns %-ENODEV if there is
1138 * no retimer at @index.
1139 */
1140int usb4_port_retimer_nvm_write(struct tb_port *port, u8 index, unsigned int address,
1141 const void *buf, size_t size)
1142{
1143 struct retimer_info info = { .port = port, .index = index };
1144 int ret;
1145
1146 ret = usb4_port_retimer_nvm_set_offset(port, index, address);
1147 if (ret)
1148 return ret;
1149
1150 return usb4_do_write_data(address, buf, size,
1151 usb4_port_retimer_nvm_write_next_block, &info);
1152}
1153
1154/**
1155 * usb4_port_retimer_nvm_authenticate() - Start retimer NVM upgrade
1156 * @port: USB4 port
1157 * @index: Retimer index
1158 *
1159 * After the new NVM image has been written via usb4_port_retimer_nvm_write()
1160 * this function can be used to trigger the NVM upgrade process. If
1161 * successful the retimer restarts with the new NVM and may not have the
1162 * index set so one needs to call usb4_port_enumerate_retimers() to
1163 * force index to be assigned.
1164 */
1165int usb4_port_retimer_nvm_authenticate(struct tb_port *port, u8 index)
1166{
1167 u32 val;
1168
1169 /*
1170 * We need to use the raw operation here because once the
1171 * authentication completes the retimer index is not set anymore
1172 * so we do not get back the status now.
1173 */
1174 val = USB4_SB_OPCODE_NVM_AUTH_WRITE;
1175 return usb4_port_sb_write(port, USB4_SB_TARGET_RETIMER, index,
1176 USB4_SB_OPCODE, &val, sizeof(val));
1177}
1178
1179/**
1180 * usb4_port_retimer_nvm_authenticate_status() - Read status of NVM upgrade
1181 * @port: USB4 port
1182 * @index: Retimer index
1183 * @status: Raw status code read from metadata
1184 *
1185 * This can be called after usb4_port_retimer_nvm_authenticate() and
1186 * usb4_port_enumerate_retimers() to fetch status of the NVM upgrade.
1187 *
1188 * Returns %0 if the authentication status was successfully read. The
1189 * completion metadata (the result) is then stored into @status. If
1190 * reading the status fails, returns negative errno.
1191 */
1192int usb4_port_retimer_nvm_authenticate_status(struct tb_port *port, u8 index,
1193 u32 *status)
1194{
1195 u32 metadata, val;
1196 int ret;
1197
1198 ret = usb4_port_retimer_read(port, index, USB4_SB_OPCODE, &val,
1199 sizeof(val));
1200 if (ret)
1201 return ret;
1202
1203 switch (val) {
1204 case 0:
1205 *status = 0;
1206 return 0;
1207
1208 case USB4_SB_OPCODE_ERR:
1209 ret = usb4_port_retimer_read(port, index, USB4_SB_METADATA,
1210 &metadata, sizeof(metadata));
1211 if (ret)
1212 return ret;
1213
1214 *status = metadata & USB4_SB_METADATA_NVM_AUTH_WRITE_MASK;
1215 return 0;
1216
1217 case USB4_SB_OPCODE_ONS:
1218 return -EOPNOTSUPP;
1219
1220 default:
1221 return -EIO;
1222 }
1223}
1224
/*
 * Read one block of retimer NVM: program the dword address (and, for a
 * partial block, the length) into the metadata register, run the NVM
 * read operation and then fetch the result from the sideband data
 * registers. Used as the read_block_fn callback for usb4_do_read_data().
 */
static int usb4_port_retimer_nvm_read_block(void *data, unsigned int dwaddress,
					    void *buf, size_t dwords)
{
	const struct retimer_info *info = data;
	struct tb_port *port = info->port;
	u8 index = info->index;
	u32 metadata;
	int ret;

	metadata = dwaddress << USB4_NVM_READ_OFFSET_SHIFT;
	/* Length field left at zero requests a full data block */
	if (dwords < USB4_DATA_DWORDS)
		metadata |= dwords << USB4_NVM_READ_LENGTH_SHIFT;

	ret = usb4_port_retimer_write(port, index, USB4_SB_METADATA, &metadata,
				      sizeof(metadata));
	if (ret)
		return ret;

	ret = usb4_port_retimer_op(port, index, USB4_SB_OPCODE_NVM_READ, 500);
	if (ret)
		return ret;

	/* Data is now available in the sideband data registers */
	return usb4_port_retimer_read(port, index, USB4_SB_DATA, buf,
				      dwords * 4);
}
1250
1251/**
1252 * usb4_port_retimer_nvm_read() - Read contents of retimer NVM
1253 * @port: USB4 port
1254 * @index: Retimer index
1255 * @address: NVM address (in bytes) to start reading
1256 * @buf: Data read from NVM is stored here
1257 * @size: Number of bytes to read
1258 *
1259 * Reads retimer NVM and copies the contents to @buf. Returns %0 if the
1260 * read was successful and negative errno in case of failure.
1261 * Specifically returns %-ENODEV if there is no retimer at @index.
1262 */
1263int usb4_port_retimer_nvm_read(struct tb_port *port, u8 index,
1264 unsigned int address, void *buf, size_t size)
1265{
1266 struct retimer_info info = { .port = port, .index = index };
1267
1268 return usb4_do_read_data(address, buf, size,
1269 usb4_port_retimer_nvm_read_block, &info);
1270}
1271
Mika Westerberg3b1d8d52020-02-21 23:14:41 +02001272/**
1273 * usb4_usb3_port_max_link_rate() - Maximum support USB3 link rate
1274 * @port: USB3 adapter port
1275 *
1276 * Return maximum supported link rate of a USB3 adapter in Mb/s.
1277 * Negative errno in case of error.
1278 */
1279int usb4_usb3_port_max_link_rate(struct tb_port *port)
1280{
1281 int ret, lr;
1282 u32 val;
1283
1284 if (!tb_port_is_usb3_down(port) && !tb_port_is_usb3_up(port))
1285 return -EINVAL;
1286
1287 ret = tb_port_read(port, &val, TB_CFG_PORT,
1288 port->cap_adap + ADP_USB3_CS_4, 1);
1289 if (ret)
1290 return ret;
1291
1292 lr = (val & ADP_USB3_CS_4_MSLR_MASK) >> ADP_USB3_CS_4_MSLR_SHIFT;
1293 return lr == ADP_USB3_CS_4_MSLR_20G ? 20000 : 10000;
1294}
1295
1296/**
1297 * usb4_usb3_port_actual_link_rate() - Established USB3 link rate
1298 * @port: USB3 adapter port
1299 *
1300 * Return actual established link rate of a USB3 adapter in Mb/s. If the
1301 * link is not up returns %0 and negative errno in case of failure.
1302 */
1303int usb4_usb3_port_actual_link_rate(struct tb_port *port)
1304{
1305 int ret, lr;
1306 u32 val;
1307
1308 if (!tb_port_is_usb3_down(port) && !tb_port_is_usb3_up(port))
1309 return -EINVAL;
1310
1311 ret = tb_port_read(port, &val, TB_CFG_PORT,
1312 port->cap_adap + ADP_USB3_CS_4, 1);
1313 if (ret)
1314 return ret;
1315
1316 if (!(val & ADP_USB3_CS_4_ULV))
1317 return 0;
1318
1319 lr = val & ADP_USB3_CS_4_ALR_MASK;
1320 return lr == ADP_USB3_CS_4_ALR_20G ? 20000 : 10000;
1321}
1322
1323static int usb4_usb3_port_cm_request(struct tb_port *port, bool request)
1324{
1325 int ret;
1326 u32 val;
1327
1328 if (!tb_port_is_usb3_down(port))
1329 return -EINVAL;
1330 if (tb_route(port->sw))
1331 return -EINVAL;
1332
1333 ret = tb_port_read(port, &val, TB_CFG_PORT,
1334 port->cap_adap + ADP_USB3_CS_2, 1);
1335 if (ret)
1336 return ret;
1337
1338 if (request)
1339 val |= ADP_USB3_CS_2_CMR;
1340 else
1341 val &= ~ADP_USB3_CS_2_CMR;
1342
1343 ret = tb_port_write(port, &val, TB_CFG_PORT,
1344 port->cap_adap + ADP_USB3_CS_2, 1);
1345 if (ret)
1346 return ret;
1347
1348 /*
1349 * We can use val here directly as the CMR bit is in the same place
1350 * as HCA. Just mask out others.
1351 */
1352 val &= ADP_USB3_CS_2_CMR;
1353 return usb4_port_wait_for_bit(port, port->cap_adap + ADP_USB3_CS_1,
1354 ADP_USB3_CS_1_HCA, val, 1500);
1355}
1356
/* Set the CM request (CMR) on the adapter and wait for acknowledgment */
static inline int usb4_usb3_port_set_cm_request(struct tb_port *port)
{
	return usb4_usb3_port_cm_request(port, true);
}
1361
/* Clear the CM request (CMR) on the adapter and wait for acknowledgment */
static inline int usb4_usb3_port_clear_cm_request(struct tb_port *port)
{
	return usb4_usb3_port_cm_request(port, false);
}
1366
1367static unsigned int usb3_bw_to_mbps(u32 bw, u8 scale)
1368{
1369 unsigned long uframes;
1370
Colin Ian King4c767ce2020-06-30 15:55:58 +01001371 uframes = bw * 512UL << scale;
Mika Westerberg3b1d8d52020-02-21 23:14:41 +02001372 return DIV_ROUND_CLOSEST(uframes * 8000, 1000 * 1000);
1373}
1374
1375static u32 mbps_to_usb3_bw(unsigned int mbps, u8 scale)
1376{
1377 unsigned long uframes;
1378
1379 /* 1 uframe is 1/8 ms (125 us) -> 1 / 8000 s */
1380 uframes = ((unsigned long)mbps * 1000 * 1000) / 8000;
Colin Ian King4c767ce2020-06-30 15:55:58 +01001381 return DIV_ROUND_UP(uframes, 512UL << scale);
Mika Westerberg3b1d8d52020-02-21 23:14:41 +02001382}
1383
1384static int usb4_usb3_port_read_allocated_bandwidth(struct tb_port *port,
1385 int *upstream_bw,
1386 int *downstream_bw)
1387{
1388 u32 val, bw, scale;
1389 int ret;
1390
1391 ret = tb_port_read(port, &val, TB_CFG_PORT,
1392 port->cap_adap + ADP_USB3_CS_2, 1);
1393 if (ret)
1394 return ret;
1395
1396 ret = tb_port_read(port, &scale, TB_CFG_PORT,
1397 port->cap_adap + ADP_USB3_CS_3, 1);
1398 if (ret)
1399 return ret;
1400
1401 scale &= ADP_USB3_CS_3_SCALE_MASK;
1402
1403 bw = val & ADP_USB3_CS_2_AUBW_MASK;
1404 *upstream_bw = usb3_bw_to_mbps(bw, scale);
1405
1406 bw = (val & ADP_USB3_CS_2_ADBW_MASK) >> ADP_USB3_CS_2_ADBW_SHIFT;
1407 *downstream_bw = usb3_bw_to_mbps(bw, scale);
1408
1409 return 0;
1410}
1411
/**
 * usb4_usb3_port_allocated_bandwidth() - Bandwidth allocated for USB3
 * @port: USB3 adapter port
 * @upstream_bw: Allocated upstream bandwidth is stored here
 * @downstream_bw: Allocated downstream bandwidth is stored here
 *
 * Stores currently allocated USB3 bandwidth into @upstream_bw and
 * @downstream_bw in Mb/s. Returns %0 in case of success and negative
 * errno in failure.
 */
int usb4_usb3_port_allocated_bandwidth(struct tb_port *port, int *upstream_bw,
				       int *downstream_bw)
{
	int ret = usb4_usb3_port_set_cm_request(port);

	if (ret)
		return ret;

	/* Read under an active CM request and always release it */
	ret = usb4_usb3_port_read_allocated_bandwidth(port, upstream_bw,
						      downstream_bw);
	usb4_usb3_port_clear_cm_request(port);
	return ret;
}
1437
1438static int usb4_usb3_port_read_consumed_bandwidth(struct tb_port *port,
1439 int *upstream_bw,
1440 int *downstream_bw)
1441{
1442 u32 val, bw, scale;
1443 int ret;
1444
1445 ret = tb_port_read(port, &val, TB_CFG_PORT,
1446 port->cap_adap + ADP_USB3_CS_1, 1);
1447 if (ret)
1448 return ret;
1449
1450 ret = tb_port_read(port, &scale, TB_CFG_PORT,
1451 port->cap_adap + ADP_USB3_CS_3, 1);
1452 if (ret)
1453 return ret;
1454
1455 scale &= ADP_USB3_CS_3_SCALE_MASK;
1456
1457 bw = val & ADP_USB3_CS_1_CUBW_MASK;
1458 *upstream_bw = usb3_bw_to_mbps(bw, scale);
1459
1460 bw = (val & ADP_USB3_CS_1_CDBW_MASK) >> ADP_USB3_CS_1_CDBW_SHIFT;
1461 *downstream_bw = usb3_bw_to_mbps(bw, scale);
1462
1463 return 0;
1464}
1465
1466static int usb4_usb3_port_write_allocated_bandwidth(struct tb_port *port,
1467 int upstream_bw,
1468 int downstream_bw)
1469{
1470 u32 val, ubw, dbw, scale;
1471 int ret;
1472
1473 /* Read the used scale, hardware default is 0 */
1474 ret = tb_port_read(port, &scale, TB_CFG_PORT,
1475 port->cap_adap + ADP_USB3_CS_3, 1);
1476 if (ret)
1477 return ret;
1478
1479 scale &= ADP_USB3_CS_3_SCALE_MASK;
1480 ubw = mbps_to_usb3_bw(upstream_bw, scale);
1481 dbw = mbps_to_usb3_bw(downstream_bw, scale);
1482
1483 ret = tb_port_read(port, &val, TB_CFG_PORT,
1484 port->cap_adap + ADP_USB3_CS_2, 1);
1485 if (ret)
1486 return ret;
1487
1488 val &= ~(ADP_USB3_CS_2_AUBW_MASK | ADP_USB3_CS_2_ADBW_MASK);
1489 val |= dbw << ADP_USB3_CS_2_ADBW_SHIFT;
1490 val |= ubw;
1491
1492 return tb_port_write(port, &val, TB_CFG_PORT,
1493 port->cap_adap + ADP_USB3_CS_2, 1);
1494}
1495
/**
 * usb4_usb3_port_allocate_bandwidth() - Allocate bandwidth for USB3
 * @port: USB3 adapter port
 * @upstream_bw: New upstream bandwidth
 * @downstream_bw: New downstream bandwidth
 *
 * This can be used to set how much bandwidth is allocated for the USB3
 * tunneled isochronous traffic. @upstream_bw and @downstream_bw are the
 * new values programmed to the USB3 adapter allocation registers. If
 * the values are lower than what is currently consumed the allocation
 * is set to what is currently consumed instead (consumed bandwidth
 * cannot be taken away by CM). The actual new values are returned in
 * @upstream_bw and @downstream_bw.
 *
 * Returns %0 in case of success and negative errno if there was a
 * failure.
 */
int usb4_usb3_port_allocate_bandwidth(struct tb_port *port, int *upstream_bw,
				      int *downstream_bw)
{
	int consumed_up, consumed_down;
	int new_up, new_down;
	int ret;

	ret = usb4_usb3_port_set_cm_request(port);
	if (ret)
		return ret;

	ret = usb4_usb3_port_read_consumed_bandwidth(port, &consumed_up,
						     &consumed_down);
	if (ret)
		goto err_request;

	/* Never allocate less than what is currently consumed */
	new_up = max(*upstream_bw, consumed_up);
	new_down = max(*downstream_bw, consumed_down);

	ret = usb4_usb3_port_write_allocated_bandwidth(port, new_up, new_down);
	if (ret)
		goto err_request;

	/* Report the values actually programmed back to the caller */
	*upstream_bw = new_up;
	*downstream_bw = new_down;

err_request:
	usb4_usb3_port_clear_cm_request(port);
	return ret;
}
1543
/**
 * usb4_usb3_port_release_bandwidth() - Release allocated USB3 bandwidth
 * @port: USB3 adapter port
 * @upstream_bw: New allocated upstream bandwidth
 * @downstream_bw: New allocated downstream bandwidth
 *
 * Releases USB3 allocated bandwidth down to what is actually consumed.
 * The new bandwidth is returned in @upstream_bw and @downstream_bw.
 *
 * Returns %0 in success and negative errno in case of failure.
 */
int usb4_usb3_port_release_bandwidth(struct tb_port *port, int *upstream_bw,
				     int *downstream_bw)
{
	int ret, consumed_up, consumed_down;

	ret = usb4_usb3_port_set_cm_request(port);
	if (ret)
		return ret;

	ret = usb4_usb3_port_read_consumed_bandwidth(port, &consumed_up,
						     &consumed_down);
	if (ret)
		goto err_request;

	/*
	 * Always keep 1000 Mb/s to make sure xHCI has at least some
	 * bandwidth available for isochronous traffic.
	 */
	if (consumed_up < 1000)
		consumed_up = 1000;
	if (consumed_down < 1000)
		consumed_down = 1000;

	ret = usb4_usb3_port_write_allocated_bandwidth(port, consumed_up,
						       consumed_down);
	if (ret)
		goto err_request;

	/* Report the released-to values back to the caller */
	*upstream_bw = consumed_up;
	*downstream_bw = consumed_down;

err_request:
	usb4_usb3_port_clear_cm_request(port);
	return ret;
}