/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Thunderbolt driver - bus logic (NHI independent)
 *
 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
 * Copyright (C) 2018, Intel Corporation
 */

#ifndef TB_H_
#define TB_H_

#include <linux/nvmem-provider.h>
#include <linux/pci.h>
#include <linux/thunderbolt.h>
#include <linux/uuid.h>

#include "tb_regs.h"
#include "ctl.h"
#include "dma_port.h"

#define NVM_MIN_SIZE		SZ_32K
#define NVM_MAX_SIZE		SZ_512K
#define NVM_DATA_DWORDS		16

/* Intel specific NVM offsets */
#define NVM_DEVID		0x05
#define NVM_VERSION		0x08
#define NVM_FLASH_SIZE		0x45

/**
 * struct tb_nvm - Structure holding NVM information
 * @dev: Owner of the NVM
 * @major: Major version number of the active NVM portion
 * @minor: Minor version number of the active NVM portion
 * @id: Identifier used with both NVM portions
 * @active: Active portion NVMem device
 * @non_active: Non-active portion NVMem device
 * @buf: Buffer where the NVM image is stored before it is written to
 *	 the actual NVM flash device
 * @buf_data_size: Number of bytes actually consumed by the new NVM
 *		   image
 * @authenticating: The device is authenticating the new NVM
 * @flushed: The image has been flushed to the storage area
 *
 * The user of this structure needs to handle serialization of possible
 * concurrent access.
 */
struct tb_nvm {
	struct device *dev;
	u8 major;
	u8 minor;
	int id;
	struct nvmem_device *active;
	struct nvmem_device *non_active;
	void *buf;
	size_t buf_data_size;
	bool authenticating;
	bool flushed;
};
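
/*
 * Illustrative sketch (not part of this header): struct tb_nvm itself
 * provides no locking, so a caller is expected to serialize access, for
 * example under the domain lock as the switch NVM write path does. The
 * variable names below are hypothetical.
 *
 *	mutex_lock(&sw->tb->lock);		// assumed serialization point
 *	memcpy(nvm->buf + offset, image, bytes);
 *	nvm->buf_data_size = offset + bytes;
 *	mutex_unlock(&sw->tb->lock);
 */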

#define TB_SWITCH_KEY_SIZE		32
#define TB_SWITCH_MAX_DEPTH		6
#define USB4_SWITCH_MAX_DEPTH		5

/**
 * enum tb_switch_tmu_rate - TMU refresh rate
 * @TB_SWITCH_TMU_RATE_OFF: %0 (Disable Time Sync handshake)
 * @TB_SWITCH_TMU_RATE_HIFI: %16 us time interval between successive
 *			     transmission of the Delay Request TSNOS
 *			     (Time Sync Notification Ordered Set) on a Link
 * @TB_SWITCH_TMU_RATE_NORMAL: %1 ms time interval between successive
 *			       transmission of the Delay Request TSNOS on
 *			       a Link
 */
enum tb_switch_tmu_rate {
	TB_SWITCH_TMU_RATE_OFF = 0,
	TB_SWITCH_TMU_RATE_HIFI = 16,
	TB_SWITCH_TMU_RATE_NORMAL = 1000,
};

/**
 * struct tb_switch_tmu - Structure holding switch TMU configuration
 * @cap: Offset to the TMU capability (%0 if not found)
 * @has_ucap: Does the switch support uni-directional mode
 * @rate: TMU refresh rate related to upstream switch. In case of root
 *	  switch this holds the domain rate.
 * @unidirectional: Is the TMU in uni-directional or bi-directional mode
 *		    related to upstream switch. Don't care for root switch.
 */
struct tb_switch_tmu {
	int cap;
	bool has_ucap;
	enum tb_switch_tmu_rate rate;
	bool unidirectional;
};

/**
 * struct tb_switch - a thunderbolt switch
 * @dev: Device for the switch
 * @config: Switch configuration
 * @ports: Ports in this switch
 * @dma_port: If the switch has a port supporting DMA configuration based
 *	      mailbox this will hold the pointer to that (%NULL
 *	      otherwise). If set it also means the switch has
 *	      upgradeable NVM.
 * @tmu: The switch TMU configuration
 * @tb: Pointer to the domain the switch belongs to
 * @uid: Unique ID of the switch
 * @uuid: UUID of the switch (or %NULL if not supported)
 * @vendor: Vendor ID of the switch
 * @device: Device ID of the switch
 * @vendor_name: Name of the vendor (or %NULL if not known)
 * @device_name: Name of the device (or %NULL if not known)
 * @link_speed: Speed of the link in Gb/s
 * @link_width: Width of the link (1 or 2)
 * @link_usb4: Upstream link is USB4
 * @generation: Switch Thunderbolt generation
 * @cap_plug_events: Offset to the plug events capability (%0 if not found)
 * @cap_lc: Offset to the link controller capability (%0 if not found)
 * @is_unplugged: The switch is going away
 * @drom: DROM of the switch (%NULL if not found)
 * @nvm: Pointer to the NVM if the switch has one (%NULL otherwise)
 * @no_nvm_upgrade: Prevent NVM upgrade of this switch
 * @safe_mode: The switch is in safe-mode
 * @boot: Whether the switch was already authorized on boot or not
 * @rpm: The switch supports runtime PM
 * @authorized: Whether the switch is authorized by user or policy
 * @security_level: Switch supported security level
 * @debugfs_dir: Pointer to the debugfs structure
 * @key: Contains the key used to challenge the device or %NULL if not
 *	 supported. Size of the key is %TB_SWITCH_KEY_SIZE.
 * @connection_id: Connection ID used with ICM messaging
 * @connection_key: Connection key used with ICM messaging
 * @link: Root switch link this switch is connected to (ICM only)
 * @depth: Depth in the chain this switch is connected at (ICM only)
 * @rpm_complete: Completion used to wait for runtime resume to
 *		  complete (ICM only)
 * @quirks: Quirks used for this Thunderbolt switch
 * @credit_allocation: Are the below buffer allocation parameters valid
 * @max_usb3_credits: Router preferred number of buffers for USB 3.x
 * @min_dp_aux_credits: Router preferred minimum number of buffers for DP AUX
 * @min_dp_main_credits: Router preferred minimum number of buffers for DP MAIN
 * @max_pcie_credits: Router preferred number of buffers for PCIe
 * @max_dma_credits: Router preferred number of buffers for DMA/P2P
 *
 * When the switch is being added to or removed from the domain (other
 * switches), the domain lock must be held.
 *
 * In USB4 terminology this structure represents a router.
 */
struct tb_switch {
	struct device dev;
	struct tb_regs_switch_header config;
	struct tb_port *ports;
	struct tb_dma_port *dma_port;
	struct tb_switch_tmu tmu;
	struct tb *tb;
	u64 uid;
	uuid_t *uuid;
	u16 vendor;
	u16 device;
	const char *vendor_name;
	const char *device_name;
	unsigned int link_speed;
	unsigned int link_width;
	bool link_usb4;
	unsigned int generation;
	int cap_plug_events;
	int cap_lc;
	bool is_unplugged;
	u8 *drom;
	struct tb_nvm *nvm;
	bool no_nvm_upgrade;
	bool safe_mode;
	bool boot;
	bool rpm;
	unsigned int authorized;
	enum tb_security_level security_level;
	struct dentry *debugfs_dir;
	u8 *key;
	u8 connection_id;
	u8 connection_key;
	u8 link;
	u8 depth;
	struct completion rpm_complete;
	unsigned long quirks;
	bool credit_allocation;
	unsigned int max_usb3_credits;
	unsigned int min_dp_aux_credits;
	unsigned int min_dp_main_credits;
	unsigned int max_pcie_credits;
	unsigned int max_dma_credits;
};

/**
 * struct tb_port - a thunderbolt port, part of a tb_switch
 * @config: Cached port configuration read from registers
 * @sw: Switch the port belongs to
 * @remote: Remote port (%NULL if not connected)
 * @xdomain: Remote host (%NULL if not connected)
 * @cap_phy: Offset, zero if not found
 * @cap_tmu: Offset of the adapter specific TMU capability (%0 if not present)
 * @cap_adap: Offset of the adapter specific capability (%0 if not present)
 * @cap_usb4: Offset to the USB4 port capability (%0 if not present)
 * @usb4: Pointer to the USB4 port structure (only if @cap_usb4 is != %0)
 * @port: Port number on switch
 * @disabled: Disabled by eeprom or enabled but not implemented
 * @bonded: true if the port is bonded (two lanes combined as one)
 * @dual_link_port: If the switch is connected using two ports, points
 *		    to the other port.
 * @link_nr: Is this primary or secondary port on the dual_link.
 * @in_hopids: Currently allocated input HopIDs
 * @out_hopids: Currently allocated output HopIDs
 * @list: Used to link ports to DP resources list
 * @total_credits: Total number of buffers available for this port
 * @ctl_credits: Buffers reserved for control path
 * @dma_credits: Number of credits allocated for DMA tunneling for all
 *		 DMA paths through this port.
 *
 * In USB4 terminology this structure represents an adapter (protocol or
 * lane adapter).
 */
struct tb_port {
	struct tb_regs_port_header config;
	struct tb_switch *sw;
	struct tb_port *remote;
	struct tb_xdomain *xdomain;
	int cap_phy;
	int cap_tmu;
	int cap_adap;
	int cap_usb4;
	struct usb4_port *usb4;
	u8 port;
	bool disabled;
	bool bonded;
	struct tb_port *dual_link_port;
	u8 link_nr:1;
	struct ida in_hopids;
	struct ida out_hopids;
	struct list_head list;
	unsigned int total_credits;
	unsigned int ctl_credits;
	unsigned int dma_credits;
};

/**
 * struct usb4_port - USB4 port device
 * @dev: Device for the port
 * @port: Pointer to the lane 0 adapter
 * @can_offline: Does the port have the necessary platform support to move
 *		 it into offline mode and back
 */
struct usb4_port {
	struct device dev;
	struct tb_port *port;
	bool can_offline;
};

/**
 * struct tb_retimer - Thunderbolt retimer
 * @dev: Device for the retimer
 * @tb: Pointer to the domain the retimer belongs to
 * @index: Retimer index facing the router USB4 port
 * @vendor: Vendor ID of the retimer
 * @device: Device ID of the retimer
 * @port: Pointer to the lane 0 adapter
 * @nvm: Pointer to the NVM if the retimer has one (%NULL otherwise)
 * @auth_status: Status of last NVM authentication
 */
struct tb_retimer {
	struct device dev;
	struct tb *tb;
	u8 index;
	u32 vendor;
	u32 device;
	struct tb_port *port;
	struct tb_nvm *nvm;
	u32 auth_status;
};

/**
 * struct tb_path_hop - routing information for a tb_path
 * @in_port: Ingress port of a switch
 * @out_port: Egress port of a switch where the packet is routed out
 *	      (must be on the same switch as @in_port)
 * @in_hop_index: HopID where the path configuration entry is placed in
 *		  the path config space of @in_port.
 * @in_counter_index: Used counter index (not used in the driver
 *		      currently, %-1 to disable)
 * @next_hop_index: HopID of the packet when it is routed out from @out_port
 * @initial_credits: Number of initial flow control credits allocated for
 *		     the path
 * @nfc_credits: Number of non-flow controlled buffers allocated for the
 *		 @in_port.
 *
 * Hop configuration is always done on the IN port of a switch.
 * in_port and out_port have to be on the same switch. Packets arriving on
 * in_port with "hop" = in_hop_index will get routed out through out_port.
 * The next hop to take (on out_port->remote) is determined by
 * next_hop_index. When routing a packet to another switch (out->remote is
 * set) the @next_hop_index must match the @in_hop_index of that next
 * hop to make routing possible.
 *
 * in_counter_index is the index of a counter (in TB_CFG_COUNTERS) on the in
 * port.
 */
struct tb_path_hop {
	struct tb_port *in_port;
	struct tb_port *out_port;
	int in_hop_index;
	int in_counter_index;
	int next_hop_index;
	unsigned int initial_credits;
	unsigned int nfc_credits;
};
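
/*
 * Illustrative sketch (not part of this header): two consecutive hops of
 * a path must agree on the HopID used on the inter-switch link, as the
 * kernel-doc above describes. The variable names are hypothetical.
 *
 *	struct tb_path_hop hops[2];
 *
 *	hops[0].in_port = src_port;
 *	hops[0].out_port = src_upstream;
 *	hops[0].next_hop_index = 8;		// HopID used on the link
 *	hops[1].in_port = src_upstream->remote;
 *	hops[1].in_hop_index = 8;		// must match next_hop_index above
 */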

/**
 * enum tb_path_port - path options mask
 * @TB_PATH_NONE: Do not activate on any hop on path
 * @TB_PATH_SOURCE: Activate on the first hop (out of src)
 * @TB_PATH_INTERNAL: Activate on the intermediate hops (not the first/last)
 * @TB_PATH_DESTINATION: Activate on the last hop (into dst)
 * @TB_PATH_ALL: Activate on all hops on the path
 */
enum tb_path_port {
	TB_PATH_NONE = 0,
	TB_PATH_SOURCE = 1,
	TB_PATH_INTERNAL = 2,
	TB_PATH_DESTINATION = 4,
	TB_PATH_ALL = 7,
};
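
/*
 * Illustrative sketch (not part of this header): the values form a
 * bitmask, so the per-hop options of a path (see struct tb_path below)
 * can be combined, e.g. to enable flow control everywhere except the
 * last hop:
 *
 *	path->ingress_fc_enable = TB_PATH_SOURCE | TB_PATH_INTERNAL;
 */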

/**
 * struct tb_path - a unidirectional path between two ports
 * @tb: Pointer to the domain structure
 * @name: Name of the path (used for debugging)
 * @ingress_shared_buffer: Shared buffering used for ingress ports on the path
 * @egress_shared_buffer: Shared buffering used for egress ports on the path
 * @ingress_fc_enable: Flow control for ingress ports on the path
 * @egress_fc_enable: Flow control for egress ports on the path
 * @priority: Priority group of the path
 * @weight: Weight of the path inside the priority group
 * @drop_packages: Drop packets from queue tail or head
 * @activated: Is the path active
 * @clear_fc: Clear all flow control from the path config space entries
 *	      when deactivating this path
 * @hops: Path hops
 * @path_length: How many hops the path uses
 *
 * A path consists of a number of hops (see &struct tb_path_hop). To
 * establish a PCIe tunnel two paths have to be created between the two
 * PCIe ports.
 */
struct tb_path {
	struct tb *tb;
	const char *name;
	enum tb_path_port ingress_shared_buffer;
	enum tb_path_port egress_shared_buffer;
	enum tb_path_port ingress_fc_enable;
	enum tb_path_port egress_fc_enable;

	unsigned int priority:3;
	int weight:4;
	bool drop_packages;
	bool activated;
	bool clear_fc;
	struct tb_path_hop *hops;
	int path_length;
};

/* HopIDs 0-7 are reserved by the Thunderbolt protocol */
#define TB_PATH_MIN_HOPID	8
/*
 * Support paths from the farthest (depth 6) router to the host and back
 * to the same level (not necessarily to the same router).
 */
#define TB_PATH_MAX_HOPS	(7 * 2)

/* Possible wake types */
#define TB_WAKE_ON_CONNECT	BIT(0)
#define TB_WAKE_ON_DISCONNECT	BIT(1)
#define TB_WAKE_ON_USB4		BIT(2)
#define TB_WAKE_ON_USB3		BIT(3)
#define TB_WAKE_ON_PCIE		BIT(4)
#define TB_WAKE_ON_DP		BIT(5)
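
/*
 * Illustrative sketch (not part of this header): the wake types are a
 * bitmask, so a caller ORs together the events that should wake the
 * router up before programming them; the setter used here is assumed to
 * be the USB4 wake helper declared elsewhere in the driver.
 *
 *	unsigned int flags = TB_WAKE_ON_USB4 | TB_WAKE_ON_USB3 |
 *			     TB_WAKE_ON_PCIE | TB_WAKE_ON_DP;
 *
 *	usb4_switch_set_wake(sw, flags);
 */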

/**
 * struct tb_cm_ops - Connection manager specific operations vector
 * @driver_ready: Called right after control channel is started. Used by
 *		  ICM to send driver ready message to the firmware.
 * @start: Starts the domain
 * @stop: Stops the domain
 * @suspend_noirq: Connection manager specific suspend_noirq
 * @resume_noirq: Connection manager specific resume_noirq
 * @suspend: Connection manager specific suspend
 * @freeze_noirq: Connection manager specific freeze_noirq
 * @thaw_noirq: Connection manager specific thaw_noirq
 * @complete: Connection manager specific complete
 * @runtime_suspend: Connection manager specific runtime_suspend
 * @runtime_resume: Connection manager specific runtime_resume
 * @runtime_suspend_switch: Runtime suspend a switch
 * @runtime_resume_switch: Runtime resume a switch
 * @handle_event: Handle thunderbolt event
 * @get_boot_acl: Get boot ACL list
 * @set_boot_acl: Set boot ACL list
 * @disapprove_switch: Disapprove switch (disconnect PCIe tunnel)
 * @approve_switch: Approve switch
 * @add_switch_key: Add key to switch
 * @challenge_switch_key: Challenge switch using key
 * @disconnect_pcie_paths: Disconnects PCIe paths before NVM update
 * @approve_xdomain_paths: Approve (establish) XDomain DMA paths
 * @disconnect_xdomain_paths: Disconnect XDomain DMA paths
 * @usb4_switch_op: Optional proxy for USB4 router operations. If set
 *		    this will be called whenever a USB4 router operation
 *		    is performed. If this returns %-EOPNOTSUPP then the
 *		    native USB4 router operation is called.
 * @usb4_switch_nvm_authenticate_status: Optional callback the CM
 *					 implementation can use to return
 *					 the status of the USB4 NVM_AUTH
 *					 router operation.
 */
struct tb_cm_ops {
	int (*driver_ready)(struct tb *tb);
	int (*start)(struct tb *tb);
	void (*stop)(struct tb *tb);
	int (*suspend_noirq)(struct tb *tb);
	int (*resume_noirq)(struct tb *tb);
	int (*suspend)(struct tb *tb);
	int (*freeze_noirq)(struct tb *tb);
	int (*thaw_noirq)(struct tb *tb);
	void (*complete)(struct tb *tb);
	int (*runtime_suspend)(struct tb *tb);
	int (*runtime_resume)(struct tb *tb);
	int (*runtime_suspend_switch)(struct tb_switch *sw);
	int (*runtime_resume_switch)(struct tb_switch *sw);
	void (*handle_event)(struct tb *tb, enum tb_cfg_pkg_type,
			     const void *buf, size_t size);
	int (*get_boot_acl)(struct tb *tb, uuid_t *uuids, size_t nuuids);
	int (*set_boot_acl)(struct tb *tb, const uuid_t *uuids, size_t nuuids);
	int (*disapprove_switch)(struct tb *tb, struct tb_switch *sw);
	int (*approve_switch)(struct tb *tb, struct tb_switch *sw);
	int (*add_switch_key)(struct tb *tb, struct tb_switch *sw);
	int (*challenge_switch_key)(struct tb *tb, struct tb_switch *sw,
				    const u8 *challenge, u8 *response);
	int (*disconnect_pcie_paths)(struct tb *tb);
	int (*approve_xdomain_paths)(struct tb *tb, struct tb_xdomain *xd,
				     int transmit_path, int transmit_ring,
				     int receive_path, int receive_ring);
	int (*disconnect_xdomain_paths)(struct tb *tb, struct tb_xdomain *xd,
					int transmit_path, int transmit_ring,
					int receive_path, int receive_ring);
	int (*usb4_switch_op)(struct tb_switch *sw, u16 opcode, u32 *metadata,
			      u8 *status, const void *tx_data, size_t tx_data_len,
			      void *rx_data, size_t rx_data_len);
	int (*usb4_switch_nvm_authenticate_status)(struct tb_switch *sw,
						   u32 *status);
};

static inline void *tb_priv(struct tb *tb)
{
	return (void *)tb->privdata;
}
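
/*
 * Illustrative sketch (not part of this header): a connection manager
 * typically allocates the domain with room for its private data and then
 * accesses that data through tb_priv(). The structure, field, and the
 * TB_TIMEOUT constant below are hypothetical, but the software CM in
 * tb.c follows the same pattern.
 *
 *	struct tb_cm {
 *		struct list_head tunnel_list;
 *	};
 *
 *	tb = tb_domain_alloc(nhi, TB_TIMEOUT, sizeof(struct tb_cm));
 *	tcm = tb_priv(tb);
 *	INIT_LIST_HEAD(&tcm->tunnel_list);
 */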

#define TB_AUTOSUSPEND_DELAY		15000 /* ms */

/* helper functions & macros */

/**
 * tb_upstream_port() - return the upstream port of a switch
 * @sw: Switch to get the upstream port from
 *
 * Every switch has an upstream port (for the root switch it is the NHI).
 *
 * During switch alloc/init tb_upstream_port()->remote may be NULL, even for
 * non root switches (on the NHI port remote is always NULL).
 *
 * Return: Returns the upstream port of the switch.
 */
static inline struct tb_port *tb_upstream_port(struct tb_switch *sw)
{
	return &sw->ports[sw->config.upstream_port_number];
}

/**
 * tb_is_upstream_port() - Is the port upstream facing
 * @port: Port to check
 *
 * Returns true if @port is an upstream facing port. In case of dual link
 * ports both return true.
 */
static inline bool tb_is_upstream_port(const struct tb_port *port)
{
	const struct tb_port *upstream_port = tb_upstream_port(port->sw);
	return port == upstream_port || port->dual_link_port == upstream_port;
}

static inline u64 tb_route(const struct tb_switch *sw)
{
	return ((u64) sw->config.route_hi) << 32 | sw->config.route_lo;
}

static inline struct tb_port *tb_port_at(u64 route, struct tb_switch *sw)
{
	u8 port;

	port = route >> (sw->config.depth * 8);
	if (WARN_ON(port > sw->config.max_port_number))
		return NULL;
	return &sw->ports[port];
}
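
/*
 * Illustrative sketch (not part of this header): the route string packs
 * one downstream port number per byte, indexed by depth, which is what
 * tb_port_at() above decodes. For example, route 0x0301 names the switch
 * reached by going out port 1 of the root switch (depth 0) and then out
 * port 3 of the depth 1 switch, so
 *
 *	tb_port_at(0x0301, root_sw)
 *
 * returns &root_sw->ports[1], the first hop of that route.
 */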

/**
 * tb_port_has_remote() - Does the port have a switch connected downstream
 * @port: Port to check
 *
 * Returns true only when the port is the primary port and has a remote set.
 */
static inline bool tb_port_has_remote(const struct tb_port *port)
{
	if (tb_is_upstream_port(port))
		return false;
	if (!port->remote)
		return false;
	if (port->dual_link_port && port->link_nr)
		return false;

	return true;
}

static inline bool tb_port_is_null(const struct tb_port *port)
{
	return port && port->port && port->config.type == TB_TYPE_PORT;
}

static inline bool tb_port_is_nhi(const struct tb_port *port)
{
	return port && port->config.type == TB_TYPE_NHI;
}

static inline bool tb_port_is_pcie_down(const struct tb_port *port)
{
	return port && port->config.type == TB_TYPE_PCIE_DOWN;
}

static inline bool tb_port_is_pcie_up(const struct tb_port *port)
{
	return port && port->config.type == TB_TYPE_PCIE_UP;
}

static inline bool tb_port_is_dpin(const struct tb_port *port)
{
	return port && port->config.type == TB_TYPE_DP_HDMI_IN;
}

static inline bool tb_port_is_dpout(const struct tb_port *port)
{
	return port && port->config.type == TB_TYPE_DP_HDMI_OUT;
}

static inline bool tb_port_is_usb3_down(const struct tb_port *port)
{
	return port && port->config.type == TB_TYPE_USB3_DOWN;
}

static inline bool tb_port_is_usb3_up(const struct tb_port *port)
{
	return port && port->config.type == TB_TYPE_USB3_UP;
}

static inline int tb_sw_read(struct tb_switch *sw, void *buffer,
			     enum tb_cfg_space space, u32 offset, u32 length)
{
	if (sw->is_unplugged)
		return -ENODEV;
	return tb_cfg_read(sw->tb->ctl,
			   buffer,
			   tb_route(sw),
			   0,
			   space,
			   offset,
			   length);
}

static inline int tb_sw_write(struct tb_switch *sw, const void *buffer,
			      enum tb_cfg_space space, u32 offset, u32 length)
{
	if (sw->is_unplugged)
		return -ENODEV;
	return tb_cfg_write(sw->tb->ctl,
			    buffer,
			    tb_route(sw),
			    0,
			    space,
			    offset,
			    length);
}

static inline int tb_port_read(struct tb_port *port, void *buffer,
			       enum tb_cfg_space space, u32 offset, u32 length)
{
	if (port->sw->is_unplugged)
		return -ENODEV;
	return tb_cfg_read(port->sw->tb->ctl,
			   buffer,
			   tb_route(port->sw),
			   port->port,
			   space,
			   offset,
			   length);
}

static inline int tb_port_write(struct tb_port *port, const void *buffer,
				enum tb_cfg_space space, u32 offset, u32 length)
{
	if (port->sw->is_unplugged)
		return -ENODEV;
	return tb_cfg_write(port->sw->tb->ctl,
			    buffer,
			    tb_route(port->sw),
			    port->port,
			    space,
			    offset,
			    length);
}
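
/*
 * Illustrative sketch (not part of this header): reading one dword from
 * a port's PHY capability in the port config space with the helper
 * above. TB_CFG_PORT comes from enum tb_cfg_space in
 * <linux/thunderbolt.h>; error handling is abbreviated.
 *
 *	u32 val;
 *	int ret;
 *
 *	ret = tb_port_read(port, &val, TB_CFG_PORT, port->cap_phy, 1);
 *	if (ret)
 *		return ret;
 */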

#define tb_err(tb, fmt, arg...) dev_err(&(tb)->nhi->pdev->dev, fmt, ## arg)
#define tb_WARN(tb, fmt, arg...) dev_WARN(&(tb)->nhi->pdev->dev, fmt, ## arg)
#define tb_warn(tb, fmt, arg...) dev_warn(&(tb)->nhi->pdev->dev, fmt, ## arg)
#define tb_info(tb, fmt, arg...) dev_info(&(tb)->nhi->pdev->dev, fmt, ## arg)
#define tb_dbg(tb, fmt, arg...) dev_dbg(&(tb)->nhi->pdev->dev, fmt, ## arg)

#define __TB_SW_PRINT(level, sw, fmt, arg...)			\
	do {							\
		const struct tb_switch *__sw = (sw);		\
		level(__sw->tb, "%llx: " fmt,			\
		      tb_route(__sw), ## arg);			\
	} while (0)
#define tb_sw_WARN(sw, fmt, arg...) __TB_SW_PRINT(tb_WARN, sw, fmt, ##arg)
#define tb_sw_warn(sw, fmt, arg...) __TB_SW_PRINT(tb_warn, sw, fmt, ##arg)
#define tb_sw_info(sw, fmt, arg...) __TB_SW_PRINT(tb_info, sw, fmt, ##arg)
#define tb_sw_dbg(sw, fmt, arg...) __TB_SW_PRINT(tb_dbg, sw, fmt, ##arg)

#define __TB_PORT_PRINT(level, _port, fmt, arg...)			\
	do {								\
		const struct tb_port *__port = (_port);			\
		level(__port->sw->tb, "%llx:%x: " fmt,			\
		      tb_route(__port->sw), __port->port, ## arg);	\
	} while (0)
#define tb_port_WARN(port, fmt, arg...) \
	__TB_PORT_PRINT(tb_WARN, port, fmt, ##arg)
#define tb_port_warn(port, fmt, arg...) \
	__TB_PORT_PRINT(tb_warn, port, fmt, ##arg)
#define tb_port_info(port, fmt, arg...) \
	__TB_PORT_PRINT(tb_info, port, fmt, ##arg)
#define tb_port_dbg(port, fmt, arg...) \
	__TB_PORT_PRINT(tb_dbg, port, fmt, ##arg)

struct tb *icm_probe(struct tb_nhi *nhi);
struct tb *tb_probe(struct tb_nhi *nhi);

extern struct device_type tb_domain_type;
extern struct device_type tb_retimer_type;
extern struct device_type tb_switch_type;
extern struct device_type usb4_port_device_type;

int tb_domain_init(void);
void tb_domain_exit(void);
int tb_xdomain_init(void);
void tb_xdomain_exit(void);

struct tb *tb_domain_alloc(struct tb_nhi *nhi, int timeout_msec, size_t privsize);
int tb_domain_add(struct tb *tb);
void tb_domain_remove(struct tb *tb);
int tb_domain_suspend_noirq(struct tb *tb);
int tb_domain_resume_noirq(struct tb *tb);
int tb_domain_suspend(struct tb *tb);
int tb_domain_freeze_noirq(struct tb *tb);
int tb_domain_thaw_noirq(struct tb *tb);
void tb_domain_complete(struct tb *tb);
int tb_domain_runtime_suspend(struct tb *tb);
int tb_domain_runtime_resume(struct tb *tb);
int tb_domain_disapprove_switch(struct tb *tb, struct tb_switch *sw);
int tb_domain_approve_switch(struct tb *tb, struct tb_switch *sw);
int tb_domain_approve_switch_key(struct tb *tb, struct tb_switch *sw);
int tb_domain_challenge_switch_key(struct tb *tb, struct tb_switch *sw);
int tb_domain_disconnect_pcie_paths(struct tb *tb);
int tb_domain_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
				    int transmit_path, int transmit_ring,
				    int receive_path, int receive_ring);
int tb_domain_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
				       int transmit_path, int transmit_ring,
				       int receive_path, int receive_ring);
int tb_domain_disconnect_all_paths(struct tb *tb);

static inline struct tb *tb_domain_get(struct tb *tb)
{
	if (tb)
		get_device(&tb->dev);
	return tb;
}

static inline void tb_domain_put(struct tb *tb)
{
	put_device(&tb->dev);
}
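
/*
 * Illustrative sketch (not part of this header): tb_domain_get() and
 * tb_domain_put() follow the usual device refcounting pattern, so a
 * caller that keeps a domain pointer across a sleeping section would do:
 *
 *	tb = tb_domain_get(xd->tb);	// pin the domain
 *	// ...use tb while it cannot go away...
 *	tb_domain_put(tb);		// drop the reference
 */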

struct tb_nvm *tb_nvm_alloc(struct device *dev);
int tb_nvm_add_active(struct tb_nvm *nvm, size_t size, nvmem_reg_read_t reg_read);
int tb_nvm_write_buf(struct tb_nvm *nvm, unsigned int offset, void *val,
		     size_t bytes);
int tb_nvm_add_non_active(struct tb_nvm *nvm, size_t size,
			  nvmem_reg_write_t reg_write);
void tb_nvm_free(struct tb_nvm *nvm);
void tb_nvm_exit(void);

typedef int (*read_block_fn)(void *, unsigned int, void *, size_t);
typedef int (*write_block_fn)(void *, unsigned int, const void *, size_t);

int tb_nvm_read_data(unsigned int address, void *buf, size_t size,
		     unsigned int retries, read_block_fn read_block,
		     void *read_block_data);
int tb_nvm_write_data(unsigned int address, const void *buf, size_t size,
		      unsigned int retries, write_block_fn write_next_block,
		      void *write_block_data);

struct tb_switch *tb_switch_alloc(struct tb *tb, struct device *parent,
				  u64 route);
struct tb_switch *tb_switch_alloc_safe_mode(struct tb *tb,
			struct device *parent, u64 route);
int tb_switch_configure(struct tb_switch *sw);
int tb_switch_add(struct tb_switch *sw);
void tb_switch_remove(struct tb_switch *sw);
void tb_switch_suspend(struct tb_switch *sw, bool runtime);
int tb_switch_resume(struct tb_switch *sw);
int tb_switch_reset(struct tb_switch *sw);
void tb_sw_set_unplugged(struct tb_switch *sw);
struct tb_port *tb_switch_find_port(struct tb_switch *sw,
				    enum tb_port_type type);
struct tb_switch *tb_switch_find_by_link_depth(struct tb *tb, u8 link,
					       u8 depth);
struct tb_switch *tb_switch_find_by_uuid(struct tb *tb, const uuid_t *uuid);
struct tb_switch *tb_switch_find_by_route(struct tb *tb, u64 route);

/**
 * tb_switch_for_each_port() - Iterate over each switch port
 * @sw: Switch whose ports to iterate
 * @p: Port used as iterator
 *
 * Iterates over each switch port skipping the control port (port %0).
 */
#define tb_switch_for_each_port(sw, p)					\
	for ((p) = &(sw)->ports[1];					\
	     (p) <= &(sw)->ports[(sw)->config.max_port_number]; (p)++)
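
/*
 * Illustrative sketch (not part of this header): counting the lane
 * adapters of a switch using the iterator above together with the type
 * predicates defined earlier in this file.
 *
 *	struct tb_port *port;
 *	int nlanes = 0;
 *
 *	tb_switch_for_each_port(sw, port) {
 *		if (tb_port_is_null(port))
 *			nlanes++;
 *	}
 *	tb_sw_dbg(sw, "%d lane adapters\n", nlanes);
 */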
| 753 | |
Mika Westerberg | b6b0ea7 | 2017-10-04 15:19:20 +0300 | [diff] [blame] | 754 | static inline struct tb_switch *tb_switch_get(struct tb_switch *sw) |
| 755 | { |
| 756 | if (sw) |
| 757 | get_device(&sw->dev); |
| 758 | return sw; |
| 759 | } |
| 760 | |
Mika Westerberg | bfe778a | 2017-06-06 15:25:01 +0300 | [diff] [blame] | 761 | static inline void tb_switch_put(struct tb_switch *sw) |
| 762 | { |
| 763 | put_device(&sw->dev); |
| 764 | } |
| 765 | |
| 766 | static inline bool tb_is_switch(const struct device *dev) |
| 767 | { |
| 768 | return dev->type == &tb_switch_type; |
| 769 | } |
| 770 | |
| 771 | static inline struct tb_switch *tb_to_switch(struct device *dev) |
| 772 | { |
| 773 | if (tb_is_switch(dev)) |
| 774 | return container_of(dev, struct tb_switch, dev); |
| 775 | return NULL; |
| 776 | } |
| 777 | |
Mika Westerberg | 0414bec | 2017-02-19 23:43:26 +0200 | [diff] [blame] | 778 | static inline struct tb_switch *tb_switch_parent(struct tb_switch *sw) |
| 779 | { |
| 780 | return tb_to_switch(sw->dev.parent); |
| 781 | } |
| 782 | |
Mika Westerberg | 17a8f81 | 2019-10-08 16:42:47 +0300 | [diff] [blame] | 783 | static inline bool tb_switch_is_light_ridge(const struct tb_switch *sw) |
Mika Westerberg | 8b0110d | 2019-01-08 18:55:09 +0200 | [diff] [blame] | 784 | { |
Mika Westerberg | 35ee69e | 2020-07-25 10:40:47 +0300 | [diff] [blame] | 785 | return sw->config.vendor_id == PCI_VENDOR_ID_INTEL && |
| 786 | sw->config.device_id == PCI_DEVICE_ID_INTEL_LIGHT_RIDGE; |
Mika Westerberg | 8b0110d | 2019-01-08 18:55:09 +0200 | [diff] [blame] | 787 | } |
| 788 | |
Mika Westerberg | 17a8f81 | 2019-10-08 16:42:47 +0300 | [diff] [blame] | 789 | static inline bool tb_switch_is_eagle_ridge(const struct tb_switch *sw) |
Mika Westerberg | 8b0110d | 2019-01-08 18:55:09 +0200 | [diff] [blame] | 790 | { |
Mika Westerberg | 35ee69e | 2020-07-25 10:40:47 +0300 | [diff] [blame] | 791 | return sw->config.vendor_id == PCI_VENDOR_ID_INTEL && |
| 792 | sw->config.device_id == PCI_DEVICE_ID_INTEL_EAGLE_RIDGE; |
Mika Westerberg | 8b0110d | 2019-01-08 18:55:09 +0200 | [diff] [blame] | 793 | } |
| 794 | |
Mika Westerberg | 17a8f81 | 2019-10-08 16:42:47 +0300 | [diff] [blame] | 795 | static inline bool tb_switch_is_cactus_ridge(const struct tb_switch *sw) |
Mika Westerberg | 99cabbb | 2018-12-30 21:34:08 +0200 | [diff] [blame] | 796 | { |
Mika Westerberg | 35ee69e | 2020-07-25 10:40:47 +0300 | [diff] [blame] | 797 | if (sw->config.vendor_id == PCI_VENDOR_ID_INTEL) { |
| 798 | switch (sw->config.device_id) { |
| 799 | case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_2C: |
| 800 | case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C: |
| 801 | return true; |
| 802 | } |
Mika Westerberg | 99cabbb | 2018-12-30 21:34:08 +0200 | [diff] [blame] | 803 | } |
Mika Westerberg | 35ee69e | 2020-07-25 10:40:47 +0300 | [diff] [blame] | 804 | return false; |
Mika Westerberg | 99cabbb | 2018-12-30 21:34:08 +0200 | [diff] [blame] | 805 | } |
| 806 | |
Mika Westerberg | 17a8f81 | 2019-10-08 16:42:47 +0300 | [diff] [blame] | 807 | static inline bool tb_switch_is_falcon_ridge(const struct tb_switch *sw) |
Mika Westerberg | 99cabbb | 2018-12-30 21:34:08 +0200 | [diff] [blame] | 808 | { |
Mika Westerberg | 35ee69e | 2020-07-25 10:40:47 +0300 | [diff] [blame] | 809 | if (sw->config.vendor_id == PCI_VENDOR_ID_INTEL) { |
| 810 | switch (sw->config.device_id) { |
| 811 | case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_BRIDGE: |
| 812 | case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_BRIDGE: |
| 813 | return true; |
| 814 | } |
Mika Westerberg | 99cabbb | 2018-12-30 21:34:08 +0200 | [diff] [blame] | 815 | } |
Mika Westerberg | 35ee69e | 2020-07-25 10:40:47 +0300 | [diff] [blame] | 816 | return false; |
Mika Westerberg | 99cabbb | 2018-12-30 21:34:08 +0200 | [diff] [blame] | 817 | } |
| 818 | |
Mika Westerberg | 7bffd97e | 2019-03-22 15:16:53 +0200 | [diff] [blame] | 819 | static inline bool tb_switch_is_alpine_ridge(const struct tb_switch *sw) |
| 820 | { |
Mika Westerberg | 35ee69e | 2020-07-25 10:40:47 +0300 | [diff] [blame] | 821 | if (sw->config.vendor_id == PCI_VENDOR_ID_INTEL) { |
| 822 | switch (sw->config.device_id) { |
| 823 | case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_BRIDGE: |
| 824 | case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_BRIDGE: |
| 825 | case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_BRIDGE: |
| 826 | case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_BRIDGE: |
| 827 | return true; |
| 828 | } |
Mika Westerberg | 7bffd97e | 2019-03-22 15:16:53 +0200 | [diff] [blame] | 829 | } |
Mika Westerberg | 35ee69e | 2020-07-25 10:40:47 +0300 | [diff] [blame] | 830 | return false; |
Mika Westerberg | 7bffd97e | 2019-03-22 15:16:53 +0200 | [diff] [blame] | 831 | } |
| 832 | |
| 833 | static inline bool tb_switch_is_titan_ridge(const struct tb_switch *sw) |
| 834 | { |
Mika Westerberg | 35ee69e | 2020-07-25 10:40:47 +0300 | [diff] [blame] | 835 | if (sw->config.vendor_id == PCI_VENDOR_ID_INTEL) { |
| 836 | switch (sw->config.device_id) { |
| 837 | case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_2C_BRIDGE: |
| 838 | case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_BRIDGE: |
| 839 | case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_DD_BRIDGE: |
| 840 | return true; |
| 841 | } |
Mika Westerberg | 7bffd97e | 2019-03-22 15:16:53 +0200 | [diff] [blame] | 842 | } |
Mika Westerberg | 35ee69e | 2020-07-25 10:40:47 +0300 | [diff] [blame] | 843 | return false; |
Mika Westerberg | 7bffd97e | 2019-03-22 15:16:53 +0200 | [diff] [blame] | 844 | } |
| 845 | |
Mika Westerberg | f07a360 | 2019-06-25 15:10:01 +0300 | [diff] [blame] | 846 | /** |
Mika Westerberg | b040798 | 2019-12-17 15:33:40 +0300 | [diff] [blame] | 847 | * tb_switch_is_usb4() - Is the switch USB4 compliant |
| 848 | * @sw: Switch to check |
| 849 | * |
| 850 | * Returns true if the @sw is USB4 compliant router, false otherwise. |
| 851 | */ |
| 852 | static inline bool tb_switch_is_usb4(const struct tb_switch *sw) |
| 853 | { |
| 854 | return sw->config.thunderbolt_version == USB4_VERSION_1_0; |
| 855 | } |
| 856 | |
| 857 | /** |
Mika Westerberg | f07a360 | 2019-06-25 15:10:01 +0300 | [diff] [blame] | 858 | * tb_switch_is_icm() - Is the switch handled by ICM firmware |
| 859 | * @sw: Switch to check |
| 860 | * |
| 861 | * Call this when there is a need to differentiate whether the ICM |
| 862 | * firmware or the software connection manager is handling @sw. It is |
| 863 | * valid to call this after tb_switch_alloc() and tb_switch_configure() |
| 864 | * have been called (the latter only in the software CM case). |
| 865 | */ |
| 866 | static inline bool tb_switch_is_icm(const struct tb_switch *sw) |
| 867 | { |
| 868 | return !sw->config.enabled; |
| 869 | } |
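
/*
 * Example (editorial sketch, not part of the original header): a caller
 * that has gone through tb_switch_alloc() can branch on the owning
 * connection manager like this. pr_info() is assumed to be available
 * through the usual kernel headers.
 */
static inline void example_report_cm(const struct tb_switch *sw)
{
	if (tb_switch_is_icm(sw))
		pr_info("switch is handled by ICM firmware\n");
	else
		pr_info("switch is handled by the software CM\n");
}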
| 870 | |
Mika Westerberg | 91c0c12 | 2019-03-21 19:03:00 +0200 | [diff] [blame] | 871 | int tb_switch_lane_bonding_enable(struct tb_switch *sw); |
| 872 | void tb_switch_lane_bonding_disable(struct tb_switch *sw); |
Mika Westerberg | de46203 | 2020-04-02 14:50:52 +0300 | [diff] [blame] | 873 | int tb_switch_configure_link(struct tb_switch *sw); |
| 874 | void tb_switch_unconfigure_link(struct tb_switch *sw); |
Mika Westerberg | 91c0c12 | 2019-03-21 19:03:00 +0200 | [diff] [blame] | 875 | |
Mika Westerberg | 8afe909 | 2019-03-26 15:52:30 +0300 | [diff] [blame] | 876 | bool tb_switch_query_dp_resource(struct tb_switch *sw, struct tb_port *in); |
| 877 | int tb_switch_alloc_dp_resource(struct tb_switch *sw, struct tb_port *in); |
| 878 | void tb_switch_dealloc_dp_resource(struct tb_switch *sw, struct tb_port *in); |
| 879 | |
Rajmohan Mani | cf29b9af | 2019-12-17 15:33:43 +0300 | [diff] [blame] | 880 | int tb_switch_tmu_init(struct tb_switch *sw); |
| 881 | int tb_switch_tmu_post_time(struct tb_switch *sw); |
| 882 | int tb_switch_tmu_disable(struct tb_switch *sw); |
| 883 | int tb_switch_tmu_enable(struct tb_switch *sw); |
| 884 | |
| 885 | static inline bool tb_switch_tmu_is_enabled(const struct tb_switch *sw) |
| 886 | { |
| 887 | return sw->tmu.rate == TB_SWITCH_TMU_RATE_HIFI && |
| 888 | !sw->tmu.unidirectional; |
| 889 | } |
| 890 | |
Andreas Noever | 9da672a | 2014-06-03 22:04:05 +0200 | [diff] [blame] | 891 | int tb_wait_for_port(struct tb_port *port, bool wait_if_unplugged); |
Andreas Noever | 520b670 | 2014-06-03 22:04:07 +0200 | [diff] [blame] | 892 | int tb_port_add_nfc_credits(struct tb_port *port, int credits); |
| 893 | int tb_port_clear_counter(struct tb_port *port, int counter); |
Mika Westerberg | b040798 | 2019-12-17 15:33:40 +0300 | [diff] [blame] | 894 | int tb_port_unlock(struct tb_port *port); |
Mika Westerberg | 341d451 | 2020-02-21 12:11:54 +0200 | [diff] [blame] | 895 | int tb_port_enable(struct tb_port *port); |
| 896 | int tb_port_disable(struct tb_port *port); |
Mika Westerberg | 0b2863a | 2017-02-19 16:57:27 +0200 | [diff] [blame] | 897 | int tb_port_alloc_in_hopid(struct tb_port *port, int hopid, int max_hopid); |
| 898 | void tb_port_release_in_hopid(struct tb_port *port, int hopid); |
| 899 | int tb_port_alloc_out_hopid(struct tb_port *port, int hopid, int max_hopid); |
| 900 | void tb_port_release_out_hopid(struct tb_port *port, int hopid); |
Mika Westerberg | fb19fac | 2017-02-19 21:51:30 +0200 | [diff] [blame] | 901 | struct tb_port *tb_next_port_on_path(struct tb_port *start, struct tb_port *end, |
| 902 | struct tb_port *prev); |
Andreas Noever | 9da672a | 2014-06-03 22:04:05 +0200 | [diff] [blame] | 903 | |
Mika Westerberg | 56ad3ae | 2021-03-10 13:34:12 +0200 | [diff] [blame] | 904 | static inline bool tb_port_use_credit_allocation(const struct tb_port *port) |
| 905 | { |
| 906 | return tb_port_is_null(port) && port->sw->credit_allocation; |
| 907 | } |
| 908 | |
Mika Westerberg | c64c3f3 | 2020-04-29 17:07:59 +0300 | [diff] [blame] | 909 | /** |
| 910 | * tb_for_each_port_on_path() - Iterate over each port on path |
| 911 | * @src: Source port |
| 912 | * @dst: Destination port |
| 913 | * @p: Port used as iterator |
| 914 | * |
| 915 | * Walks over each port on the path from @src to @dst. |
| 916 | */ |
| 917 | #define tb_for_each_port_on_path(src, dst, p) \ |
| 918 | for ((p) = tb_next_port_on_path((src), (dst), NULL); (p); \ |
| 919 | (p) = tb_next_port_on_path((src), (dst), (p))) |
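
/*
 * Example (editorial sketch): counting how many ports the walk above
 * visits between two endpoints. The iterator yields NULL when the path
 * is exhausted, which terminates the loop.
 */
static inline int example_count_path_ports(struct tb_port *src,
					   struct tb_port *dst)
{
	struct tb_port *p;
	int count = 0;

	tb_for_each_port_on_path(src, dst, p)
		count++;

	return count;
}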
| 920 | |
Mika Westerberg | 5b7b8c0 | 2020-05-08 12:41:34 +0300 | [diff] [blame] | 921 | int tb_port_get_link_speed(struct tb_port *port); |
Isaac Hazan | 4210d50 | 2020-09-24 11:43:58 +0300 | [diff] [blame] | 922 | int tb_port_get_link_width(struct tb_port *port); |
Isaac Hazan | 5cc0df9 | 2020-09-24 11:44:01 +0300 | [diff] [blame] | 923 | int tb_port_state(struct tb_port *port); |
| 924 | int tb_port_lane_bonding_enable(struct tb_port *port); |
| 925 | void tb_port_lane_bonding_disable(struct tb_port *port); |
Mika Westerberg | e7051be | 2021-03-22 16:54:54 +0200 | [diff] [blame] | 926 | int tb_port_wait_for_link_width(struct tb_port *port, int width, |
| 927 | int timeout_msec); |
Mika Westerberg | 69fea37 | 2021-03-22 17:01:59 +0200 | [diff] [blame] | 928 | int tb_port_update_credits(struct tb_port *port); |
Mika Westerberg | 5b7b8c0 | 2020-05-08 12:41:34 +0300 | [diff] [blame] | 929 | |
Mika Westerberg | da2da04 | 2017-06-06 15:24:58 +0300 | [diff] [blame] | 930 | int tb_switch_find_vse_cap(struct tb_switch *sw, enum tb_switch_vse_cap vsec); |
Rajmohan Mani | aa43a9d | 2019-12-17 15:33:42 +0300 | [diff] [blame] | 931 | int tb_switch_find_cap(struct tb_switch *sw, enum tb_switch_cap cap); |
Mika Westerberg | 6de057e | 2020-06-29 20:21:07 +0300 | [diff] [blame] | 932 | int tb_switch_next_cap(struct tb_switch *sw, unsigned int offset); |
Mika Westerberg | da2da04 | 2017-06-06 15:24:58 +0300 | [diff] [blame] | 933 | int tb_port_find_cap(struct tb_port *port, enum tb_port_cap cap); |
Mika Westerberg | 3c8b228 | 2020-06-29 20:15:17 +0300 | [diff] [blame] | 934 | int tb_port_next_cap(struct tb_port *port, unsigned int offset); |
Mika Westerberg | e78db6f | 2017-10-12 16:45:50 +0300 | [diff] [blame] | 935 | bool tb_port_is_enabled(struct tb_port *port); |
Andreas Noever | e2b8785 | 2014-06-03 22:04:03 +0200 | [diff] [blame] | 936 | |
Rajmohan Mani | e6f8185 | 2019-12-17 15:33:44 +0300 | [diff] [blame] | 937 | bool tb_usb3_port_is_enabled(struct tb_port *port); |
| 938 | int tb_usb3_port_enable(struct tb_port *port, bool enable); |
| 939 | |
Mika Westerberg | 0414bec | 2017-02-19 23:43:26 +0200 | [diff] [blame] | 940 | bool tb_pci_port_is_enabled(struct tb_port *port); |
Mika Westerberg | 93f36ad | 2017-02-19 13:48:29 +0200 | [diff] [blame] | 941 | int tb_pci_port_enable(struct tb_port *port, bool enable); |
| 942 | |
Mika Westerberg | 4f807e4 | 2018-09-17 16:30:49 +0300 | [diff] [blame] | 943 | int tb_dp_port_hpd_is_active(struct tb_port *port); |
| 944 | int tb_dp_port_hpd_clear(struct tb_port *port); |
| 945 | int tb_dp_port_set_hops(struct tb_port *port, unsigned int video, |
| 946 | unsigned int aux_tx, unsigned int aux_rx); |
| 947 | bool tb_dp_port_is_enabled(struct tb_port *port); |
| 948 | int tb_dp_port_enable(struct tb_port *port, bool enable); |
| 949 | |
Mika Westerberg | 0414bec | 2017-02-19 23:43:26 +0200 | [diff] [blame] | 950 | struct tb_path *tb_path_discover(struct tb_port *src, int src_hopid, |
| 951 | struct tb_port *dst, int dst_hopid, |
| 952 | struct tb_port **last, const char *name); |
Mika Westerberg | 8c7acaaf | 2017-02-19 22:11:41 +0200 | [diff] [blame] | 953 | struct tb_path *tb_path_alloc(struct tb *tb, struct tb_port *src, int src_hopid, |
| 954 | struct tb_port *dst, int dst_hopid, int link_nr, |
| 955 | const char *name); |
Andreas Noever | 520b670 | 2014-06-03 22:04:07 +0200 | [diff] [blame] | 956 | void tb_path_free(struct tb_path *path); |
| 957 | int tb_path_activate(struct tb_path *path); |
| 958 | void tb_path_deactivate(struct tb_path *path); |
| 959 | bool tb_path_is_invalid(struct tb_path *path); |
Mika Westerberg | 0bd680c | 2020-03-24 14:44:13 +0200 | [diff] [blame] | 960 | bool tb_path_port_on_path(const struct tb_path *path, |
| 961 | const struct tb_port *port); |
Andreas Noever | 520b670 | 2014-06-03 22:04:07 +0200 | [diff] [blame] | 962 | |
Mika Westerberg | 6ed541c | 2021-03-22 18:09:35 +0200 | [diff] [blame] | 963 | /** |
| 964 | * tb_path_for_each_hop() - Iterate over each hop on path |
| 965 | * @path: Path whose hops to iterate |
| 966 | * @hop: Hop used as iterator |
| 967 | * |
| 968 | * Iterates over each hop on the path. |
| 969 | */ |
| 970 | #define tb_path_for_each_hop(path, hop) \ |
| 971 | for ((hop) = &(path)->hops[0]; \ |
| 972 | (hop) <= &(path)->hops[(path)->path_length - 1]; (hop)++) |
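
/*
 * Example (editorial sketch): walking the hops of a path with the macro
 * above. The in_port member of struct tb_path_hop is assumed here as it
 * is used elsewhere in the driver.
 */
static inline void example_dump_hops(const struct tb_path *path)
{
	const struct tb_path_hop *hop;

	tb_path_for_each_hop(path, hop)
		pr_debug("hop enters through port %d\n", hop->in_port->port);
}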
| 973 | |
Andreas Noever | cd22e73 | 2014-06-12 23:11:46 +0200 | [diff] [blame] | 974 | int tb_drom_read(struct tb_switch *sw); |
| 975 | int tb_drom_read_uid_only(struct tb_switch *sw, u64 *uid); |
Andreas Noever | c90553b | 2014-06-03 22:04:11 +0200 | [diff] [blame] | 976 | |
Mika Westerberg | a9be558 | 2019-01-09 16:42:12 +0200 | [diff] [blame] | 977 | int tb_lc_read_uuid(struct tb_switch *sw, u32 *uuid); |
Mika Westerberg | e28178b | 2020-04-02 12:42:44 +0300 | [diff] [blame] | 978 | int tb_lc_configure_port(struct tb_port *port); |
| 979 | void tb_lc_unconfigure_port(struct tb_port *port); |
Mika Westerberg | 284652a | 2020-04-09 14:23:32 +0300 | [diff] [blame] | 980 | int tb_lc_configure_xdomain(struct tb_port *port); |
| 981 | void tb_lc_unconfigure_xdomain(struct tb_port *port); |
Mika Westerberg | fdb0887 | 2020-11-26 12:52:43 +0300 | [diff] [blame] | 982 | int tb_lc_start_lane_initialization(struct tb_port *port); |
Mika Westerberg | b2911a5 | 2019-12-06 18:36:07 +0200 | [diff] [blame] | 983 | int tb_lc_set_wake(struct tb_switch *sw, unsigned int flags); |
Mika Westerberg | 5480dfc | 2019-01-09 17:25:43 +0200 | [diff] [blame] | 984 | int tb_lc_set_sleep(struct tb_switch *sw); |
Mika Westerberg | 91c0c12 | 2019-03-21 19:03:00 +0200 | [diff] [blame] | 985 | bool tb_lc_lane_bonding_possible(struct tb_switch *sw); |
Mika Westerberg | 8afe909 | 2019-03-26 15:52:30 +0300 | [diff] [blame] | 986 | bool tb_lc_dp_sink_query(struct tb_switch *sw, struct tb_port *in); |
| 987 | int tb_lc_dp_sink_alloc(struct tb_switch *sw, struct tb_port *in); |
| 988 | int tb_lc_dp_sink_dealloc(struct tb_switch *sw, struct tb_port *in); |
Mario Limonciello | 1cb3629 | 2020-06-23 11:14:29 -0500 | [diff] [blame] | 989 | int tb_lc_force_power(struct tb_switch *sw); |
Andreas Noever | a25c8b2 | 2014-06-03 22:04:02 +0200 | [diff] [blame] | 990 | |
| 991 | static inline int tb_route_length(u64 route) |
| 992 | { |
| 993 | return (fls64(route) + TB_ROUTE_SHIFT - 1) / TB_ROUTE_SHIFT; |
| 994 | } |
| 995 | |
Andreas Noever | 9da672a | 2014-06-03 22:04:05 +0200 | [diff] [blame] | 996 | /** |
| 997 | * tb_downstream_route() - get route to downstream switch |
| 998 | * @port: Port at the parent switch where the downstream switch is connected |
| 999 | * |
| 1000 | * Port must not be the upstream port (otherwise a loop is created). |
| 1001 | * Return: Route to the switch behind @port. |
| 1002 | */ |
| 1003 | static inline u64 tb_downstream_route(struct tb_port *port) |
| 1004 | { |
| 1005 | return tb_route(port->sw) |
| 1006 | | ((u64) port->port << (port->sw->config.depth * 8)); |
| 1007 | } |
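
/*
 * Worked example (editorial, assuming TB_ROUTE_SHIFT is 8): a switch at
 * route 0x3 and depth 1 reaches the device behind its port 5 via
 * tb_downstream_route() == 0x3 | (5ULL << (1 * 8)) == 0x503, and
 * tb_route_length(0x503) == (fls64(0x503) + 7) / 8 == (11 + 7) / 8 == 2.
 */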
| 1008 | |
Mika Westerberg | 5ca6768 | 2020-10-22 13:22:06 +0300 | [diff] [blame] | 1009 | bool tb_is_xdomain_enabled(void); |
Mika Westerberg | d1ff702 | 2017-10-02 13:38:34 +0300 | [diff] [blame] | 1010 | bool tb_xdomain_handle_request(struct tb *tb, enum tb_cfg_pkg_type type, |
| 1011 | const void *buf, size_t size); |
| 1012 | struct tb_xdomain *tb_xdomain_alloc(struct tb *tb, struct device *parent, |
| 1013 | u64 route, const uuid_t *local_uuid, |
| 1014 | const uuid_t *remote_uuid); |
| 1015 | void tb_xdomain_add(struct tb_xdomain *xd); |
| 1016 | void tb_xdomain_remove(struct tb_xdomain *xd); |
| 1017 | struct tb_xdomain *tb_xdomain_find_by_link_depth(struct tb *tb, u8 link, |
| 1018 | u8 depth); |
| 1019 | |
Kranthi Kuntala | dacb128 | 2020-03-05 16:39:58 +0200 | [diff] [blame] | 1020 | int tb_retimer_scan(struct tb_port *port); |
| 1021 | void tb_retimer_remove_all(struct tb_port *port); |
| 1022 | |
| 1023 | static inline bool tb_is_retimer(const struct device *dev) |
| 1024 | { |
| 1025 | return dev->type == &tb_retimer_type; |
| 1026 | } |
| 1027 | |
| 1028 | static inline struct tb_retimer *tb_to_retimer(struct device *dev) |
| 1029 | { |
| 1030 | if (tb_is_retimer(dev)) |
| 1031 | return container_of(dev, struct tb_retimer, dev); |
| 1032 | return NULL; |
| 1033 | } |
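
/*
 * Example (editorial sketch): tb_to_retimer() follows the usual driver
 * model downcast pattern, so a bus iteration callback can recover the
 * retimer from a bare struct device and skip everything else. This is
 * shaped for use with device_for_each_child(), which stops iterating on
 * a non-zero return value.
 */
static inline int example_find_retimer(struct device *dev, void *data)
{
	struct tb_retimer *rt = tb_to_retimer(dev);

	if (!rt)
		return 0;	/* not a retimer, keep iterating */
	*(struct tb_retimer **)data = rt;
	return 1;		/* found one, stop the iteration */
}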
| 1034 | |
Mika Westerberg | b040798 | 2019-12-17 15:33:40 +0300 | [diff] [blame] | 1035 | int usb4_switch_setup(struct tb_switch *sw); |
| 1036 | int usb4_switch_read_uid(struct tb_switch *sw, u64 *uid); |
| 1037 | int usb4_switch_drom_read(struct tb_switch *sw, unsigned int address, void *buf, |
| 1038 | size_t size); |
Mika Westerberg | b040798 | 2019-12-17 15:33:40 +0300 | [diff] [blame] | 1039 | bool usb4_switch_lane_bonding_possible(struct tb_switch *sw); |
Mika Westerberg | b2911a5 | 2019-12-06 18:36:07 +0200 | [diff] [blame] | 1040 | int usb4_switch_set_wake(struct tb_switch *sw, unsigned int flags); |
Mika Westerberg | b040798 | 2019-12-17 15:33:40 +0300 | [diff] [blame] | 1041 | int usb4_switch_set_sleep(struct tb_switch *sw); |
| 1042 | int usb4_switch_nvm_sector_size(struct tb_switch *sw); |
| 1043 | int usb4_switch_nvm_read(struct tb_switch *sw, unsigned int address, void *buf, |
| 1044 | size_t size); |
| 1045 | int usb4_switch_nvm_write(struct tb_switch *sw, unsigned int address, |
| 1046 | const void *buf, size_t size); |
| 1047 | int usb4_switch_nvm_authenticate(struct tb_switch *sw); |
Mika Westerberg | 661b194 | 2020-11-10 11:34:07 +0300 | [diff] [blame] | 1048 | int usb4_switch_nvm_authenticate_status(struct tb_switch *sw, u32 *status); |
Mika Westerberg | 56ad3ae | 2021-03-10 13:34:12 +0200 | [diff] [blame] | 1049 | int usb4_switch_credits_init(struct tb_switch *sw); |
Mika Westerberg | b040798 | 2019-12-17 15:33:40 +0300 | [diff] [blame] | 1050 | bool usb4_switch_query_dp_resource(struct tb_switch *sw, struct tb_port *in); |
| 1051 | int usb4_switch_alloc_dp_resource(struct tb_switch *sw, struct tb_port *in); |
| 1052 | int usb4_switch_dealloc_dp_resource(struct tb_switch *sw, struct tb_port *in); |
| 1053 | struct tb_port *usb4_switch_map_pcie_down(struct tb_switch *sw, |
| 1054 | const struct tb_port *port); |
Rajmohan Mani | e6f8185 | 2019-12-17 15:33:44 +0300 | [diff] [blame] | 1055 | struct tb_port *usb4_switch_map_usb3_down(struct tb_switch *sw, |
| 1056 | const struct tb_port *port); |
Mika Westerberg | cae5f51 | 2021-04-01 17:34:20 +0300 | [diff] [blame] | 1057 | int usb4_switch_add_ports(struct tb_switch *sw); |
| 1058 | void usb4_switch_remove_ports(struct tb_switch *sw); |
Mika Westerberg | b040798 | 2019-12-17 15:33:40 +0300 | [diff] [blame] | 1059 | |
| 1060 | int usb4_port_unlock(struct tb_port *port); |
Mika Westerberg | e28178b | 2020-04-02 12:42:44 +0300 | [diff] [blame] | 1061 | int usb4_port_configure(struct tb_port *port); |
| 1062 | void usb4_port_unconfigure(struct tb_port *port); |
Mika Westerberg | 284652a | 2020-04-09 14:23:32 +0300 | [diff] [blame] | 1063 | int usb4_port_configure_xdomain(struct tb_port *port); |
| 1064 | void usb4_port_unconfigure_xdomain(struct tb_port *port); |
Rajmohan Mani | 02d1285 | 2020-03-05 16:33:46 +0200 | [diff] [blame] | 1065 | int usb4_port_enumerate_retimers(struct tb_port *port); |
| 1066 | |
| 1067 | int usb4_port_retimer_read(struct tb_port *port, u8 index, u8 reg, void *buf, |
| 1068 | u8 size); |
| 1069 | int usb4_port_retimer_write(struct tb_port *port, u8 index, u8 reg, |
| 1070 | const void *buf, u8 size); |
| 1071 | int usb4_port_retimer_is_last(struct tb_port *port, u8 index); |
| 1072 | int usb4_port_retimer_nvm_sector_size(struct tb_port *port, u8 index); |
| 1073 | int usb4_port_retimer_nvm_write(struct tb_port *port, u8 index, |
| 1074 | unsigned int address, const void *buf, |
| 1075 | size_t size); |
| 1076 | int usb4_port_retimer_nvm_authenticate(struct tb_port *port, u8 index); |
| 1077 | int usb4_port_retimer_nvm_authenticate_status(struct tb_port *port, u8 index, |
| 1078 | u32 *status); |
| 1079 | int usb4_port_retimer_nvm_read(struct tb_port *port, u8 index, |
| 1080 | unsigned int address, void *buf, size_t size); |
Mika Westerberg | 3b1d8d5 | 2020-02-21 23:14:41 +0200 | [diff] [blame] | 1081 | |
| 1082 | int usb4_usb3_port_max_link_rate(struct tb_port *port); |
| 1083 | int usb4_usb3_port_actual_link_rate(struct tb_port *port); |
| 1084 | int usb4_usb3_port_allocated_bandwidth(struct tb_port *port, int *upstream_bw, |
| 1085 | int *downstream_bw); |
| 1086 | int usb4_usb3_port_allocate_bandwidth(struct tb_port *port, int *upstream_bw, |
| 1087 | int *downstream_bw); |
| 1088 | int usb4_usb3_port_release_bandwidth(struct tb_port *port, int *upstream_bw, |
| 1089 | int *downstream_bw); |
Mario Limonciello | 1cb3629 | 2020-06-23 11:14:29 -0500 | [diff] [blame] | 1090 | |
Mika Westerberg | cae5f51 | 2021-04-01 17:34:20 +0300 | [diff] [blame] | 1091 | static inline bool tb_is_usb4_port_device(const struct device *dev) |
| 1092 | { |
| 1093 | return dev->type == &usb4_port_device_type; |
| 1094 | } |
| 1095 | |
| 1096 | static inline struct usb4_port *tb_to_usb4_port_device(struct device *dev) |
| 1097 | { |
| 1098 | if (tb_is_usb4_port_device(dev)) |
| 1099 | return container_of(dev, struct usb4_port, dev); |
| 1100 | return NULL; |
| 1101 | } |
| 1102 | |
| 1103 | struct usb4_port *usb4_port_device_add(struct tb_port *port); |
| 1104 | void usb4_port_device_remove(struct usb4_port *usb4); |
| 1105 | |
Mika Westerberg | 810278d | 2020-08-26 08:58:29 +0300 | [diff] [blame] | 1106 | /* Keep link controller awake during update */ |
Mario Limonciello | 1cb3629 | 2020-06-23 11:14:29 -0500 | [diff] [blame] | 1107 | #define QUIRK_FORCE_POWER_LINK_CONTROLLER BIT(0) |
| 1108 | |
| 1109 | void tb_check_quirks(struct tb_switch *sw); |
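
/*
 * Example (editorial sketch): tb_check_quirks() is expected to set quirk
 * bits on the switch, which callers can then test. The sw->quirks field
 * is an assumption based on how the flag above is used elsewhere in the
 * driver.
 */
static inline void example_apply_quirks(struct tb_switch *sw)
{
	if (sw->quirks & QUIRK_FORCE_POWER_LINK_CONTROLLER)
		tb_lc_force_power(sw);
}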
| 1110 | |
Mika Westerberg | b2be2b0 | 2019-04-02 15:26:00 +0300 | [diff] [blame] | 1111 | #ifdef CONFIG_ACPI |
| 1112 | void tb_acpi_add_links(struct tb_nhi *nhi); |
Mika Westerberg | c6da62a | 2020-02-18 16:14:42 +0200 | [diff] [blame] | 1113 | |
| 1114 | bool tb_acpi_is_native(void); |
| 1115 | bool tb_acpi_may_tunnel_usb3(void); |
| 1116 | bool tb_acpi_may_tunnel_dp(void); |
| 1117 | bool tb_acpi_may_tunnel_pcie(void); |
| 1118 | bool tb_acpi_is_xdomain_allowed(void); |
Rajmohan Mani | ccc5cb8 | 2021-04-01 18:20:17 +0300 | [diff] [blame^] | 1119 | |
| 1120 | int tb_acpi_init(void); |
| 1121 | void tb_acpi_exit(void); |
| 1122 | int tb_acpi_power_on_retimers(struct tb_port *port); |
| 1123 | int tb_acpi_power_off_retimers(struct tb_port *port); |
Mika Westerberg | b2be2b0 | 2019-04-02 15:26:00 +0300 | [diff] [blame] | 1124 | #else |
| 1125 | static inline void tb_acpi_add_links(struct tb_nhi *nhi) { } |
Mika Westerberg | c6da62a | 2020-02-18 16:14:42 +0200 | [diff] [blame] | 1126 | |
| 1127 | static inline bool tb_acpi_is_native(void) { return true; } |
| 1128 | static inline bool tb_acpi_may_tunnel_usb3(void) { return true; } |
| 1129 | static inline bool tb_acpi_may_tunnel_dp(void) { return true; } |
| 1130 | static inline bool tb_acpi_may_tunnel_pcie(void) { return true; } |
| 1131 | static inline bool tb_acpi_is_xdomain_allowed(void) { return true; } |
Rajmohan Mani | ccc5cb8 | 2021-04-01 18:20:17 +0300 | [diff] [blame^] | 1132 | |
| 1133 | static inline int tb_acpi_init(void) { return 0; } |
| 1134 | static inline void tb_acpi_exit(void) { } |
| 1135 | static inline int tb_acpi_power_on_retimers(struct tb_port *port) { return 0; } |
| 1136 | static inline int tb_acpi_power_off_retimers(struct tb_port *port) { return 0; } |
Mika Westerberg | b2be2b0 | 2019-04-02 15:26:00 +0300 | [diff] [blame] | 1137 | #endif |
| 1138 | |
Gil Fine | 54e4181 | 2020-06-29 20:30:52 +0300 | [diff] [blame] | 1139 | #ifdef CONFIG_DEBUG_FS |
| 1140 | void tb_debugfs_init(void); |
| 1141 | void tb_debugfs_exit(void); |
| 1142 | void tb_switch_debugfs_init(struct tb_switch *sw); |
| 1143 | void tb_switch_debugfs_remove(struct tb_switch *sw); |
Mika Westerberg | 407ac93 | 2020-10-07 17:53:44 +0300 | [diff] [blame] | 1144 | void tb_service_debugfs_init(struct tb_service *svc); |
| 1145 | void tb_service_debugfs_remove(struct tb_service *svc); |
Gil Fine | 54e4181 | 2020-06-29 20:30:52 +0300 | [diff] [blame] | 1146 | #else |
| 1147 | static inline void tb_debugfs_init(void) { } |
| 1148 | static inline void tb_debugfs_exit(void) { } |
| 1149 | static inline void tb_switch_debugfs_init(struct tb_switch *sw) { } |
| 1150 | static inline void tb_switch_debugfs_remove(struct tb_switch *sw) { } |
Mika Westerberg | 407ac93 | 2020-10-07 17:53:44 +0300 | [diff] [blame] | 1151 | static inline void tb_service_debugfs_init(struct tb_service *svc) { } |
| 1152 | static inline void tb_service_debugfs_remove(struct tb_service *svc) { } |
Gil Fine | 54e4181 | 2020-06-29 20:30:52 +0300 | [diff] [blame] | 1153 | #endif |
| 1154 | |
Mika Westerberg | 2c6ea4e | 2020-08-24 12:46:52 +0300 | [diff] [blame] | 1155 | #ifdef CONFIG_USB4_KUNIT_TEST |
| 1156 | int tb_test_init(void); |
| 1157 | void tb_test_exit(void); |
| 1158 | #else |
| 1159 | static inline int tb_test_init(void) { return 0; } |
| 1160 | static inline void tb_test_exit(void) { } |
| 1161 | #endif |
| 1162 | |
Andreas Noever | d6cc51c | 2014-06-03 22:04:00 +0200 | [diff] [blame] | 1163 | #endif |