/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Thunderbolt driver - bus logic (NHI independent)
 *
 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
 * Copyright (C) 2018, Intel Corporation
 */

#ifndef TB_H_
#define TB_H_

#include <linux/nvmem-provider.h>
#include <linux/pci.h>
#include <linux/thunderbolt.h>
#include <linux/uuid.h>

#include "tb_regs.h"
#include "ctl.h"
#include "dma_port.h"

/**
 * struct tb_switch_nvm - Structure holding switch NVM information
 * @major: Major version number of the active NVM portion
 * @minor: Minor version number of the active NVM portion
 * @id: Identifier used with both NVM portions
 * @active: Active portion NVMem device
 * @non_active: Non-active portion NVMem device
 * @buf: Buffer where the NVM image is stored before it is written to
 *	 the actual NVM flash device
 * @buf_data_size: Number of bytes actually consumed by the new NVM
 *		   image
 * @authenticating: The switch is authenticating the new NVM
 */
struct tb_switch_nvm {
	u8 major;
	u8 minor;
	int id;
	struct nvmem_device *active;
	struct nvmem_device *non_active;
	void *buf;
	size_t buf_data_size;
	bool authenticating;
};

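/*
 * Illustrative sketch (not from mainline): once userspace has written a
 * complete image through the non-active NVMem device, a connection
 * manager could push @buf to the flash over the DMA port mailbox.
 * dma_port_flash_write() is declared in dma_port.h; treating offset 0
 * as the image start is an assumption of this sketch.
 *
 *	static int example_nvm_flush(struct tb_switch *sw)
 *	{
 *		struct tb_switch_nvm *nvm = sw->nvm;
 *
 *		if (!nvm || !nvm->buf)
 *			return -EINVAL;
 *		return dma_port_flash_write(sw->dma_port, 0, nvm->buf,
 *					    nvm->buf_data_size);
 *	}
 */
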
#define TB_SWITCH_KEY_SIZE		32
#define TB_SWITCH_MAX_DEPTH		6

/**
 * struct tb_switch - a thunderbolt switch
 * @dev: Device for the switch
 * @config: Switch configuration
 * @ports: Ports in this switch
 * @dma_port: If the switch has a port supporting DMA configuration based
 *	      mailbox this will hold the pointer to it (%NULL
 *	      otherwise). If set it also means the switch has an
 *	      upgradeable NVM.
 * @tb: Pointer to the domain the switch belongs to
 * @uid: Unique ID of the switch
 * @uuid: UUID of the switch (or %NULL if not supported)
 * @vendor: Vendor ID of the switch
 * @device: Device ID of the switch
 * @vendor_name: Name of the vendor (or %NULL if not known)
 * @device_name: Name of the device (or %NULL if not known)
 * @generation: Switch Thunderbolt generation
 * @cap_plug_events: Offset to the plug events capability (%0 if not found)
 * @cap_lc: Offset to the link controller capability (%0 if not found)
 * @is_unplugged: The switch is going away
 * @drom: DROM of the switch (%NULL if not found)
 * @nvm: Pointer to the NVM if the switch has one (%NULL otherwise)
 * @no_nvm_upgrade: Prevent NVM upgrade of this switch
 * @safe_mode: The switch is in safe-mode
 * @boot: Whether the switch was already authorized on boot or not
 * @rpm: The switch supports runtime PM
 * @authorized: Whether the switch is authorized by user or policy
 * @security_level: Switch supported security level
 * @key: Contains the key used to challenge the device or %NULL if not
 *	 supported. Size of the key is %TB_SWITCH_KEY_SIZE.
 * @connection_id: Connection ID used with ICM messaging
 * @connection_key: Connection key used with ICM messaging
 * @link: Root switch link this switch is connected to (ICM only)
 * @depth: Depth in the chain this switch is connected at (ICM only)
 * @rpm_complete: Completion used to wait for runtime resume to
 *		  complete (ICM only)
 *
 * When the switch is being added to or removed from the domain (other
 * switches) the domain lock needs to be held.
 */
struct tb_switch {
	struct device dev;
	struct tb_regs_switch_header config;
	struct tb_port *ports;
	struct tb_dma_port *dma_port;
	struct tb *tb;
	u64 uid;
	uuid_t *uuid;
	u16 vendor;
	u16 device;
	const char *vendor_name;
	const char *device_name;
	unsigned int generation;
	int cap_plug_events;
	int cap_lc;
	bool is_unplugged;
	u8 *drom;
	struct tb_switch_nvm *nvm;
	bool no_nvm_upgrade;
	bool safe_mode;
	bool boot;
	bool rpm;
	unsigned int authorized;
	enum tb_security_level security_level;
	u8 *key;
	u8 connection_id;
	u8 connection_key;
	u8 link;
	u8 depth;
	struct completion rpm_complete;
};

/**
 * struct tb_port - a thunderbolt port, part of a tb_switch
 * @config: Cached port configuration read from registers
 * @sw: Switch the port belongs to
 * @remote: Remote port (%NULL if not connected)
 * @xdomain: Remote host (%NULL if not connected)
 * @cap_phy: Offset of the PHY capability (%0 if not found)
 * @cap_adap: Offset of the adapter specific capability (%0 if not present)
 * @port: Port number on switch
 * @disabled: Disabled by eeprom
 * @dual_link_port: If the switch is connected using two ports, points
 *		    to the other port.
 * @link_nr: Is this primary or secondary port on the dual_link.
 * @in_hopids: Currently allocated input HopIDs
 * @out_hopids: Currently allocated output HopIDs
 */
struct tb_port {
	struct tb_regs_port_header config;
	struct tb_switch *sw;
	struct tb_port *remote;
	struct tb_xdomain *xdomain;
	int cap_phy;
	int cap_adap;
	u8 port;
	bool disabled;
	struct tb_port *dual_link_port;
	u8 link_nr:1;
	struct ida in_hopids;
	struct ida out_hopids;
};

/**
 * struct tb_path_hop - routing information for a tb_path
 * @in_port: Ingress port of a switch
 * @out_port: Egress port of a switch where the packet is routed out
 *	      (must be on the same switch as @in_port)
 * @in_hop_index: HopID where the path configuration entry is placed in
 *		  the path config space of @in_port.
 * @in_counter_index: Used counter index (not used in the driver
 *		      currently, %-1 to disable)
 * @next_hop_index: HopID of the packet when it is routed out from @out_port
 * @initial_credits: Number of initial flow control credits allocated for
 *		     the path
 *
 * Hop configuration is always done on the IN port of a switch.
 * in_port and out_port have to be on the same switch. Packets arriving on
 * in_port with "hop" = in_hop_index will get routed through out_port. The
 * next hop to take (on out_port->remote) is determined by
 * next_hop_index. When routing a packet to another switch (out->remote is
 * set) the @next_hop_index must match the @in_hop_index of that next
 * hop to make routing possible.
 *
 * in_counter_index is the index of a counter (in TB_CFG_COUNTERS) on the in
 * port.
 */
struct tb_path_hop {
	struct tb_port *in_port;
	struct tb_port *out_port;
	int in_hop_index;
	int in_counter_index;
	int next_hop_index;
	unsigned int initial_credits;
};

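/*
 * Worked example (illustrative, HopID values hypothetical): the rule
 * above means next_hop_index of hop N must equal in_hop_index of hop
 * N + 1. For a two-switch path:
 *
 *	hops[0].in_hop_index = 8;	entry programmed on switch A
 *	hops[0].next_hop_index = 9;	HopID used on the inter-switch link
 *	hops[1].in_hop_index = 9;	must match on switch B
 */
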
/**
 * enum tb_path_port - path options mask
 * @TB_PATH_NONE: Do not activate on any hop on path
 * @TB_PATH_SOURCE: Activate on the first hop (out of src)
 * @TB_PATH_INTERNAL: Activate on the intermediate hops (not the first/last)
 * @TB_PATH_DESTINATION: Activate on the last hop (into dst)
 * @TB_PATH_ALL: Activate on all hops on the path
 */
enum tb_path_port {
	TB_PATH_NONE = 0,
	TB_PATH_SOURCE = 1,
	TB_PATH_INTERNAL = 2,
	TB_PATH_DESTINATION = 4,
	TB_PATH_ALL = 7,
};

/**
 * struct tb_path - a unidirectional path between two ports
 * @tb: Pointer to the domain structure
 * @name: Name of the path (used for debugging)
 * @nfc_credits: Number of non flow controlled credits allocated for the path
 * @ingress_shared_buffer: Shared buffering used for ingress ports on the path
 * @egress_shared_buffer: Shared buffering used for egress ports on the path
 * @ingress_fc_enable: Flow control for ingress ports on the path
 * @egress_fc_enable: Flow control for egress ports on the path
 * @priority: Priority group of the path
 * @weight: Weight of the path inside the priority group
 * @drop_packages: Drop packets from queue tail or head
 * @activated: Is the path active
 * @clear_fc: Clear all flow control from the path config space entries
 *	      when deactivating this path
 * @hops: Path hops
 * @path_length: How many hops the path uses
 *
 * A path consists of a number of hops (see &struct tb_path_hop). To
 * establish a PCIe tunnel two paths have to be created between the two
 * PCIe ports.
 */
struct tb_path {
	struct tb *tb;
	const char *name;
	int nfc_credits;
	enum tb_path_port ingress_shared_buffer;
	enum tb_path_port egress_shared_buffer;
	enum tb_path_port ingress_fc_enable;
	enum tb_path_port egress_fc_enable;

	unsigned int priority:3;
	int weight:4;
	bool drop_packages;
	bool activated;
	bool clear_fc;
	struct tb_path_hop *hops;
	int path_length;
};

/* HopIDs 0-7 are reserved by the Thunderbolt protocol */
#define TB_PATH_MIN_HOPID	8
#define TB_PATH_MAX_HOPS	7

/**
 * struct tb_cm_ops - Connection manager specific operations vector
 * @driver_ready: Called right after control channel is started. Used by
 *		  ICM to send driver ready message to the firmware.
 * @start: Starts the domain
 * @stop: Stops the domain
 * @suspend_noirq: Connection manager specific suspend_noirq
 * @resume_noirq: Connection manager specific resume_noirq
 * @suspend: Connection manager specific suspend
 * @complete: Connection manager specific complete
 * @runtime_suspend: Connection manager specific runtime_suspend
 * @runtime_resume: Connection manager specific runtime_resume
 * @runtime_suspend_switch: Runtime suspend a switch
 * @runtime_resume_switch: Runtime resume a switch
 * @handle_event: Handle Thunderbolt event
 * @get_boot_acl: Get boot ACL list
 * @set_boot_acl: Set boot ACL list
 * @approve_switch: Approve switch
 * @add_switch_key: Add key to switch
 * @challenge_switch_key: Challenge switch using key
 * @disconnect_pcie_paths: Disconnects PCIe paths before NVM update
 * @approve_xdomain_paths: Approve (establish) XDomain DMA paths
 * @disconnect_xdomain_paths: Disconnect XDomain DMA paths
 */
struct tb_cm_ops {
	int (*driver_ready)(struct tb *tb);
	int (*start)(struct tb *tb);
	void (*stop)(struct tb *tb);
	int (*suspend_noirq)(struct tb *tb);
	int (*resume_noirq)(struct tb *tb);
	int (*suspend)(struct tb *tb);
	void (*complete)(struct tb *tb);
	int (*runtime_suspend)(struct tb *tb);
	int (*runtime_resume)(struct tb *tb);
	int (*runtime_suspend_switch)(struct tb_switch *sw);
	int (*runtime_resume_switch)(struct tb_switch *sw);
	void (*handle_event)(struct tb *tb, enum tb_cfg_pkg_type,
			     const void *buf, size_t size);
	int (*get_boot_acl)(struct tb *tb, uuid_t *uuids, size_t nuuids);
	int (*set_boot_acl)(struct tb *tb, const uuid_t *uuids, size_t nuuids);
	int (*approve_switch)(struct tb *tb, struct tb_switch *sw);
	int (*add_switch_key)(struct tb *tb, struct tb_switch *sw);
	int (*challenge_switch_key)(struct tb *tb, struct tb_switch *sw,
				    const u8 *challenge, u8 *response);
	int (*disconnect_pcie_paths)(struct tb *tb);
	int (*approve_xdomain_paths)(struct tb *tb, struct tb_xdomain *xd);
	int (*disconnect_xdomain_paths)(struct tb *tb, struct tb_xdomain *xd);
};

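/*
 * Minimal sketch of an ops table (the "example_cm" names are
 * hypothetical; which hooks are mandatory beyond @start/@stop is an
 * assumption here, not something this header guarantees):
 *
 *	static const struct tb_cm_ops example_cm_ops = {
 *		.start = example_cm_start,
 *		.stop = example_cm_stop,
 *		.handle_event = example_cm_handle_event,
 *	};
 */
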
static inline void *tb_priv(struct tb *tb)
{
	return (void *)tb->privdata;
}

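/*
 * Usage sketch for tb_priv() (struct example_cm is hypothetical): the
 * connection manager asks tb_domain_alloc() to reserve privsize bytes
 * and later retrieves them through tb_priv():
 *
 *	struct example_cm {
 *		struct mutex lock;
 *	};
 *
 *	tb = tb_domain_alloc(nhi, sizeof(struct example_cm));
 *	struct example_cm *ecm = tb_priv(tb);
 */
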
#define TB_AUTOSUSPEND_DELAY		15000 /* ms */

/* helper functions & macros */

/**
 * tb_upstream_port() - return the upstream port of a switch
 *
 * Every switch has an upstream port (for the root switch it is the NHI).
 *
 * During switch alloc/init tb_upstream_port()->remote may be NULL, even for
 * non root switches (on the NHI port remote is always NULL).
 *
 * Return: Returns the upstream port of the switch.
 */
static inline struct tb_port *tb_upstream_port(struct tb_switch *sw)
{
	return &sw->ports[sw->config.upstream_port_number];
}

/**
 * tb_is_upstream_port() - Is the port upstream facing
 * @port: Port to check
 *
 * Returns true if @port is an upstream facing port. In case of dual link
 * ports both ports return true.
 */
static inline bool tb_is_upstream_port(const struct tb_port *port)
{
	const struct tb_port *upstream_port = tb_upstream_port(port->sw);
	return port == upstream_port || port->dual_link_port == upstream_port;
}

static inline u64 tb_route(const struct tb_switch *sw)
{
	return ((u64) sw->config.route_hi) << 32 | sw->config.route_lo;
}

static inline struct tb_port *tb_port_at(u64 route, struct tb_switch *sw)
{
	u8 port;

	port = route >> (sw->config.depth * 8);
	if (WARN_ON(port > sw->config.max_port_number))
		return NULL;
	return &sw->ports[port];
}

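/*
 * Illustrative sketch: a route string packs one downstream port number
 * per byte, ordered by depth, so tb_port_at() extracts the byte that
 * belongs to @sw's own depth. Walking a route hop by hop from the root
 * could look like this (error handling omitted, assumes every hop has
 * a remote set):
 *
 *	struct tb_switch *sw = tb->root_switch;
 *	int i;
 *
 *	for (i = 0; i < tb_route_length(route); i++)
 *		sw = tb_port_at(route, sw)->remote->sw;
 */
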
/**
 * tb_port_has_remote() - Does the port have a switch connected downstream
 * @port: Port to check
 *
 * Returns true only when the port is the primary port and has a remote set.
 */
static inline bool tb_port_has_remote(const struct tb_port *port)
{
	if (tb_is_upstream_port(port))
		return false;
	if (!port->remote)
		return false;
	if (port->dual_link_port && port->link_nr)
		return false;

	return true;
}

static inline bool tb_port_is_null(const struct tb_port *port)
{
	return port && port->port && port->config.type == TB_TYPE_PORT;
}

static inline bool tb_port_is_pcie_down(const struct tb_port *port)
{
	return port && port->config.type == TB_TYPE_PCIE_DOWN;
}

static inline bool tb_port_is_pcie_up(const struct tb_port *port)
{
	return port && port->config.type == TB_TYPE_PCIE_UP;
}

static inline bool tb_port_is_dpin(const struct tb_port *port)
{
	return port && port->config.type == TB_TYPE_DP_HDMI_IN;
}

static inline bool tb_port_is_dpout(const struct tb_port *port)
{
	return port && port->config.type == TB_TYPE_DP_HDMI_OUT;
}

static inline int tb_sw_read(struct tb_switch *sw, void *buffer,
			     enum tb_cfg_space space, u32 offset, u32 length)
{
	if (sw->is_unplugged)
		return -ENODEV;
	return tb_cfg_read(sw->tb->ctl,
			   buffer,
			   tb_route(sw),
			   0,
			   space,
			   offset,
			   length);
}

static inline int tb_sw_write(struct tb_switch *sw, const void *buffer,
			      enum tb_cfg_space space, u32 offset, u32 length)
{
	if (sw->is_unplugged)
		return -ENODEV;
	return tb_cfg_write(sw->tb->ctl,
			    buffer,
			    tb_route(sw),
			    0,
			    space,
			    offset,
			    length);
}

static inline int tb_port_read(struct tb_port *port, void *buffer,
			       enum tb_cfg_space space, u32 offset, u32 length)
{
	if (port->sw->is_unplugged)
		return -ENODEV;
	return tb_cfg_read(port->sw->tb->ctl,
			   buffer,
			   tb_route(port->sw),
			   port->port,
			   space,
			   offset,
			   length);
}

static inline int tb_port_write(struct tb_port *port, const void *buffer,
				enum tb_cfg_space space, u32 offset, u32 length)
{
	if (port->sw->is_unplugged)
		return -ENODEV;
	return tb_cfg_write(port->sw->tb->ctl,
			    buffer,
			    tb_route(port->sw),
			    port->port,
			    space,
			    offset,
			    length);
}

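/*
 * Example (sketch): reading a single dword from the port config space
 * with the helper above, the way a capability walker might probe the
 * PHY capability found earlier:
 *
 *	u32 val;
 *	int ret;
 *
 *	ret = tb_port_read(port, &val, TB_CFG_PORT, port->cap_phy, 1);
 *	if (ret)
 *		return ret;
 */
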
#define tb_err(tb, fmt, arg...) dev_err(&(tb)->nhi->pdev->dev, fmt, ## arg)
#define tb_WARN(tb, fmt, arg...) dev_WARN(&(tb)->nhi->pdev->dev, fmt, ## arg)
#define tb_warn(tb, fmt, arg...) dev_warn(&(tb)->nhi->pdev->dev, fmt, ## arg)
#define tb_info(tb, fmt, arg...) dev_info(&(tb)->nhi->pdev->dev, fmt, ## arg)
#define tb_dbg(tb, fmt, arg...) dev_dbg(&(tb)->nhi->pdev->dev, fmt, ## arg)

#define __TB_SW_PRINT(level, sw, fmt, arg...)			\
	do {							\
		const struct tb_switch *__sw = (sw);		\
		level(__sw->tb, "%llx: " fmt,			\
		      tb_route(__sw), ## arg);			\
	} while (0)
#define tb_sw_WARN(sw, fmt, arg...) __TB_SW_PRINT(tb_WARN, sw, fmt, ##arg)
#define tb_sw_warn(sw, fmt, arg...) __TB_SW_PRINT(tb_warn, sw, fmt, ##arg)
#define tb_sw_info(sw, fmt, arg...) __TB_SW_PRINT(tb_info, sw, fmt, ##arg)
#define tb_sw_dbg(sw, fmt, arg...) __TB_SW_PRINT(tb_dbg, sw, fmt, ##arg)

#define __TB_PORT_PRINT(level, _port, fmt, arg...)			\
	do {								\
		const struct tb_port *__port = (_port);			\
		level(__port->sw->tb, "%llx:%x: " fmt,			\
		      tb_route(__port->sw), __port->port, ## arg);	\
	} while (0)
#define tb_port_WARN(port, fmt, arg...) \
	__TB_PORT_PRINT(tb_WARN, port, fmt, ##arg)
#define tb_port_warn(port, fmt, arg...) \
	__TB_PORT_PRINT(tb_warn, port, fmt, ##arg)
#define tb_port_info(port, fmt, arg...) \
	__TB_PORT_PRINT(tb_info, port, fmt, ##arg)
#define tb_port_dbg(port, fmt, arg...) \
	__TB_PORT_PRINT(tb_dbg, port, fmt, ##arg)

struct tb *icm_probe(struct tb_nhi *nhi);
struct tb *tb_probe(struct tb_nhi *nhi);

extern struct device_type tb_domain_type;
extern struct device_type tb_switch_type;

int tb_domain_init(void);
void tb_domain_exit(void);
void tb_switch_exit(void);
int tb_xdomain_init(void);
void tb_xdomain_exit(void);

struct tb *tb_domain_alloc(struct tb_nhi *nhi, size_t privsize);
int tb_domain_add(struct tb *tb);
void tb_domain_remove(struct tb *tb);
int tb_domain_suspend_noirq(struct tb *tb);
int tb_domain_resume_noirq(struct tb *tb);
int tb_domain_suspend(struct tb *tb);
void tb_domain_complete(struct tb *tb);
int tb_domain_runtime_suspend(struct tb *tb);
int tb_domain_runtime_resume(struct tb *tb);
int tb_domain_approve_switch(struct tb *tb, struct tb_switch *sw);
int tb_domain_approve_switch_key(struct tb *tb, struct tb_switch *sw);
int tb_domain_challenge_switch_key(struct tb *tb, struct tb_switch *sw);
int tb_domain_disconnect_pcie_paths(struct tb *tb);
int tb_domain_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd);
int tb_domain_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd);
int tb_domain_disconnect_all_paths(struct tb *tb);

static inline struct tb *tb_domain_get(struct tb *tb)
{
	if (tb)
		get_device(&tb->dev);
	return tb;
}

static inline void tb_domain_put(struct tb *tb)
{
	put_device(&tb->dev);
}

struct tb_switch *tb_switch_alloc(struct tb *tb, struct device *parent,
				  u64 route);
struct tb_switch *tb_switch_alloc_safe_mode(struct tb *tb,
					    struct device *parent, u64 route);
int tb_switch_configure(struct tb_switch *sw);
int tb_switch_add(struct tb_switch *sw);
void tb_switch_remove(struct tb_switch *sw);
void tb_switch_suspend(struct tb_switch *sw);
int tb_switch_resume(struct tb_switch *sw);
int tb_switch_reset(struct tb *tb, u64 route);
void tb_sw_set_unplugged(struct tb_switch *sw);
struct tb_switch *tb_switch_find_by_link_depth(struct tb *tb, u8 link,
					       u8 depth);
struct tb_switch *tb_switch_find_by_uuid(struct tb *tb, const uuid_t *uuid);
struct tb_switch *tb_switch_find_by_route(struct tb *tb, u64 route);

/**
 * tb_switch_for_each_port() - Iterate over each switch port
 * @sw: Switch whose ports to iterate
 * @p: Port used as iterator
 *
 * Iterates over each switch port skipping the control port (port %0).
 */
#define tb_switch_for_each_port(sw, p)					\
	for ((p) = &(sw)->ports[1];					\
	     (p) <= &(sw)->ports[(sw)->config.max_port_number]; (p)++)

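/*
 * Usage sketch for the iterator above (the debug message is
 * illustrative only):
 *
 *	struct tb_port *port;
 *
 *	tb_switch_for_each_port(sw, port) {
 *		if (tb_port_is_null(port))
 *			tb_port_dbg(port, "lane adapter found\n");
 *	}
 */
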
static inline struct tb_switch *tb_switch_get(struct tb_switch *sw)
{
	if (sw)
		get_device(&sw->dev);
	return sw;
}

static inline void tb_switch_put(struct tb_switch *sw)
{
	put_device(&sw->dev);
}

static inline bool tb_is_switch(const struct device *dev)
{
	return dev->type == &tb_switch_type;
}

static inline struct tb_switch *tb_to_switch(struct device *dev)
{
	if (tb_is_switch(dev))
		return container_of(dev, struct tb_switch, dev);
	return NULL;
}

static inline struct tb_switch *tb_switch_parent(struct tb_switch *sw)
{
	return tb_to_switch(sw->dev.parent);
}

static inline bool tb_switch_is_lr(const struct tb_switch *sw)
{
	return sw->config.device_id == PCI_DEVICE_ID_INTEL_LIGHT_RIDGE;
}

static inline bool tb_switch_is_er(const struct tb_switch *sw)
{
	return sw->config.device_id == PCI_DEVICE_ID_INTEL_EAGLE_RIDGE;
}

static inline bool tb_switch_is_cr(const struct tb_switch *sw)
{
	switch (sw->config.device_id) {
	case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_2C:
	case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C:
		return true;
	default:
		return false;
	}
}

static inline bool tb_switch_is_fr(const struct tb_switch *sw)
{
	switch (sw->config.device_id) {
	case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_BRIDGE:
	case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_BRIDGE:
		return true;
	default:
		return false;
	}
}

/**
 * tb_switch_is_icm() - Is the switch handled by ICM firmware
 * @sw: Switch to check
 *
 * In case there is a need to differentiate whether ICM firmware or SW CM
 * is handling @sw this function can be called. It is valid to call this
 * after tb_switch_alloc() and tb_switch_configure() have been called
 * (the latter only in the SW CM case).
 */
static inline bool tb_switch_is_icm(const struct tb_switch *sw)
{
	return !sw->config.enabled;
}

int tb_wait_for_port(struct tb_port *port, bool wait_if_unplugged);
int tb_port_add_nfc_credits(struct tb_port *port, int credits);
int tb_port_set_initial_credits(struct tb_port *port, u32 credits);
int tb_port_clear_counter(struct tb_port *port, int counter);
int tb_port_alloc_in_hopid(struct tb_port *port, int hopid, int max_hopid);
void tb_port_release_in_hopid(struct tb_port *port, int hopid);
int tb_port_alloc_out_hopid(struct tb_port *port, int hopid, int max_hopid);
void tb_port_release_out_hopid(struct tb_port *port, int hopid);
struct tb_port *tb_next_port_on_path(struct tb_port *start, struct tb_port *end,
				     struct tb_port *prev);

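/*
 * Sketch: reserving an input HopID before programming a hop entry.
 * This assumes the allocator returns the HopID on success and a
 * negative errno on failure, and that a negative max means "no upper
 * limit"; both are assumptions of this sketch:
 *
 *	int hopid = tb_port_alloc_in_hopid(port, TB_PATH_MIN_HOPID, -1);
 *	if (hopid < 0)
 *		return hopid;
 *	...
 *	tb_port_release_in_hopid(port, hopid);
 */
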
int tb_switch_find_vse_cap(struct tb_switch *sw, enum tb_switch_vse_cap vsec);
int tb_port_find_cap(struct tb_port *port, enum tb_port_cap cap);
bool tb_port_is_enabled(struct tb_port *port);

bool tb_pci_port_is_enabled(struct tb_port *port);
int tb_pci_port_enable(struct tb_port *port, bool enable);

int tb_dp_port_hpd_is_active(struct tb_port *port);
int tb_dp_port_hpd_clear(struct tb_port *port);
int tb_dp_port_set_hops(struct tb_port *port, unsigned int video,
			unsigned int aux_tx, unsigned int aux_rx);
bool tb_dp_port_is_enabled(struct tb_port *port);
int tb_dp_port_enable(struct tb_port *port, bool enable);

struct tb_path *tb_path_discover(struct tb_port *src, int src_hopid,
				 struct tb_port *dst, int dst_hopid,
				 struct tb_port **last, const char *name);
struct tb_path *tb_path_alloc(struct tb *tb, struct tb_port *src, int src_hopid,
			      struct tb_port *dst, int dst_hopid, int link_nr,
			      const char *name);
void tb_path_free(struct tb_path *path);
int tb_path_activate(struct tb_path *path);
void tb_path_deactivate(struct tb_path *path);
bool tb_path_is_invalid(struct tb_path *path);

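/*
 * Lifecycle sketch for the path API above (error handling trimmed; the
 * "PCIe" name and the -EIO mapping are illustrative):
 *
 *	path = tb_path_alloc(tb, src, src_hopid, dst, dst_hopid, 0, "PCIe");
 *	if (!path)
 *		return -ENOMEM;
 *	if (tb_path_activate(path)) {
 *		tb_path_free(path);
 *		return -EIO;
 *	}
 *	...
 *	tb_path_deactivate(path);
 *	tb_path_free(path);
 */
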
int tb_drom_read(struct tb_switch *sw);
int tb_drom_read_uid_only(struct tb_switch *sw, u64 *uid);

int tb_lc_read_uuid(struct tb_switch *sw, u32 *uuid);
int tb_lc_configure_link(struct tb_switch *sw);
void tb_lc_unconfigure_link(struct tb_switch *sw);
int tb_lc_set_sleep(struct tb_switch *sw);

static inline int tb_route_length(u64 route)
{
	return (fls64(route) + TB_ROUTE_SHIFT - 1) / TB_ROUTE_SHIFT;
}

/**
 * tb_downstream_route() - get route to downstream switch
 *
 * Port must not be the upstream port (otherwise a loop is created).
 *
 * Return: Returns a route to the switch behind @port.
 */
static inline u64 tb_downstream_route(struct tb_port *port)
{
	return tb_route(port->sw)
	       | ((u64) port->port << (port->sw->config.depth * 8));
}

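/*
 * Worked example: for a parent switch reached through root port 1
 * (route 0x1, depth 1) whose downstream port 3 leads to a child,
 *
 *	tb_downstream_route(port) == 0x1 | (3ULL << 8) == 0x301
 *
 * so the child's route string is 0x301.
 */
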
bool tb_xdomain_handle_request(struct tb *tb, enum tb_cfg_pkg_type type,
			       const void *buf, size_t size);
struct tb_xdomain *tb_xdomain_alloc(struct tb *tb, struct device *parent,
				    u64 route, const uuid_t *local_uuid,
				    const uuid_t *remote_uuid);
void tb_xdomain_add(struct tb_xdomain *xd);
void tb_xdomain_remove(struct tb_xdomain *xd);
struct tb_xdomain *tb_xdomain_find_by_link_depth(struct tb *tb, u8 link,
						 u8 depth);

#endif