// SPDX-License-Identifier: GPL-2.0
/*
 * Thunderbolt XDomain discovery protocol support
 *
 * Copyright (C) 2017, Intel Corporation
 * Authors: Michael Jamet <michael.jamet@intel.com>
 *          Mika Westerberg <mika.westerberg@linux.intel.com>
 */

#include <linux/device.h>
#include <linux/delay.h>
#include <linux/kmod.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>
#include <linux/prandom.h>
#include <linux/utsname.h>
#include <linux/uuid.h>
#include <linux/workqueue.h>

#include "tb.h"

#define XDOMAIN_DEFAULT_TIMEOUT			1000 /* ms */
#define XDOMAIN_UUID_RETRIES			10
#define XDOMAIN_PROPERTIES_RETRIES		10
#define XDOMAIN_PROPERTIES_CHANGED_RETRIES	10
#define XDOMAIN_BONDING_WAIT			100 /* ms */
#define XDOMAIN_DEFAULT_MAX_HOPID		15

struct xdomain_request_work {
	struct work_struct work;
	struct tb_xdp_header *pkg;
	struct tb *tb;
};

static bool tb_xdomain_enabled = true;
module_param_named(xdomain, tb_xdomain_enabled, bool, 0444);
MODULE_PARM_DESC(xdomain, "allow XDomain protocol (default: true)");

/*
 * Serializes access to the properties and protocol handlers below. If
 * you need to take both this lock and the struct tb_xdomain lock, take
 * this one first.
 */
static DEFINE_MUTEX(xdomain_lock);
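/*
 * In practice the ordering looks like this (cf. update_property_block()
 * below); illustrative sketch only:
 *
 *	mutex_lock(&xdomain_lock);
 *	mutex_lock(&xd->lock);
 *	...
 *	mutex_unlock(&xd->lock);
 *	mutex_unlock(&xdomain_lock);
 */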

/* Properties exposed to the remote domains */
static struct tb_property_dir *xdomain_property_dir;
static u32 xdomain_property_block_gen;

/* Additional protocol handlers */
static LIST_HEAD(protocol_handlers);

/* UUID for XDomain discovery protocol: b638d70e-42ff-40bb-97c2-90e2c0b2ff07 */
static const uuid_t tb_xdp_uuid =
	UUID_INIT(0xb638d70e, 0x42ff, 0x40bb,
		  0x97, 0xc2, 0x90, 0xe2, 0xc0, 0xb2, 0xff, 0x07);

bool tb_is_xdomain_enabled(void)
{
	return tb_xdomain_enabled && tb_acpi_is_xdomain_allowed();
}

static bool tb_xdomain_match(const struct tb_cfg_request *req,
			     const struct ctl_pkg *pkg)
{
	switch (pkg->frame.eof) {
	case TB_CFG_PKG_ERROR:
		return true;

	case TB_CFG_PKG_XDOMAIN_RESP: {
		const struct tb_xdp_header *res_hdr = pkg->buffer;
		const struct tb_xdp_header *req_hdr = req->request;

		if (pkg->frame.size < req->response_size / 4)
			return false;

		/* Make sure route matches */
		if ((res_hdr->xd_hdr.route_hi & ~BIT(31)) !=
		     req_hdr->xd_hdr.route_hi)
			return false;
		if ((res_hdr->xd_hdr.route_lo) != req_hdr->xd_hdr.route_lo)
			return false;

		/* Check that the XDomain protocol matches */
		if (!uuid_equal(&res_hdr->uuid, &req_hdr->uuid))
			return false;

		return true;
	}

	default:
		return false;
	}
}

static bool tb_xdomain_copy(struct tb_cfg_request *req,
			    const struct ctl_pkg *pkg)
{
	memcpy(req->response, pkg->buffer, req->response_size);
	req->result.err = 0;
	return true;
}

static void response_ready(void *data)
{
	tb_cfg_request_put(data);
}

static int __tb_xdomain_response(struct tb_ctl *ctl, const void *response,
				 size_t size, enum tb_cfg_pkg_type type)
{
	struct tb_cfg_request *req;

	req = tb_cfg_request_alloc();
	if (!req)
		return -ENOMEM;

	req->match = tb_xdomain_match;
	req->copy = tb_xdomain_copy;
	req->request = response;
	req->request_size = size;
	req->request_type = type;

	return tb_cfg_request(ctl, req, response_ready, req);
}
/**
 * tb_xdomain_response() - Send an XDomain response message
 * @xd: XDomain to send the message to
 * @response: Response to send
 * @size: Size of the response
 * @type: PDF type of the response
 *
 * This can be used to send an XDomain response message to the other
 * domain. No response for the message is expected.
 *
 * Return: %0 in case of success and negative errno in case of failure
 */
int tb_xdomain_response(struct tb_xdomain *xd, const void *response,
			size_t size, enum tb_cfg_pkg_type type)
{
	return __tb_xdomain_response(xd->tb->ctl, response, size, type);
}
EXPORT_SYMBOL_GPL(tb_xdomain_response);

static int __tb_xdomain_request(struct tb_ctl *ctl, const void *request,
	size_t request_size, enum tb_cfg_pkg_type request_type, void *response,
	size_t response_size, enum tb_cfg_pkg_type response_type,
	unsigned int timeout_msec)
{
	struct tb_cfg_request *req;
	struct tb_cfg_result res;

	req = tb_cfg_request_alloc();
	if (!req)
		return -ENOMEM;

	req->match = tb_xdomain_match;
	req->copy = tb_xdomain_copy;
	req->request = request;
	req->request_size = request_size;
	req->request_type = request_type;
	req->response = response;
	req->response_size = response_size;
	req->response_type = response_type;

	res = tb_cfg_request_sync(ctl, req, timeout_msec);

	tb_cfg_request_put(req);

	return res.err == 1 ? -EIO : res.err;
}

/**
 * tb_xdomain_request() - Send an XDomain request
 * @xd: XDomain to send the request to
 * @request: Request to send
 * @request_size: Size of the request in bytes
 * @request_type: PDF type of the request
 * @response: Response is copied here
 * @response_size: Expected size of the response in bytes
 * @response_type: Expected PDF type of the response
 * @timeout_msec: Timeout in milliseconds to wait for the response
 *
 * This function can be used to send XDomain control channel messages to
 * the other domain. The function waits until the response is received
 * or the timeout triggers, whichever comes first.
 *
 * Return: %0 in case of success and negative errno in case of failure
 */
int tb_xdomain_request(struct tb_xdomain *xd, const void *request,
	size_t request_size, enum tb_cfg_pkg_type request_type,
	void *response, size_t response_size,
	enum tb_cfg_pkg_type response_type, unsigned int timeout_msec)
{
	return __tb_xdomain_request(xd->tb->ctl, request, request_size,
				    request_type, response, response_size,
				    response_type, timeout_msec);
}
EXPORT_SYMBOL_GPL(tb_xdomain_request);
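/*
 * Illustrative usage sketch (not part of this file): a service driver
 * could drive a request/response exchange on top of these helpers. The
 * my_req/my_res structures and MY_CMD_QUERY command are hypothetical
 * stand-ins for whatever packet format the service defines.
 *
 *	struct my_req req = { .cmd = MY_CMD_QUERY };
 *	struct my_res res = { };
 *	int ret;
 *
 *	ret = tb_xdomain_request(xd, &req, sizeof(req),
 *				 TB_CFG_PKG_XDOMAIN_REQ, &res, sizeof(res),
 *				 TB_CFG_PKG_XDOMAIN_RESP, 1000);
 *	if (ret)
 *		return ret;
 *
 * Incoming requests from the peer would then be answered with
 * tb_xdomain_response() from the registered protocol handler.
 */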

static inline void tb_xdp_fill_header(struct tb_xdp_header *hdr, u64 route,
	u8 sequence, enum tb_xdp_type type, size_t size)
{
	u32 length_sn;

	length_sn = (size - sizeof(hdr->xd_hdr)) / 4;
	length_sn |= (sequence << TB_XDOMAIN_SN_SHIFT) & TB_XDOMAIN_SN_MASK;

	hdr->xd_hdr.route_hi = upper_32_bits(route);
	hdr->xd_hdr.route_lo = lower_32_bits(route);
	hdr->xd_hdr.length_sn = length_sn;
	hdr->type = type;
	memcpy(&hdr->uuid, &tb_xdp_uuid, sizeof(tb_xdp_uuid));
}
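/*
 * Worked example (illustrative): the length field counts dwords that
 * follow the basic XDomain header, and the sequence number is OR'd into
 * the same word through TB_XDOMAIN_SN_SHIFT/TB_XDOMAIN_SN_MASK. Assuming
 * the basic header (route_hi, route_lo, length_sn) is 12 bytes, a
 * 28-byte message with sequence number 2 packs as:
 *
 *	length_sn = (28 - 12) / 4;				-> 4 dwords
 *	length_sn |= (2 << TB_XDOMAIN_SN_SHIFT) & TB_XDOMAIN_SN_MASK;
 */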

static int tb_xdp_handle_error(const struct tb_xdp_error_response *res)
{
	if (res->hdr.type != ERROR_RESPONSE)
		return 0;

	switch (res->error) {
	case ERROR_UNKNOWN_PACKET:
	case ERROR_UNKNOWN_DOMAIN:
		return -EIO;
	case ERROR_NOT_SUPPORTED:
		return -ENOTSUPP;
	case ERROR_NOT_READY:
		return -EAGAIN;
	default:
		break;
	}

	return 0;
}

static int tb_xdp_uuid_request(struct tb_ctl *ctl, u64 route, int retry,
			       uuid_t *uuid)
{
	struct tb_xdp_uuid_response res;
	struct tb_xdp_uuid req;
	int ret;

	memset(&req, 0, sizeof(req));
	tb_xdp_fill_header(&req.hdr, route, retry % 4, UUID_REQUEST,
			   sizeof(req));

	memset(&res, 0, sizeof(res));
	ret = __tb_xdomain_request(ctl, &req, sizeof(req),
				   TB_CFG_PKG_XDOMAIN_REQ, &res, sizeof(res),
				   TB_CFG_PKG_XDOMAIN_RESP,
				   XDOMAIN_DEFAULT_TIMEOUT);
	if (ret)
		return ret;

	ret = tb_xdp_handle_error(&res.err);
	if (ret)
		return ret;

	uuid_copy(uuid, &res.src_uuid);
	return 0;
}

static int tb_xdp_uuid_response(struct tb_ctl *ctl, u64 route, u8 sequence,
				const uuid_t *uuid)
{
	struct tb_xdp_uuid_response res;

	memset(&res, 0, sizeof(res));
	tb_xdp_fill_header(&res.hdr, route, sequence, UUID_RESPONSE,
			   sizeof(res));

	uuid_copy(&res.src_uuid, uuid);
	res.src_route_hi = upper_32_bits(route);
	res.src_route_lo = lower_32_bits(route);

	return __tb_xdomain_response(ctl, &res, sizeof(res),
				     TB_CFG_PKG_XDOMAIN_RESP);
}

static int tb_xdp_error_response(struct tb_ctl *ctl, u64 route, u8 sequence,
				 enum tb_xdp_error error)
{
	struct tb_xdp_error_response res;

	memset(&res, 0, sizeof(res));
	tb_xdp_fill_header(&res.hdr, route, sequence, ERROR_RESPONSE,
			   sizeof(res));
	res.error = error;

	return __tb_xdomain_response(ctl, &res, sizeof(res),
				     TB_CFG_PKG_XDOMAIN_RESP);
}
static int tb_xdp_properties_request(struct tb_ctl *ctl, u64 route,
	const uuid_t *src_uuid, const uuid_t *dst_uuid, int retry,
	u32 **block, u32 *generation)
{
	struct tb_xdp_properties_response *res;
	struct tb_xdp_properties req;
	u16 data_len, len;
	size_t total_size;
	u32 *data = NULL;
	int ret;

	total_size = sizeof(*res) + TB_XDP_PROPERTIES_MAX_DATA_LENGTH * 4;
	res = kzalloc(total_size, GFP_KERNEL);
	if (!res)
		return -ENOMEM;

	memset(&req, 0, sizeof(req));
	tb_xdp_fill_header(&req.hdr, route, retry % 4, PROPERTIES_REQUEST,
			   sizeof(req));
	memcpy(&req.src_uuid, src_uuid, sizeof(*src_uuid));
	memcpy(&req.dst_uuid, dst_uuid, sizeof(*dst_uuid));

	len = 0;
	data_len = 0;

	do {
		ret = __tb_xdomain_request(ctl, &req, sizeof(req),
					   TB_CFG_PKG_XDOMAIN_REQ, res,
					   total_size, TB_CFG_PKG_XDOMAIN_RESP,
					   XDOMAIN_DEFAULT_TIMEOUT);
		if (ret)
			goto err;

		ret = tb_xdp_handle_error(&res->err);
		if (ret)
			goto err;

		/*
		 * Package length includes the whole payload without the
		 * XDomain header. Validate first that the package is at
		 * least the size of the response structure.
		 */
		len = res->hdr.xd_hdr.length_sn & TB_XDOMAIN_LENGTH_MASK;
		if (len < sizeof(*res) / 4) {
			ret = -EINVAL;
			goto err;
		}

		len += sizeof(res->hdr.xd_hdr) / 4;
		len -= sizeof(*res) / 4;

		if (res->offset != req.offset) {
			ret = -EINVAL;
			goto err;
		}

		/*
		 * On the first round, allocate a block that has enough
		 * space for the whole properties block.
		 */
		if (!data) {
			data_len = res->data_length;
			if (data_len > TB_XDP_PROPERTIES_MAX_LENGTH) {
				ret = -E2BIG;
				goto err;
			}

			data = kcalloc(data_len, sizeof(u32), GFP_KERNEL);
			if (!data) {
				ret = -ENOMEM;
				goto err;
			}
		}

		memcpy(data + req.offset, res->data, len * 4);
		req.offset += len;
	} while (!data_len || req.offset < data_len);

	*block = data;
	*generation = res->generation;

	kfree(res);

	return data_len;

err:
	kfree(data);
	kfree(res);

	return ret;
}

static int tb_xdp_properties_response(struct tb *tb, struct tb_ctl *ctl,
	struct tb_xdomain *xd, u8 sequence, const struct tb_xdp_properties *req)
{
	struct tb_xdp_properties_response *res;
	size_t total_size;
	u16 len;
	int ret;

	/*
	 * Currently we expect all requests to be directed to us. The
	 * protocol does support forwarding, though, which we might add
	 * support for later on.
	 */
	if (!uuid_equal(xd->local_uuid, &req->dst_uuid)) {
		tb_xdp_error_response(ctl, xd->route, sequence,
				      ERROR_UNKNOWN_DOMAIN);
		return 0;
	}

	mutex_lock(&xd->lock);

	if (req->offset >= xd->local_property_block_len) {
		mutex_unlock(&xd->lock);
		return -EINVAL;
	}

	len = xd->local_property_block_len - req->offset;
	len = min_t(u16, len, TB_XDP_PROPERTIES_MAX_DATA_LENGTH);
	total_size = sizeof(*res) + len * 4;

	res = kzalloc(total_size, GFP_KERNEL);
	if (!res) {
		mutex_unlock(&xd->lock);
		return -ENOMEM;
	}

	tb_xdp_fill_header(&res->hdr, xd->route, sequence, PROPERTIES_RESPONSE,
			   total_size);
	res->generation = xd->local_property_block_gen;
	res->data_length = xd->local_property_block_len;
	res->offset = req->offset;
	uuid_copy(&res->src_uuid, xd->local_uuid);
	uuid_copy(&res->dst_uuid, &req->src_uuid);
	memcpy(res->data, &xd->local_property_block[req->offset], len * 4);

	mutex_unlock(&xd->lock);

	ret = __tb_xdomain_response(ctl, res, total_size,
				    TB_CFG_PKG_XDOMAIN_RESP);

	kfree(res);
	return ret;
}

static int tb_xdp_properties_changed_request(struct tb_ctl *ctl, u64 route,
					     int retry, const uuid_t *uuid)
{
	struct tb_xdp_properties_changed_response res;
	struct tb_xdp_properties_changed req;
	int ret;

	memset(&req, 0, sizeof(req));
	tb_xdp_fill_header(&req.hdr, route, retry % 4,
			   PROPERTIES_CHANGED_REQUEST, sizeof(req));
	uuid_copy(&req.src_uuid, uuid);

	memset(&res, 0, sizeof(res));
	ret = __tb_xdomain_request(ctl, &req, sizeof(req),
				   TB_CFG_PKG_XDOMAIN_REQ, &res, sizeof(res),
				   TB_CFG_PKG_XDOMAIN_RESP,
				   XDOMAIN_DEFAULT_TIMEOUT);
	if (ret)
		return ret;

	return tb_xdp_handle_error(&res.err);
}

static int
tb_xdp_properties_changed_response(struct tb_ctl *ctl, u64 route, u8 sequence)
{
	struct tb_xdp_properties_changed_response res;

	memset(&res, 0, sizeof(res));
	tb_xdp_fill_header(&res.hdr, route, sequence,
			   PROPERTIES_CHANGED_RESPONSE, sizeof(res));
	return __tb_xdomain_response(ctl, &res, sizeof(res),
				     TB_CFG_PKG_XDOMAIN_RESP);
}
/**
 * tb_register_protocol_handler() - Register protocol handler
 * @handler: Handler to register
 *
 * This allows XDomain service drivers to hook into incoming XDomain
 * messages. After this function is called, the service driver must be
 * able to handle calls to the callback whenever a package with the
 * registered protocol is received.
 */
int tb_register_protocol_handler(struct tb_protocol_handler *handler)
{
	if (!handler->uuid || !handler->callback)
		return -EINVAL;
	if (uuid_equal(handler->uuid, &tb_xdp_uuid))
		return -EINVAL;

	mutex_lock(&xdomain_lock);
	list_add_tail(&handler->list, &protocol_handlers);
	mutex_unlock(&xdomain_lock);

	return 0;
}
EXPORT_SYMBOL_GPL(tb_register_protocol_handler);
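/*
 * Illustrative registration sketch (not part of this file); the UUID,
 * callback body and state pointer are hypothetical. The callback
 * signature follows struct tb_protocol_handler:
 *
 *	static int my_proto_callback(const void *buf, size_t size, void *data)
 *	{
 *		// decode the incoming XDomain packet here
 *		return 0;
 *	}
 *
 *	static struct tb_protocol_handler my_handler = {
 *		.uuid = &my_proto_uuid,
 *		.callback = my_proto_callback,
 *		.data = &my_state,
 *	};
 *
 *	ret = tb_register_protocol_handler(&my_handler);
 */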

/**
 * tb_unregister_protocol_handler() - Unregister protocol handler
 * @handler: Handler to unregister
 *
 * Removes the previously registered protocol handler.
 */
void tb_unregister_protocol_handler(struct tb_protocol_handler *handler)
{
	mutex_lock(&xdomain_lock);
	list_del_init(&handler->list);
	mutex_unlock(&xdomain_lock);
}
EXPORT_SYMBOL_GPL(tb_unregister_protocol_handler);
static void update_property_block(struct tb_xdomain *xd)
{
	mutex_lock(&xdomain_lock);
	mutex_lock(&xd->lock);
	/*
	 * If the local property block is not up-to-date, rebuild it now
	 * based on the global property template.
	 */
	if (!xd->local_property_block ||
	    xd->local_property_block_gen < xdomain_property_block_gen) {
		struct tb_property_dir *dir;
		int ret, block_len;
		u32 *block;

		dir = tb_property_copy_dir(xdomain_property_dir);
		if (!dir) {
			dev_warn(&xd->dev, "failed to copy properties\n");
			goto out_unlock;
		}

		/* Fill in non-static properties now */
		tb_property_add_text(dir, "deviceid", utsname()->nodename);
		tb_property_add_immediate(dir, "maxhopid", xd->local_max_hopid);

		ret = tb_property_format_dir(dir, NULL, 0);
		if (ret < 0) {
			dev_warn(&xd->dev, "local property block creation failed\n");
			tb_property_free_dir(dir);
			goto out_unlock;
		}

		block_len = ret;
		block = kcalloc(block_len, sizeof(*block), GFP_KERNEL);
		if (!block) {
			tb_property_free_dir(dir);
			goto out_unlock;
		}

		ret = tb_property_format_dir(dir, block, block_len);
		if (ret) {
			dev_warn(&xd->dev, "property block generation failed\n");
			tb_property_free_dir(dir);
			kfree(block);
			goto out_unlock;
		}

		tb_property_free_dir(dir);
		/* Release the previous block */
		kfree(xd->local_property_block);
		/* Assign new one */
		xd->local_property_block = block;
		xd->local_property_block_len = block_len;
		xd->local_property_block_gen = xdomain_property_block_gen;
	}

out_unlock:
	mutex_unlock(&xd->lock);
	mutex_unlock(&xdomain_lock);
}
static void tb_xdp_handle_request(struct work_struct *work)
{
	struct xdomain_request_work *xw = container_of(work, typeof(*xw), work);
	const struct tb_xdp_header *pkg = xw->pkg;
	const struct tb_xdomain_header *xhdr = &pkg->xd_hdr;
	struct tb *tb = xw->tb;
	struct tb_ctl *ctl = tb->ctl;
	struct tb_xdomain *xd;
	const uuid_t *uuid;
	int ret = 0;
	u32 sequence;
	u64 route;

	route = ((u64)xhdr->route_hi << 32 | xhdr->route_lo) & ~BIT_ULL(63);
	sequence = xhdr->length_sn & TB_XDOMAIN_SN_MASK;
	sequence >>= TB_XDOMAIN_SN_SHIFT;

	mutex_lock(&tb->lock);
	if (tb->root_switch)
		uuid = tb->root_switch->uuid;
	else
		uuid = NULL;
	mutex_unlock(&tb->lock);

	if (!uuid) {
		tb_xdp_error_response(ctl, route, sequence, ERROR_NOT_READY);
		goto out;
	}

	tb_dbg(tb, "%llx: received XDomain request %#x\n", route, pkg->type);

	xd = tb_xdomain_find_by_route_locked(tb, route);
	if (xd)
		update_property_block(xd);

	switch (pkg->type) {
	case PROPERTIES_REQUEST:
		if (xd) {
			ret = tb_xdp_properties_response(tb, ctl, xd, sequence,
				(const struct tb_xdp_properties *)pkg);
		}
		break;

	case PROPERTIES_CHANGED_REQUEST:
		ret = tb_xdp_properties_changed_response(ctl, route, sequence);

		/*
		 * Since the properties have been changed, let's update
		 * the xdomain related to this connection as well in
		 * case there is a change in services it offers.
		 */
		if (xd && device_is_registered(&xd->dev)) {
			queue_delayed_work(tb->wq, &xd->get_properties_work,
					   msecs_to_jiffies(50));
		}
		break;

	case UUID_REQUEST_OLD:
	case UUID_REQUEST:
		ret = tb_xdp_uuid_response(ctl, route, sequence, uuid);
		break;

	default:
		tb_xdp_error_response(ctl, route, sequence,
				      ERROR_NOT_SUPPORTED);
		break;
	}

	tb_xdomain_put(xd);

	if (ret) {
		tb_warn(tb, "failed to send XDomain response for %#x\n",
			pkg->type);
	}

out:
	kfree(xw->pkg);
	kfree(xw);

	tb_domain_put(tb);
}

static bool
tb_xdp_schedule_request(struct tb *tb, const struct tb_xdp_header *hdr,
			size_t size)
{
	struct xdomain_request_work *xw;

	xw = kmalloc(sizeof(*xw), GFP_KERNEL);
	if (!xw)
		return false;

	INIT_WORK(&xw->work, tb_xdp_handle_request);
	xw->pkg = kmemdup(hdr, size, GFP_KERNEL);
	if (!xw->pkg) {
		kfree(xw);
		return false;
	}
	xw->tb = tb_domain_get(tb);

	schedule_work(&xw->work);
	return true;
}
/**
 * tb_register_service_driver() - Register XDomain service driver
 * @drv: Driver to register
 *
 * Registers the new service driver @drv with the bus.
 */
int tb_register_service_driver(struct tb_service_driver *drv)
{
	drv->driver.bus = &tb_bus_type;
	return driver_register(&drv->driver);
}
EXPORT_SYMBOL_GPL(tb_register_service_driver);
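/*
 * Illustrative sketch of a minimal service driver registration (not
 * part of this file); the "network" key mirrors what a networking
 * service would use, my_probe/my_remove are hypothetical:
 *
 *	static const struct tb_service_id my_ids[] = {
 *		{ TB_SERVICE("network", 1) },
 *		{ },
 *	};
 *	MODULE_DEVICE_TABLE(tbsvc, my_ids);
 *
 *	static struct tb_service_driver my_driver = {
 *		.driver = {
 *			.name = "my-service",
 *			.owner = THIS_MODULE,
 *		},
 *		.probe = my_probe,
 *		.remove = my_remove,
 *		.id_table = my_ids,
 *	};
 *
 *	ret = tb_register_service_driver(&my_driver);
 */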

/**
 * tb_unregister_service_driver() - Unregister XDomain service driver
 * @drv: Driver to unregister
 *
 * Unregisters XDomain service driver from the bus.
 */
void tb_unregister_service_driver(struct tb_service_driver *drv)
{
	driver_unregister(&drv->driver);
}
EXPORT_SYMBOL_GPL(tb_unregister_service_driver);

static ssize_t key_show(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	struct tb_service *svc = container_of(dev, struct tb_service, dev);

	/*
	 * It should be null terminated but anything else is pretty much
	 * allowed.
	 */
	return sprintf(buf, "%*pE\n", (int)strlen(svc->key), svc->key);
}
static DEVICE_ATTR_RO(key);

static int get_modalias(struct tb_service *svc, char *buf, size_t size)
{
	return snprintf(buf, size, "tbsvc:k%sp%08Xv%08Xr%08X", svc->key,
			svc->prtcid, svc->prtcvers, svc->prtcrevs);
}
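/*
 * For example (illustrative values), a "network" service with protocol
 * id 1, version 1 and revision 1 yields:
 *
 *	tbsvc:knetworkp00000001v00000001r00000001
 *
 * which is the string tbsvc device table entries are matched against.
 */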

static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct tb_service *svc = container_of(dev, struct tb_service, dev);

	/* Full buffer size except new line and null termination */
	get_modalias(svc, buf, PAGE_SIZE - 2);
	return strlen(strcat(buf, "\n"));
}
static DEVICE_ATTR_RO(modalias);

static ssize_t prtcid_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct tb_service *svc = container_of(dev, struct tb_service, dev);

	return sprintf(buf, "%u\n", svc->prtcid);
}
static DEVICE_ATTR_RO(prtcid);

static ssize_t prtcvers_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct tb_service *svc = container_of(dev, struct tb_service, dev);

	return sprintf(buf, "%u\n", svc->prtcvers);
}
static DEVICE_ATTR_RO(prtcvers);

static ssize_t prtcrevs_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct tb_service *svc = container_of(dev, struct tb_service, dev);

	return sprintf(buf, "%u\n", svc->prtcrevs);
}
static DEVICE_ATTR_RO(prtcrevs);

static ssize_t prtcstns_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct tb_service *svc = container_of(dev, struct tb_service, dev);

	return sprintf(buf, "0x%08x\n", svc->prtcstns);
}
static DEVICE_ATTR_RO(prtcstns);

static struct attribute *tb_service_attrs[] = {
	&dev_attr_key.attr,
	&dev_attr_modalias.attr,
	&dev_attr_prtcid.attr,
	&dev_attr_prtcvers.attr,
	&dev_attr_prtcrevs.attr,
	&dev_attr_prtcstns.attr,
	NULL,
};

static const struct attribute_group tb_service_attr_group = {
	.attrs = tb_service_attrs,
};

static const struct attribute_group *tb_service_attr_groups[] = {
	&tb_service_attr_group,
	NULL,
};

static int tb_service_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	struct tb_service *svc = container_of(dev, struct tb_service, dev);
	char modalias[64];

	get_modalias(svc, modalias, sizeof(modalias));
	return add_uevent_var(env, "MODALIAS=%s", modalias);
}

static void tb_service_release(struct device *dev)
{
	struct tb_service *svc = container_of(dev, struct tb_service, dev);
	struct tb_xdomain *xd = tb_service_parent(svc);

	tb_service_debugfs_remove(svc);
	ida_simple_remove(&xd->service_ids, svc->id);
	kfree(svc->key);
	kfree(svc);
}

struct device_type tb_service_type = {
	.name = "thunderbolt_service",
	.groups = tb_service_attr_groups,
	.uevent = tb_service_uevent,
	.release = tb_service_release,
};
EXPORT_SYMBOL_GPL(tb_service_type);

static int remove_missing_service(struct device *dev, void *data)
{
	struct tb_xdomain *xd = data;
	struct tb_service *svc;

	svc = tb_to_service(dev);
	if (!svc)
		return 0;

	if (!tb_property_find(xd->remote_properties, svc->key,
			      TB_PROPERTY_TYPE_DIRECTORY))
		device_unregister(dev);

	return 0;
}

static int find_service(struct device *dev, void *data)
{
	const struct tb_property *p = data;
	struct tb_service *svc;

	svc = tb_to_service(dev);
	if (!svc)
		return 0;

	return !strcmp(svc->key, p->key);
}

static int populate_service(struct tb_service *svc,
			    struct tb_property *property)
{
	struct tb_property_dir *dir = property->value.dir;
	struct tb_property *p;

	/* Fill in standard properties */
	p = tb_property_find(dir, "prtcid", TB_PROPERTY_TYPE_VALUE);
	if (p)
		svc->prtcid = p->value.immediate;
	p = tb_property_find(dir, "prtcvers", TB_PROPERTY_TYPE_VALUE);
	if (p)
		svc->prtcvers = p->value.immediate;
	p = tb_property_find(dir, "prtcrevs", TB_PROPERTY_TYPE_VALUE);
	if (p)
		svc->prtcrevs = p->value.immediate;
	p = tb_property_find(dir, "prtcstns", TB_PROPERTY_TYPE_VALUE);
	if (p)
		svc->prtcstns = p->value.immediate;

	svc->key = kstrdup(property->key, GFP_KERNEL);
	if (!svc->key)
		return -ENOMEM;

	return 0;
}

static void enumerate_services(struct tb_xdomain *xd)
{
	struct tb_service *svc;
	struct tb_property *p;
	struct device *dev;
	int id;

	/*
	 * First remove all services that are not available anymore in
	 * the updated property block.
	 */
	device_for_each_child_reverse(&xd->dev, xd, remove_missing_service);

	/* Then re-enumerate properties creating new services as we go */
	tb_property_for_each(xd->remote_properties, p) {
		if (p->type != TB_PROPERTY_TYPE_DIRECTORY)
			continue;

		/* If the service exists already we are fine */
		dev = device_find_child(&xd->dev, p, find_service);
		if (dev) {
			put_device(dev);
			continue;
		}

		svc = kzalloc(sizeof(*svc), GFP_KERNEL);
		if (!svc)
			break;

		if (populate_service(svc, p)) {
			kfree(svc);
			break;
		}

		id = ida_simple_get(&xd->service_ids, 0, 0, GFP_KERNEL);
		if (id < 0) {
			kfree(svc->key);
			kfree(svc);
			break;
		}
		svc->id = id;
		svc->dev.bus = &tb_bus_type;
		svc->dev.type = &tb_service_type;
		svc->dev.parent = &xd->dev;
		dev_set_name(&svc->dev, "%s.%d", dev_name(&xd->dev), svc->id);

		tb_service_debugfs_init(svc);

		if (device_register(&svc->dev)) {
			put_device(&svc->dev);
			break;
		}
	}
}

static int populate_properties(struct tb_xdomain *xd,
			       struct tb_property_dir *dir)
{
	const struct tb_property *p;

	/* Required properties */
	p = tb_property_find(dir, "deviceid", TB_PROPERTY_TYPE_VALUE);
	if (!p)
		return -EINVAL;
	xd->device = p->value.immediate;

	p = tb_property_find(dir, "vendorid", TB_PROPERTY_TYPE_VALUE);
	if (!p)
		return -EINVAL;
	xd->vendor = p->value.immediate;

	p = tb_property_find(dir, "maxhopid", TB_PROPERTY_TYPE_VALUE);
	/*
	 * USB4 inter-domain spec suggests using 15 as HopID if the
	 * other end does not announce it in a property. This is for
	 * TBT3 compatibility.
	 */
	xd->remote_max_hopid = p ? p->value.immediate : XDOMAIN_DEFAULT_MAX_HOPID;

	kfree(xd->device_name);
	xd->device_name = NULL;
	kfree(xd->vendor_name);
	xd->vendor_name = NULL;

	/* Optional properties */
	p = tb_property_find(dir, "deviceid", TB_PROPERTY_TYPE_TEXT);
	if (p)
		xd->device_name = kstrdup(p->value.text, GFP_KERNEL);
	p = tb_property_find(dir, "vendorid", TB_PROPERTY_TYPE_TEXT);
	if (p)
		xd->vendor_name = kstrdup(p->value.text, GFP_KERNEL);

	return 0;
}
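/*
 * Illustrative sketch of a remote property directory as consumed above
 * (the keys are the ones looked up here; the values are made up):
 *
 *	vendorid   = 0x8086 (value), "vendor name" (text)
 *	deviceid   = 0x1 (value), "hostname" (text)
 *	maxhopid   = 15 (value)
 *	<service>  = directory with prtcid/prtcvers/prtcrevs/prtcstns
 */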

static inline struct tb_switch *tb_xdomain_parent(struct tb_xdomain *xd)
{
	return tb_to_switch(xd->dev.parent);
}

static int tb_xdomain_update_link_attributes(struct tb_xdomain *xd)
{
	bool change = false;
	struct tb_port *port;
	int ret;

	port = tb_port_at(xd->route, tb_xdomain_parent(xd));

	ret = tb_port_get_link_speed(port);
	if (ret < 0)
		return ret;

	if (xd->link_speed != ret)
		change = true;

	xd->link_speed = ret;

	ret = tb_port_get_link_width(port);
	if (ret < 0)
		return ret;

	if (xd->link_width != ret)
		change = true;

	xd->link_width = ret;

	if (change)
		kobject_uevent(&xd->dev.kobj, KOBJ_CHANGE);

	return 0;
}

static void tb_xdomain_get_uuid(struct work_struct *work)
{
	struct tb_xdomain *xd = container_of(work, typeof(*xd),
					     get_uuid_work.work);
	struct tb *tb = xd->tb;
	uuid_t uuid;
	int ret;

	dev_dbg(&xd->dev, "requesting remote UUID\n");

	ret = tb_xdp_uuid_request(tb->ctl, xd->route, xd->uuid_retries, &uuid);
	if (ret < 0) {
		if (xd->uuid_retries-- > 0) {
			dev_dbg(&xd->dev, "failed to request UUID, retrying\n");
			queue_delayed_work(xd->tb->wq, &xd->get_uuid_work,
					   msecs_to_jiffies(100));
		} else {
			dev_dbg(&xd->dev, "failed to read remote UUID\n");
		}
		return;
	}

	dev_dbg(&xd->dev, "got remote UUID %pUb\n", &uuid);

	if (uuid_equal(&uuid, xd->local_uuid))
		dev_dbg(&xd->dev, "intra-domain loop detected\n");

	/*
	 * If the UUID is different, there is another domain connected
	 * so mark this one unplugged and wait for the connection
	 * manager to replace it.
	 */
	if (xd->remote_uuid && !uuid_equal(&uuid, xd->remote_uuid)) {
		dev_dbg(&xd->dev, "remote UUID is different, unplugging\n");
		xd->is_unplugged = true;
		return;
	}

	/* First time fill in the missing UUID */
	if (!xd->remote_uuid) {
		xd->remote_uuid = kmemdup(&uuid, sizeof(uuid_t), GFP_KERNEL);
		if (!xd->remote_uuid)
			return;
	}

	/* Now we can start the normal properties exchange */
	queue_delayed_work(xd->tb->wq, &xd->properties_changed_work,
			   msecs_to_jiffies(100));
	queue_delayed_work(xd->tb->wq, &xd->get_properties_work,
			   msecs_to_jiffies(1000));
}
static void tb_xdomain_get_properties(struct work_struct *work)
{
	struct tb_xdomain *xd = container_of(work, typeof(*xd),
					     get_properties_work.work);
	struct tb_property_dir *dir;
	struct tb *tb = xd->tb;
	bool update = false;
	u32 *block = NULL;
	u32 gen = 0;
	int ret;

	dev_dbg(&xd->dev, "requesting remote properties\n");

	ret = tb_xdp_properties_request(tb->ctl, xd->route, xd->local_uuid,
					xd->remote_uuid, xd->properties_retries,
					&block, &gen);
	if (ret < 0) {
		if (xd->properties_retries-- > 0) {
			dev_dbg(&xd->dev,
				"failed to request remote properties, retrying\n");
			queue_delayed_work(xd->tb->wq, &xd->get_properties_work,
					   msecs_to_jiffies(1000));
		} else {
			/* Give up now */
			dev_err(&xd->dev,
				"failed to read XDomain properties from %pUb\n",
				xd->remote_uuid);
		}
		return;
	}

	xd->properties_retries = XDOMAIN_PROPERTIES_RETRIES;

	mutex_lock(&xd->lock);

	/* Only accept newer generation properties */
	if (xd->remote_properties && gen <= xd->remote_property_block_gen)
		goto err_free_block;

	dir = tb_property_parse_dir(block, ret);
	if (!dir) {
		dev_err(&xd->dev, "failed to parse XDomain properties\n");
		goto err_free_block;
	}

	ret = populate_properties(xd, dir);
	if (ret) {
		dev_err(&xd->dev, "missing XDomain properties in response\n");
		goto err_free_dir;
	}

	/* Release the existing one */
	if (xd->remote_properties) {
		tb_property_free_dir(xd->remote_properties);
		update = true;
	}

	xd->remote_properties = dir;
	xd->remote_property_block_gen = gen;

	tb_xdomain_update_link_attributes(xd);

	mutex_unlock(&xd->lock);

	kfree(block);

	/*
	 * Now the device should be ready enough so we can add it to the
	 * bus and let userspace know about it. If the device is already
	 * registered, we notify the userspace that it has changed.
	 */
	if (!update) {
		if (device_add(&xd->dev)) {
			dev_err(&xd->dev, "failed to add XDomain device\n");
			return;
		}
		dev_info(&xd->dev, "new host found, vendor=%#x device=%#x\n",
			 xd->vendor, xd->device);
		if (xd->vendor_name && xd->device_name)
			dev_info(&xd->dev, "%s %s\n", xd->vendor_name,
				 xd->device_name);
	} else {
		kobject_uevent(&xd->dev.kobj, KOBJ_CHANGE);
	}

	enumerate_services(xd);
	return;

err_free_dir:
	tb_property_free_dir(dir);
err_free_block:
	kfree(block);
	mutex_unlock(&xd->lock);
}

static void tb_xdomain_properties_changed(struct work_struct *work)
{
	struct tb_xdomain *xd = container_of(work, typeof(*xd),
					     properties_changed_work.work);
	int ret;

	dev_dbg(&xd->dev, "sending properties changed notification\n");

	ret = tb_xdp_properties_changed_request(xd->tb->ctl, xd->route,
				xd->properties_changed_retries, xd->local_uuid);
	if (ret) {
		if (xd->properties_changed_retries-- > 0) {
			dev_dbg(&xd->dev,
				"failed to send properties changed notification, retrying\n");
			queue_delayed_work(xd->tb->wq,
					   &xd->properties_changed_work,
					   msecs_to_jiffies(1000));
		} else {
			/* Report failure only once all retries are exhausted */
			dev_err(&xd->dev, "failed to send properties changed notification\n");
		}
		return;
	}

	xd->properties_changed_retries = XDOMAIN_PROPERTIES_CHANGED_RETRIES;
}
| 1174 | |
| 1175 | static ssize_t device_show(struct device *dev, struct device_attribute *attr, |
| 1176 | char *buf) |
| 1177 | { |
| 1178 | struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev); |
| 1179 | |
| 1180 | return sprintf(buf, "%#x\n", xd->device); |
| 1181 | } |
| 1182 | static DEVICE_ATTR_RO(device); |
| 1183 | |
| 1184 | static ssize_t |
| 1185 | device_name_show(struct device *dev, struct device_attribute *attr, char *buf) |
| 1186 | { |
| 1187 | struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev); |
| 1188 | int ret; |
| 1189 | |
| 1190 | if (mutex_lock_interruptible(&xd->lock)) |
| 1191 | return -ERESTARTSYS; |
| 1192 | ret = sprintf(buf, "%s\n", xd->device_name ? xd->device_name : ""); |
| 1193 | mutex_unlock(&xd->lock); |
| 1194 | |
| 1195 | return ret; |
| 1196 | } |
| 1197 | static DEVICE_ATTR_RO(device_name); |
| 1198 | |
Mika Westerberg | 46b494f | 2021-01-08 14:57:19 +0200 | [diff] [blame] | 1199 | static ssize_t maxhopid_show(struct device *dev, struct device_attribute *attr, |
| 1200 | char *buf) |
| 1201 | { |
| 1202 | struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev); |
| 1203 | |
| 1204 | return sprintf(buf, "%d\n", xd->remote_max_hopid); |
| 1205 | } |
| 1206 | static DEVICE_ATTR_RO(maxhopid); |
| 1207 | |
Mika Westerberg | d1ff702 | 2017-10-02 13:38:34 +0300 | [diff] [blame] | 1208 | static ssize_t vendor_show(struct device *dev, struct device_attribute *attr, |
| 1209 | char *buf) |
| 1210 | { |
| 1211 | struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev); |
| 1212 | |
| 1213 | return sprintf(buf, "%#x\n", xd->vendor); |
| 1214 | } |
| 1215 | static DEVICE_ATTR_RO(vendor); |
| 1216 | |
| 1217 | static ssize_t |
| 1218 | vendor_name_show(struct device *dev, struct device_attribute *attr, char *buf) |
| 1219 | { |
| 1220 | struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev); |
| 1221 | int ret; |
| 1222 | |
| 1223 | if (mutex_lock_interruptible(&xd->lock)) |
| 1224 | return -ERESTARTSYS; |
| 1225 | ret = sprintf(buf, "%s\n", xd->vendor_name ? xd->vendor_name : ""); |
| 1226 | mutex_unlock(&xd->lock); |
| 1227 | |
| 1228 | return ret; |
| 1229 | } |
| 1230 | static DEVICE_ATTR_RO(vendor_name); |
| 1231 | |
| 1232 | static ssize_t unique_id_show(struct device *dev, struct device_attribute *attr, |
| 1233 | char *buf) |
| 1234 | { |
| 1235 | struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev); |
| 1236 | |
| 1237 | return sprintf(buf, "%pUb\n", xd->remote_uuid); |
| 1238 | } |
| 1239 | static DEVICE_ATTR_RO(unique_id); |
| 1240 | |
Isaac Hazan | 4210d50 | 2020-09-24 11:43:58 +0300 | [diff] [blame] | 1241 | static ssize_t speed_show(struct device *dev, struct device_attribute *attr, |
| 1242 | char *buf) |
| 1243 | { |
| 1244 | struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev); |
| 1245 | |
| 1246 | return sprintf(buf, "%u.0 Gb/s\n", xd->link_speed); |
| 1247 | } |
| 1248 | |
| 1249 | static DEVICE_ATTR(rx_speed, 0444, speed_show, NULL); |
| 1250 | static DEVICE_ATTR(tx_speed, 0444, speed_show, NULL); |
| 1251 | |
| 1252 | static ssize_t lanes_show(struct device *dev, struct device_attribute *attr, |
| 1253 | char *buf) |
| 1254 | { |
| 1255 | struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev); |
| 1256 | |
| 1257 | return sprintf(buf, "%u\n", xd->link_width); |
| 1258 | } |
| 1259 | |
| 1260 | static DEVICE_ATTR(rx_lanes, 0444, lanes_show, NULL); |
| 1261 | static DEVICE_ATTR(tx_lanes, 0444, lanes_show, NULL); |
| 1262 | |
Mika Westerberg | d1ff702 | 2017-10-02 13:38:34 +0300 | [diff] [blame] | 1263 | static struct attribute *xdomain_attrs[] = { |
| 1264 | &dev_attr_device.attr, |
| 1265 | &dev_attr_device_name.attr, |
Mika Westerberg | 46b494f | 2021-01-08 14:57:19 +0200 | [diff] [blame] | 1266 | &dev_attr_maxhopid.attr, |
Isaac Hazan | 4210d50 | 2020-09-24 11:43:58 +0300 | [diff] [blame] | 1267 | &dev_attr_rx_lanes.attr, |
| 1268 | &dev_attr_rx_speed.attr, |
| 1269 | &dev_attr_tx_lanes.attr, |
| 1270 | &dev_attr_tx_speed.attr, |
Mika Westerberg | d1ff702 | 2017-10-02 13:38:34 +0300 | [diff] [blame] | 1271 | &dev_attr_unique_id.attr, |
| 1272 | &dev_attr_vendor.attr, |
| 1273 | &dev_attr_vendor_name.attr, |
| 1274 | NULL, |
| 1275 | }; |
| 1276 | |
Rikard Falkeborn | 6889e00f | 2021-01-09 00:09:19 +0100 | [diff] [blame] | 1277 | static const struct attribute_group xdomain_attr_group = { |
Mika Westerberg | d1ff702 | 2017-10-02 13:38:34 +0300 | [diff] [blame] | 1278 | .attrs = xdomain_attrs, |
| 1279 | }; |
| 1280 | |
| 1281 | static const struct attribute_group *xdomain_attr_groups[] = { |
| 1282 | &xdomain_attr_group, |
| 1283 | NULL, |
| 1284 | }; |
| 1285 | |
| 1286 | static void tb_xdomain_release(struct device *dev) |
| 1287 | { |
| 1288 | struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev); |
| 1289 | |
| 1290 | put_device(xd->dev.parent); |
| 1291 | |
Mika Westerberg | 46b494f | 2021-01-08 14:57:19 +0200 | [diff] [blame] | 1292 | kfree(xd->local_property_block); |
| 1293 | tb_property_free_dir(xd->remote_properties); |
Mika Westerberg | 180b068 | 2021-01-08 16:25:39 +0200 | [diff] [blame] | 1294 | ida_destroy(&xd->out_hopids); |
| 1295 | ida_destroy(&xd->in_hopids); |
Mika Westerberg | d1ff702 | 2017-10-02 13:38:34 +0300 | [diff] [blame] | 1296 | ida_destroy(&xd->service_ids); |
| 1297 | |
| 1298 | kfree(xd->local_uuid); |
| 1299 | kfree(xd->remote_uuid); |
| 1300 | kfree(xd->device_name); |
| 1301 | kfree(xd->vendor_name); |
| 1302 | kfree(xd); |
| 1303 | } |
| 1304 | |
static void start_handshake(struct tb_xdomain *xd)
{
	xd->uuid_retries = XDOMAIN_UUID_RETRIES;
	xd->properties_retries = XDOMAIN_PROPERTIES_RETRIES;
	xd->properties_changed_retries = XDOMAIN_PROPERTIES_CHANGED_RETRIES;

	if (xd->needs_uuid) {
		queue_delayed_work(xd->tb->wq, &xd->get_uuid_work,
				   msecs_to_jiffies(100));
	} else {
		/* Start exchanging properties with the other host */
		queue_delayed_work(xd->tb->wq, &xd->properties_changed_work,
				   msecs_to_jiffies(100));
		queue_delayed_work(xd->tb->wq, &xd->get_properties_work,
				   msecs_to_jiffies(1000));
	}
}

static void stop_handshake(struct tb_xdomain *xd)
{
	xd->uuid_retries = 0;
	xd->properties_retries = 0;
	xd->properties_changed_retries = 0;

	cancel_delayed_work_sync(&xd->get_uuid_work);
	cancel_delayed_work_sync(&xd->get_properties_work);
	cancel_delayed_work_sync(&xd->properties_changed_work);
}

static int __maybe_unused tb_xdomain_suspend(struct device *dev)
{
	stop_handshake(tb_to_xdomain(dev));
	return 0;
}

static int __maybe_unused tb_xdomain_resume(struct device *dev)
{
	start_handshake(tb_to_xdomain(dev));
	return 0;
}

static const struct dev_pm_ops tb_xdomain_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(tb_xdomain_suspend, tb_xdomain_resume)
};

struct device_type tb_xdomain_type = {
	.name = "thunderbolt_xdomain",
	.release = tb_xdomain_release,
	.pm = &tb_xdomain_pm_ops,
};
EXPORT_SYMBOL_GPL(tb_xdomain_type);

/**
 * tb_xdomain_alloc() - Allocate new XDomain object
 * @tb: Domain where the XDomain belongs
 * @parent: Parent device (the switch through which the connection to
 *	    the other domain is reached)
 * @route: Route string used to reach the other domain
 * @local_uuid: Our local domain UUID
 * @remote_uuid: UUID of the other domain (optional)
 *
 * Allocates a new XDomain structure and returns a pointer to it. The
 * object must be released by calling tb_xdomain_put().
 */
struct tb_xdomain *tb_xdomain_alloc(struct tb *tb, struct device *parent,
				    u64 route, const uuid_t *local_uuid,
				    const uuid_t *remote_uuid)
{
	struct tb_switch *parent_sw = tb_to_switch(parent);
	struct tb_xdomain *xd;
	struct tb_port *down;

	/* Make sure the downstream domain is accessible */
	down = tb_port_at(route, parent_sw);
	tb_port_unlock(down);

	xd = kzalloc(sizeof(*xd), GFP_KERNEL);
	if (!xd)
		return NULL;

	xd->tb = tb;
	xd->route = route;
	xd->local_max_hopid = down->config.max_in_hop_id;
	ida_init(&xd->service_ids);
	ida_init(&xd->in_hopids);
	ida_init(&xd->out_hopids);
	mutex_init(&xd->lock);
	INIT_DELAYED_WORK(&xd->get_uuid_work, tb_xdomain_get_uuid);
	INIT_DELAYED_WORK(&xd->get_properties_work, tb_xdomain_get_properties);
	INIT_DELAYED_WORK(&xd->properties_changed_work,
			  tb_xdomain_properties_changed);

	xd->local_uuid = kmemdup(local_uuid, sizeof(uuid_t), GFP_KERNEL);
	if (!xd->local_uuid)
		goto err_free;

	if (remote_uuid) {
		xd->remote_uuid = kmemdup(remote_uuid, sizeof(uuid_t),
					  GFP_KERNEL);
		if (!xd->remote_uuid)
			goto err_free_local_uuid;
	} else {
		xd->needs_uuid = true;
	}

	device_initialize(&xd->dev);
	xd->dev.parent = get_device(parent);
	xd->dev.bus = &tb_bus_type;
	xd->dev.type = &tb_xdomain_type;
	xd->dev.groups = xdomain_attr_groups;
	dev_set_name(&xd->dev, "%u-%llx", tb->index, route);

	dev_dbg(&xd->dev, "local UUID %pUb\n", local_uuid);
	if (remote_uuid)
		dev_dbg(&xd->dev, "remote UUID %pUb\n", remote_uuid);

	/*
	 * This keeps the DMA powered on as long as we have an active
	 * connection to another host.
	 */
	pm_runtime_set_active(&xd->dev);
	pm_runtime_get_noresume(&xd->dev);
	pm_runtime_enable(&xd->dev);

	return xd;

err_free_local_uuid:
	kfree(xd->local_uuid);
err_free:
	kfree(xd);

	return NULL;
}

/**
 * tb_xdomain_add() - Add XDomain to the bus
 * @xd: XDomain to add
 *
 * This function starts the XDomain discovery protocol handshake and
 * eventually adds the XDomain to the bus. After calling this function
 * the caller needs to call tb_xdomain_remove() in order to remove and
 * release the object regardless of whether the handshake succeeded or
 * not.
 */
void tb_xdomain_add(struct tb_xdomain *xd)
{
	/* Start exchanging properties with the other host */
	start_handshake(xd);
}
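
/*
 * Illustrative sketch, guarded out with #if 0 (not part of the driver):
 * how a connection manager might drive the XDomain lifecycle. The
 * example_* names are hypothetical; tb_xdomain_alloc(), tb_xdomain_add()
 * and tb_xdomain_remove() are the real entry points. Note that
 * tb_xdomain_remove() also drops the reference taken at allocation, so
 * no extra tb_xdomain_put() is needed here.
 */
#if 0
static struct tb_xdomain *example_discover(struct tb *tb, struct tb_switch *sw,
					   u64 route, const uuid_t *local_uuid)
{
	struct tb_xdomain *xd;

	/*
	 * The remote UUID may not be known yet; passing NULL makes the
	 * handshake query it first (xd->needs_uuid).
	 */
	xd = tb_xdomain_alloc(tb, &sw->dev, route, local_uuid, NULL);
	if (!xd)
		return NULL;

	/* Starts the property exchange with the other host */
	tb_xdomain_add(xd);
	return xd;
}

static void example_disconnect(struct tb_xdomain *xd)
{
	/* Required even if the handshake never completed */
	tb_xdomain_remove(xd);
}
#endif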

static int unregister_service(struct device *dev, void *data)
{
	device_unregister(dev);
	return 0;
}

/**
 * tb_xdomain_remove() - Remove XDomain from the bus
 * @xd: XDomain to remove
 *
 * This will stop all ongoing configuration work and remove the XDomain
 * along with any services from the bus. When the last reference to @xd
 * is released the object will be released as well.
 */
void tb_xdomain_remove(struct tb_xdomain *xd)
{
	stop_handshake(xd);

	device_for_each_child_reverse(&xd->dev, xd, unregister_service);

	/*
	 * Undo runtime PM here explicitly because it is possible that
	 * the XDomain was never added to the bus and thus device_del()
	 * is not called for it (device_del() would handle this otherwise).
	 */
	pm_runtime_disable(&xd->dev);
	pm_runtime_put_noidle(&xd->dev);
	pm_runtime_set_suspended(&xd->dev);

	if (!device_is_registered(&xd->dev)) {
		put_device(&xd->dev);
	} else {
		dev_info(&xd->dev, "host disconnected\n");
		device_unregister(&xd->dev);
	}
}

/**
 * tb_xdomain_lane_bonding_enable() - Enable lane bonding on XDomain
 * @xd: XDomain connection
 *
 * Lane bonding is disabled by default for XDomains. This function tries
 * to enable bonding by first enabling the port and waiting for the CL0
 * state.
 *
 * Return: %0 in case of success and negative errno in case of error.
 */
int tb_xdomain_lane_bonding_enable(struct tb_xdomain *xd)
{
	struct tb_port *port;
	int ret;

	port = tb_port_at(xd->route, tb_xdomain_parent(xd));
	if (!port->dual_link_port)
		return -ENODEV;

	ret = tb_port_enable(port->dual_link_port);
	if (ret)
		return ret;

	ret = tb_wait_for_port(port->dual_link_port, true);
	if (ret < 0)
		return ret;
	if (!ret)
		return -ENOTCONN;

	ret = tb_port_lane_bonding_enable(port);
	if (ret) {
		tb_port_warn(port, "failed to enable lane bonding\n");
		return ret;
	}

	ret = tb_port_wait_for_link_width(port, 2, 100);
	if (ret) {
		tb_port_warn(port, "timeout enabling lane bonding\n");
		return ret;
	}

	tb_port_update_credits(port);
	tb_xdomain_update_link_attributes(xd);

	dev_dbg(&xd->dev, "lane bonding enabled\n");
	return 0;
}
EXPORT_SYMBOL_GPL(tb_xdomain_lane_bonding_enable);

/**
 * tb_xdomain_lane_bonding_disable() - Disable lane bonding
 * @xd: XDomain connection
 *
 * Lane bonding is disabled by default for XDomains. If bonding has been
 * enabled, this function can be used to disable it.
 */
void tb_xdomain_lane_bonding_disable(struct tb_xdomain *xd)
{
	struct tb_port *port;

	port = tb_port_at(xd->route, tb_xdomain_parent(xd));
	if (port->dual_link_port) {
		tb_port_lane_bonding_disable(port);
		if (tb_port_wait_for_link_width(port, 1, 100) == -ETIMEDOUT)
			tb_port_warn(port, "timeout disabling lane bonding\n");
		tb_port_disable(port->dual_link_port);
		tb_port_update_credits(port);
		tb_xdomain_update_link_attributes(xd);

		dev_dbg(&xd->dev, "lane bonding disabled\n");
	}
}
EXPORT_SYMBOL_GPL(tb_xdomain_lane_bonding_disable);

/**
 * tb_xdomain_alloc_in_hopid() - Allocate input HopID for tunneling
 * @xd: XDomain connection
 * @hopid: Preferred HopID or %-1 for next available
 *
 * Returns the allocated HopID or negative errno. Specifically returns
 * %-ENOSPC if there are no more available HopIDs. The returned HopID is
 * guaranteed to be within the range supported by the input lane adapter.
 * Call tb_xdomain_release_in_hopid() to release the allocated HopID.
 */
int tb_xdomain_alloc_in_hopid(struct tb_xdomain *xd, int hopid)
{
	if (hopid < 0)
		hopid = TB_PATH_MIN_HOPID;
	if (hopid < TB_PATH_MIN_HOPID || hopid > xd->local_max_hopid)
		return -EINVAL;

	return ida_alloc_range(&xd->in_hopids, hopid, xd->local_max_hopid,
			       GFP_KERNEL);
}
EXPORT_SYMBOL_GPL(tb_xdomain_alloc_in_hopid);

/**
 * tb_xdomain_alloc_out_hopid() - Allocate output HopID for tunneling
 * @xd: XDomain connection
 * @hopid: Preferred HopID or %-1 for next available
 *
 * Returns the allocated HopID or negative errno. Specifically returns
 * %-ENOSPC if there are no more available HopIDs. The returned HopID is
 * guaranteed to be within the range supported by the output lane adapter.
 * Call tb_xdomain_release_out_hopid() to release the allocated HopID.
 */
int tb_xdomain_alloc_out_hopid(struct tb_xdomain *xd, int hopid)
{
	if (hopid < 0)
		hopid = TB_PATH_MIN_HOPID;
	if (hopid < TB_PATH_MIN_HOPID || hopid > xd->remote_max_hopid)
		return -EINVAL;

	return ida_alloc_range(&xd->out_hopids, hopid, xd->remote_max_hopid,
			       GFP_KERNEL);
}
EXPORT_SYMBOL_GPL(tb_xdomain_alloc_out_hopid);

/**
 * tb_xdomain_release_in_hopid() - Release input HopID
 * @xd: XDomain connection
 * @hopid: HopID to release
 */
void tb_xdomain_release_in_hopid(struct tb_xdomain *xd, int hopid)
{
	ida_free(&xd->in_hopids, hopid);
}
EXPORT_SYMBOL_GPL(tb_xdomain_release_in_hopid);

/**
 * tb_xdomain_release_out_hopid() - Release output HopID
 * @xd: XDomain connection
 * @hopid: HopID to release
 */
void tb_xdomain_release_out_hopid(struct tb_xdomain *xd, int hopid)
{
	ida_free(&xd->out_hopids, hopid);
}
EXPORT_SYMBOL_GPL(tb_xdomain_release_out_hopid);
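
/*
 * Illustrative sketch, guarded out with #if 0: allocating a pair of
 * HopIDs for a tunnel and unwinding on failure. The error handling
 * pattern is an assumption; only the tb_xdomain_*_hopid() calls above
 * are real.
 */
#if 0
static int example_alloc_hopids(struct tb_xdomain *xd, int *in, int *out)
{
	int ret;

	ret = tb_xdomain_alloc_in_hopid(xd, -1);	/* -1: next available */
	if (ret < 0)
		return ret;
	*in = ret;

	ret = tb_xdomain_alloc_out_hopid(xd, -1);
	if (ret < 0) {
		tb_xdomain_release_in_hopid(xd, *in);
		return ret;
	}
	*out = ret;

	return 0;
}
#endif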

/**
 * tb_xdomain_enable_paths() - Enable DMA paths for XDomain connection
 * @xd: XDomain connection
 * @transmit_path: HopID we are using to send out packets
 * @transmit_ring: DMA ring used to send out packets
 * @receive_path: HopID the other end is using to send packets to us
 * @receive_ring: DMA ring used to receive packets from @receive_path
 *
 * The function enables the DMA paths so that after successful return
 * the caller can send and receive packets using the high-speed DMA
 * paths. If a transmit or receive path is not needed, pass %-1 for
 * those parameters.
 *
 * Return: %0 in case of success and negative errno in case of error
 */
int tb_xdomain_enable_paths(struct tb_xdomain *xd, int transmit_path,
			    int transmit_ring, int receive_path,
			    int receive_ring)
{
	return tb_domain_approve_xdomain_paths(xd->tb, xd, transmit_path,
					       transmit_ring, receive_path,
					       receive_ring);
}
EXPORT_SYMBOL_GPL(tb_xdomain_enable_paths);

/**
 * tb_xdomain_disable_paths() - Disable DMA paths for XDomain connection
 * @xd: XDomain connection
 * @transmit_path: HopID we are using to send out packets
 * @transmit_ring: DMA ring used to send out packets
 * @receive_path: HopID the other end is using to send packets to us
 * @receive_ring: DMA ring used to receive packets from @receive_path
 *
 * This does the opposite of tb_xdomain_enable_paths(). After a call to
 * this function the caller is not expected to use the rings anymore.
 * Passing %-1 as a path/ring parameter means "don't care". Normally the
 * callers should pass the same values here as they did when the paths
 * were enabled.
 *
 * Return: %0 in case of success and negative errno in case of error
 */
int tb_xdomain_disable_paths(struct tb_xdomain *xd, int transmit_path,
			     int transmit_ring, int receive_path,
			     int receive_ring)
{
	return tb_domain_disconnect_xdomain_paths(xd->tb, xd, transmit_path,
						  transmit_ring, receive_path,
						  receive_ring);
}
EXPORT_SYMBOL_GPL(tb_xdomain_disable_paths);
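
/*
 * Illustrative sketch, guarded out with #if 0: enabling the DMA paths
 * once the HopIDs have been exchanged over the service protocol, and
 * disabling them with the same parameters on teardown. Here
 * remote_in_hopid is the HopID the other end allocated for us to
 * transmit to, local_in_hopid comes from tb_xdomain_alloc_in_hopid(),
 * and the ring numbers are typically the ->hop of NHI rings from
 * tb_ring_alloc_tx()/tb_ring_alloc_rx(). All names are examples.
 */
#if 0
static int example_paths(struct tb_xdomain *xd, int remote_in_hopid,
			 int local_in_hopid, int tx_ring, int rx_ring)
{
	int ret;

	ret = tb_xdomain_enable_paths(xd, remote_in_hopid, tx_ring,
				      local_in_hopid, rx_ring);
	if (ret)
		return ret;

	/* ... send and receive frames over the DMA rings ... */

	return tb_xdomain_disable_paths(xd, remote_in_hopid, tx_ring,
					local_in_hopid, rx_ring);
}
#endif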

struct tb_xdomain_lookup {
	const uuid_t *uuid;
	u8 link;
	u8 depth;
	u64 route;
};

static struct tb_xdomain *switch_find_xdomain(struct tb_switch *sw,
					      const struct tb_xdomain_lookup *lookup)
{
	struct tb_port *port;

	tb_switch_for_each_port(sw, port) {
		struct tb_xdomain *xd;

		if (port->xdomain) {
			xd = port->xdomain;

			if (lookup->uuid) {
				if (xd->remote_uuid &&
				    uuid_equal(xd->remote_uuid, lookup->uuid))
					return xd;
			} else if (lookup->link &&
				   lookup->link == xd->link &&
				   lookup->depth == xd->depth) {
				return xd;
			} else if (lookup->route &&
				   lookup->route == xd->route) {
				return xd;
			}
		} else if (tb_port_has_remote(port)) {
			xd = switch_find_xdomain(port->remote->sw, lookup);
			if (xd)
				return xd;
		}
	}

	return NULL;
}

/**
 * tb_xdomain_find_by_uuid() - Find an XDomain by UUID
 * @tb: Domain where the XDomain belongs
 * @uuid: UUID to look for
 *
 * Finds an XDomain by walking through the Thunderbolt topology below
 * @tb. The returned XDomain will have its reference count increased so
 * the caller needs to call tb_xdomain_put() when it is done with the
 * object.
 *
 * This will find all XDomains including the ones that are not yet added
 * to the bus (handshake is still in progress).
 *
 * The caller needs to hold @tb->lock.
 */
struct tb_xdomain *tb_xdomain_find_by_uuid(struct tb *tb, const uuid_t *uuid)
{
	struct tb_xdomain_lookup lookup;
	struct tb_xdomain *xd;

	memset(&lookup, 0, sizeof(lookup));
	lookup.uuid = uuid;

	xd = switch_find_xdomain(tb->root_switch, &lookup);
	return tb_xdomain_get(xd);
}
EXPORT_SYMBOL_GPL(tb_xdomain_find_by_uuid);

/**
 * tb_xdomain_find_by_link_depth() - Find an XDomain by link and depth
 * @tb: Domain where the XDomain belongs
 * @link: Root switch link number
 * @depth: Depth in the link
 *
 * Finds an XDomain by walking through the Thunderbolt topology below
 * @tb. The returned XDomain will have its reference count increased so
 * the caller needs to call tb_xdomain_put() when it is done with the
 * object.
 *
 * This will find all XDomains including the ones that are not yet added
 * to the bus (handshake is still in progress).
 *
 * The caller needs to hold @tb->lock.
 */
struct tb_xdomain *tb_xdomain_find_by_link_depth(struct tb *tb, u8 link,
						 u8 depth)
{
	struct tb_xdomain_lookup lookup;
	struct tb_xdomain *xd;

	memset(&lookup, 0, sizeof(lookup));
	lookup.link = link;
	lookup.depth = depth;

	xd = switch_find_xdomain(tb->root_switch, &lookup);
	return tb_xdomain_get(xd);
}

/**
 * tb_xdomain_find_by_route() - Find an XDomain by route string
 * @tb: Domain where the XDomain belongs
 * @route: XDomain route string
 *
 * Finds an XDomain by walking through the Thunderbolt topology below
 * @tb. The returned XDomain will have its reference count increased so
 * the caller needs to call tb_xdomain_put() when it is done with the
 * object.
 *
 * This will find all XDomains including the ones that are not yet added
 * to the bus (handshake is still in progress).
 *
 * The caller needs to hold @tb->lock.
 */
struct tb_xdomain *tb_xdomain_find_by_route(struct tb *tb, u64 route)
{
	struct tb_xdomain_lookup lookup;
	struct tb_xdomain *xd;

	memset(&lookup, 0, sizeof(lookup));
	lookup.route = route;

	xd = switch_find_xdomain(tb->root_switch, &lookup);
	return tb_xdomain_get(xd);
}
EXPORT_SYMBOL_GPL(tb_xdomain_find_by_route);
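
/*
 * Illustrative sketch, guarded out with #if 0: looking up an XDomain by
 * its route string. The finders require @tb->lock to be held, and the
 * returned reference must be dropped with tb_xdomain_put().
 */
#if 0
static void example_find(struct tb *tb, u64 route)
{
	struct tb_xdomain *xd;

	mutex_lock(&tb->lock);
	xd = tb_xdomain_find_by_route(tb, route);
	mutex_unlock(&tb->lock);

	if (xd) {
		dev_dbg(&xd->dev, "found XDomain at route %llx\n", route);
		tb_xdomain_put(xd);
	}
}
#endif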

bool tb_xdomain_handle_request(struct tb *tb, enum tb_cfg_pkg_type type,
			       const void *buf, size_t size)
{
	const struct tb_protocol_handler *handler, *tmp;
	const struct tb_xdp_header *hdr = buf;
	unsigned int length;
	int ret = 0;

	/* We expect the packet to be at least the size of the header */
	length = hdr->xd_hdr.length_sn & TB_XDOMAIN_LENGTH_MASK;
	if (length != size / 4 - sizeof(hdr->xd_hdr) / 4)
		return true;
	if (length < sizeof(*hdr) / 4 - sizeof(hdr->xd_hdr) / 4)
		return true;

	/*
	 * Handle XDomain discovery protocol packets directly here. For
	 * other protocols (based on their UUID) we call registered
	 * handlers in turn.
	 */
	if (uuid_equal(&hdr->uuid, &tb_xdp_uuid)) {
		if (type == TB_CFG_PKG_XDOMAIN_REQ)
			return tb_xdp_schedule_request(tb, hdr, size);
		return false;
	}

	mutex_lock(&xdomain_lock);
	list_for_each_entry_safe(handler, tmp, &protocol_handlers, list) {
		if (!uuid_equal(&hdr->uuid, handler->uuid))
			continue;

		mutex_unlock(&xdomain_lock);
		ret = handler->callback(buf, size, handler->data);
		mutex_lock(&xdomain_lock);

		if (ret)
			break;
	}
	mutex_unlock(&xdomain_lock);

	return ret > 0;
}
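
/*
 * Illustrative sketch, guarded out with #if 0: how a service driver
 * plugs into the dispatch loop above for its own protocol UUID. The
 * struct layout follows struct tb_protocol_handler from
 * <linux/thunderbolt.h>; the UUID, callback body, and registration site
 * are made up for the example.
 */
#if 0
static int example_protocol_cb(const void *buf, size_t size, void *data)
{
	/* Return nonzero when the packet was consumed */
	return 1;
}

static const uuid_t example_proto_uuid =
	UUID_INIT(0x12345678, 0x0000, 0x0000,
		  0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01);

static struct tb_protocol_handler example_handler = {
	.uuid = &example_proto_uuid,
	.callback = example_protocol_cb,
};

/* In the service driver init: tb_register_protocol_handler(&example_handler); */
#endif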

static int update_xdomain(struct device *dev, void *data)
{
	struct tb_xdomain *xd;

	xd = tb_to_xdomain(dev);
	if (xd) {
		queue_delayed_work(xd->tb->wq, &xd->properties_changed_work,
				   msecs_to_jiffies(50));
	}

	return 0;
}

static void update_all_xdomains(void)
{
	bus_for_each_dev(&tb_bus_type, NULL, NULL, update_xdomain);
}

static bool remove_directory(const char *key, const struct tb_property_dir *dir)
{
	struct tb_property *p;

	p = tb_property_find(xdomain_property_dir, key,
			     TB_PROPERTY_TYPE_DIRECTORY);
	if (p && p->value.dir == dir) {
		tb_property_remove(p);
		return true;
	}
	return false;
}

/**
 * tb_register_property_dir() - Register property directory to the host
 * @key: Key (name) of the directory to add
 * @dir: Directory to add
 *
 * Service drivers can use this function to add a new property directory
 * to the properties this host exposes. The other connected hosts are
 * notified so they can re-read the properties of this host if they are
 * interested.
 *
 * Return: %0 on success and negative errno on failure
 */
int tb_register_property_dir(const char *key, struct tb_property_dir *dir)
{
	int ret;

	if (WARN_ON(!xdomain_property_dir))
		return -EAGAIN;

	if (!key || strlen(key) > 8)
		return -EINVAL;

	mutex_lock(&xdomain_lock);
	if (tb_property_find(xdomain_property_dir, key,
			     TB_PROPERTY_TYPE_DIRECTORY)) {
		ret = -EEXIST;
		goto err_unlock;
	}

	ret = tb_property_add_dir(xdomain_property_dir, key, dir);
	if (ret)
		goto err_unlock;

	xdomain_property_block_gen++;

	mutex_unlock(&xdomain_lock);
	update_all_xdomains();
	return 0;

err_unlock:
	mutex_unlock(&xdomain_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(tb_register_property_dir);

/**
 * tb_unregister_property_dir() - Removes property directory from host
 * @key: Key (name) of the directory
 * @dir: Directory to remove
 *
 * This will remove the existing directory from this host and notify the
 * connected hosts about the change.
 */
void tb_unregister_property_dir(const char *key, struct tb_property_dir *dir)
{
	mutex_lock(&xdomain_lock);
	if (remove_directory(key, dir))
		xdomain_property_block_gen++;
	mutex_unlock(&xdomain_lock);

	update_all_xdomains();
}
EXPORT_SYMBOL_GPL(tb_unregister_property_dir);
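
/*
 * Illustrative sketch, guarded out with #if 0: a service driver
 * publishing its own directory in the host properties, in the same
 * spirit as the directory a service driver such as the network driver
 * would register. The "example" key and the property names are made
 * up; note the key must be at most 8 characters long.
 */
#if 0
static struct tb_property_dir *example_dir;

static int example_register_properties(void)
{
	int ret;

	example_dir = tb_property_create_dir(NULL);
	if (!example_dir)
		return -ENOMEM;

	tb_property_add_immediate(example_dir, "prtcid", 1);
	tb_property_add_immediate(example_dir, "prtcvers", 1);

	ret = tb_register_property_dir("example", example_dir);
	if (ret)
		tb_property_free_dir(example_dir);
	return ret;
}

static void example_unregister_properties(void)
{
	/* Unregister first, then free the directory */
	tb_unregister_property_dir("example", example_dir);
	tb_property_free_dir(example_dir);
}
#endif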

int tb_xdomain_init(void)
{
	xdomain_property_dir = tb_property_create_dir(NULL);
	if (!xdomain_property_dir)
		return -ENOMEM;

	/*
	 * Initialize the standard set of properties without any service
	 * directories. Those will be added by the service drivers
	 * themselves when they are loaded.
	 *
	 * The rest of the properties are filled dynamically based on
	 * these when the P2P connection is made.
	 */
	tb_property_add_immediate(xdomain_property_dir, "vendorid",
				  PCI_VENDOR_ID_INTEL);
	tb_property_add_text(xdomain_property_dir, "vendorid", "Intel Corp.");
	tb_property_add_immediate(xdomain_property_dir, "deviceid", 0x1);
	tb_property_add_immediate(xdomain_property_dir, "devicerv", 0x80000100);

	xdomain_property_block_gen = prandom_u32();
	return 0;
}

void tb_xdomain_exit(void)
{
	tb_property_free_dir(xdomain_property_dir);
}