// SPDX-License-Identifier: GPL-2.0
/*
 * Thunderbolt XDomain discovery protocol support
 *
 * Copyright (C) 2017, Intel Corporation
 * Authors: Michael Jamet <michael.jamet@intel.com>
 *          Mika Westerberg <mika.westerberg@linux.intel.com>
 */

#include <linux/device.h>
#include <linux/kmod.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>
#include <linux/utsname.h>
#include <linux/uuid.h>
#include <linux/workqueue.h>

#include "tb.h"

#define XDOMAIN_DEFAULT_TIMEOUT			5000 /* ms */
#define XDOMAIN_UUID_RETRIES			10
#define XDOMAIN_PROPERTIES_RETRIES		60
#define XDOMAIN_PROPERTIES_CHANGED_RETRIES	10

struct xdomain_request_work {
	struct work_struct work;
	struct tb_xdp_header *pkg;
	struct tb *tb;
};

/* Serializes access to the properties and protocol handlers below */
static DEFINE_MUTEX(xdomain_lock);

/* Properties exposed to the remote domains */
static struct tb_property_dir *xdomain_property_dir;
static u32 *xdomain_property_block;
static u32 xdomain_property_block_len;
static u32 xdomain_property_block_gen;

/* Additional protocol handlers */
static LIST_HEAD(protocol_handlers);

/* UUID for XDomain discovery protocol: b638d70e-42ff-40bb-97c2-90e2c0b2ff07 */
static const uuid_t tb_xdp_uuid =
	UUID_INIT(0xb638d70e, 0x42ff, 0x40bb,
		  0x97, 0xc2, 0x90, 0xe2, 0xc0, 0xb2, 0xff, 0x07);

static bool tb_xdomain_match(const struct tb_cfg_request *req,
			     const struct ctl_pkg *pkg)
{
	switch (pkg->frame.eof) {
	case TB_CFG_PKG_ERROR:
		return true;

	case TB_CFG_PKG_XDOMAIN_RESP: {
		const struct tb_xdp_header *res_hdr = pkg->buffer;
		const struct tb_xdp_header *req_hdr = req->request;

		if (pkg->frame.size < req->response_size / 4)
			return false;

		/* Make sure route matches */
		if ((res_hdr->xd_hdr.route_hi & ~BIT(31)) !=
		     req_hdr->xd_hdr.route_hi)
			return false;
		if ((res_hdr->xd_hdr.route_lo) != req_hdr->xd_hdr.route_lo)
			return false;

		/* Check that the XDomain protocol matches */
		if (!uuid_equal(&res_hdr->uuid, &req_hdr->uuid))
			return false;

		return true;
	}

	default:
		return false;
	}
}

static bool tb_xdomain_copy(struct tb_cfg_request *req,
			    const struct ctl_pkg *pkg)
{
	memcpy(req->response, pkg->buffer, req->response_size);
	req->result.err = 0;
	return true;
}

static void response_ready(void *data)
{
	tb_cfg_request_put(data);
}

static int __tb_xdomain_response(struct tb_ctl *ctl, const void *response,
				 size_t size, enum tb_cfg_pkg_type type)
{
	struct tb_cfg_request *req;

	req = tb_cfg_request_alloc();
	if (!req)
		return -ENOMEM;

	req->match = tb_xdomain_match;
	req->copy = tb_xdomain_copy;
	req->request = response;
	req->request_size = size;
	req->request_type = type;

	return tb_cfg_request(ctl, req, response_ready, req);
}

/**
 * tb_xdomain_response() - Send an XDomain response message
 * @xd: XDomain to send the message
 * @response: Response to send
 * @size: Size of the response
 * @type: PDF type of the response
 *
 * This can be used to send an XDomain response message to the other
 * domain. No response for the message is expected.
 *
 * Return: %0 in case of success and negative errno in case of failure
 */
int tb_xdomain_response(struct tb_xdomain *xd, const void *response,
			size_t size, enum tb_cfg_pkg_type type)
{
	return __tb_xdomain_response(xd->tb->ctl, response, size, type);
}
EXPORT_SYMBOL_GPL(tb_xdomain_response);
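
/*
 * Example: a service driver replying to a protocol packet it received
 * (a minimal sketch; struct my_reply and its contents are hypothetical,
 * only the call itself follows this file):
 *
 *	struct my_reply reply = { };
 *	int ret;
 *
 *	ret = tb_xdomain_response(xd, &reply, sizeof(reply),
 *				  TB_CFG_PKG_XDOMAIN_RESP);
 *	if (ret)
 *		dev_warn(&xd->dev, "failed to send reply: %d\n", ret);
 */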

static int __tb_xdomain_request(struct tb_ctl *ctl, const void *request,
	size_t request_size, enum tb_cfg_pkg_type request_type, void *response,
	size_t response_size, enum tb_cfg_pkg_type response_type,
	unsigned int timeout_msec)
{
	struct tb_cfg_request *req;
	struct tb_cfg_result res;

	req = tb_cfg_request_alloc();
	if (!req)
		return -ENOMEM;

	req->match = tb_xdomain_match;
	req->copy = tb_xdomain_copy;
	req->request = request;
	req->request_size = request_size;
	req->request_type = request_type;
	req->response = response;
	req->response_size = response_size;
	req->response_type = response_type;

	res = tb_cfg_request_sync(ctl, req, timeout_msec);

	tb_cfg_request_put(req);

	return res.err == 1 ? -EIO : res.err;
}

/**
 * tb_xdomain_request() - Send an XDomain request
 * @xd: XDomain to send the request
 * @request: Request to send
 * @request_size: Size of the request in bytes
 * @request_type: PDF type of the request
 * @response: Response is copied here
 * @response_size: Expected size of the response in bytes
 * @response_type: Expected PDF type of the response
 * @timeout_msec: Timeout in milliseconds to wait for the response
 *
 * This function can be used to send XDomain control channel messages to
 * the other domain. The function waits until the response is received
 * or the timeout triggers, whichever comes first.
 *
 * Return: %0 in case of success and negative errno in case of failure
 */
int tb_xdomain_request(struct tb_xdomain *xd, const void *request,
		       size_t request_size, enum tb_cfg_pkg_type request_type,
		       void *response, size_t response_size,
		       enum tb_cfg_pkg_type response_type, unsigned int timeout_msec)
{
	return __tb_xdomain_request(xd->tb->ctl, request, request_size,
				    request_type, response, response_size,
				    response_type, timeout_msec);
}
EXPORT_SYMBOL_GPL(tb_xdomain_request);
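
/*
 * Example: sending a request and waiting for the reply (a minimal
 * sketch; struct my_req and struct my_res are hypothetical protocol
 * messages, only the call itself follows this file):
 *
 *	struct my_req req = { };
 *	struct my_res res = { };
 *	int ret;
 *
 *	ret = tb_xdomain_request(xd, &req, sizeof(req),
 *				 TB_CFG_PKG_XDOMAIN_REQ, &res, sizeof(res),
 *				 TB_CFG_PKG_XDOMAIN_RESP,
 *				 XDOMAIN_DEFAULT_TIMEOUT);
 */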

static inline void tb_xdp_fill_header(struct tb_xdp_header *hdr, u64 route,
	u8 sequence, enum tb_xdp_type type, size_t size)
{
	u32 length_sn;

	length_sn = (size - sizeof(hdr->xd_hdr)) / 4;
	length_sn |= (sequence << TB_XDOMAIN_SN_SHIFT) & TB_XDOMAIN_SN_MASK;

	hdr->xd_hdr.route_hi = upper_32_bits(route);
	hdr->xd_hdr.route_lo = lower_32_bits(route);
	hdr->xd_hdr.length_sn = length_sn;
	hdr->type = type;
	memcpy(&hdr->uuid, &tb_xdp_uuid, sizeof(tb_xdp_uuid));
}
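
/*
 * For example, a 16-byte packet (the 8-byte basic header followed by
 * two payload dwords) sent with sequence number 2 encodes length_sn as
 * (16 - 8) / 4 = 2 in the length field, with 2 shifted into the
 * sequence number bits by TB_XDOMAIN_SN_SHIFT.
 */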

static int tb_xdp_handle_error(const struct tb_xdp_header *hdr)
{
	const struct tb_xdp_error_response *error;

	if (hdr->type != ERROR_RESPONSE)
		return 0;

	error = (const struct tb_xdp_error_response *)hdr;

	switch (error->error) {
	case ERROR_UNKNOWN_PACKET:
	case ERROR_UNKNOWN_DOMAIN:
		return -EIO;
	case ERROR_NOT_SUPPORTED:
		return -ENOTSUPP;
	case ERROR_NOT_READY:
		return -EAGAIN;
	default:
		break;
	}

	return 0;
}

static int tb_xdp_uuid_request(struct tb_ctl *ctl, u64 route, int retry,
			       uuid_t *uuid)
{
	struct tb_xdp_uuid_response res;
	struct tb_xdp_uuid req;
	int ret;

	memset(&req, 0, sizeof(req));
	tb_xdp_fill_header(&req.hdr, route, retry % 4, UUID_REQUEST,
			   sizeof(req));

	memset(&res, 0, sizeof(res));
	ret = __tb_xdomain_request(ctl, &req, sizeof(req),
				   TB_CFG_PKG_XDOMAIN_REQ, &res, sizeof(res),
				   TB_CFG_PKG_XDOMAIN_RESP,
				   XDOMAIN_DEFAULT_TIMEOUT);
	if (ret)
		return ret;

	ret = tb_xdp_handle_error(&res.hdr);
	if (ret)
		return ret;

	uuid_copy(uuid, &res.src_uuid);
	return 0;
}

static int tb_xdp_uuid_response(struct tb_ctl *ctl, u64 route, u8 sequence,
				const uuid_t *uuid)
{
	struct tb_xdp_uuid_response res;

	memset(&res, 0, sizeof(res));
	tb_xdp_fill_header(&res.hdr, route, sequence, UUID_RESPONSE,
			   sizeof(res));

	uuid_copy(&res.src_uuid, uuid);
	res.src_route_hi = upper_32_bits(route);
	res.src_route_lo = lower_32_bits(route);

	return __tb_xdomain_response(ctl, &res, sizeof(res),
				     TB_CFG_PKG_XDOMAIN_RESP);
}

static int tb_xdp_error_response(struct tb_ctl *ctl, u64 route, u8 sequence,
				 enum tb_xdp_error error)
{
	struct tb_xdp_error_response res;

	memset(&res, 0, sizeof(res));
	tb_xdp_fill_header(&res.hdr, route, sequence, ERROR_RESPONSE,
			   sizeof(res));
	res.error = error;

	return __tb_xdomain_response(ctl, &res, sizeof(res),
				     TB_CFG_PKG_XDOMAIN_RESP);
}

static int tb_xdp_properties_request(struct tb_ctl *ctl, u64 route,
	const uuid_t *src_uuid, const uuid_t *dst_uuid, int retry,
	u32 **block, u32 *generation)
{
	struct tb_xdp_properties_response *res;
	struct tb_xdp_properties req;
	u16 data_len, len;
	size_t total_size;
	u32 *data = NULL;
	int ret;

	total_size = sizeof(*res) + TB_XDP_PROPERTIES_MAX_DATA_LENGTH * 4;
	res = kzalloc(total_size, GFP_KERNEL);
	if (!res)
		return -ENOMEM;

	memset(&req, 0, sizeof(req));
	tb_xdp_fill_header(&req.hdr, route, retry % 4, PROPERTIES_REQUEST,
			   sizeof(req));
	memcpy(&req.src_uuid, src_uuid, sizeof(*src_uuid));
	memcpy(&req.dst_uuid, dst_uuid, sizeof(*dst_uuid));

	len = 0;
	data_len = 0;

	do {
		ret = __tb_xdomain_request(ctl, &req, sizeof(req),
					   TB_CFG_PKG_XDOMAIN_REQ, res,
					   total_size, TB_CFG_PKG_XDOMAIN_RESP,
					   XDOMAIN_DEFAULT_TIMEOUT);
		if (ret)
			goto err;

		ret = tb_xdp_handle_error(&res->hdr);
		if (ret)
			goto err;

		/*
		 * The package length includes the whole payload without
		 * the XDomain header. Validate first that the package is
		 * at least the size of the response structure.
		 */
		len = res->hdr.xd_hdr.length_sn & TB_XDOMAIN_LENGTH_MASK;
		if (len < sizeof(*res) / 4) {
			ret = -EINVAL;
			goto err;
		}

		len += sizeof(res->hdr.xd_hdr) / 4;
		len -= sizeof(*res) / 4;

		if (res->offset != req.offset) {
			ret = -EINVAL;
			goto err;
		}

		/*
		 * On the first pass, allocate a block that has enough
		 * space for the whole properties block.
		 */
		if (!data) {
			data_len = res->data_length;
			if (data_len > TB_XDP_PROPERTIES_MAX_LENGTH) {
				ret = -E2BIG;
				goto err;
			}

			data = kcalloc(data_len, sizeof(u32), GFP_KERNEL);
			if (!data) {
				ret = -ENOMEM;
				goto err;
			}
		}

		memcpy(data + req.offset, res->data, len * 4);
		req.offset += len;
	} while (!data_len || req.offset < data_len);

	*block = data;
	*generation = res->generation;

	kfree(res);

	return data_len;

err:
	kfree(data);
	kfree(res);

	return ret;
}
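
/*
 * A worked example of the request loop above: if the remote end reports
 * a property block of 600 dwords and a single response carries at most
 * TB_XDP_PROPERTIES_MAX_DATA_LENGTH dwords (say 500; the numbers here
 * are illustrative only), the first request at offset 0 returns 500
 * dwords and the second at offset 500 returns the remaining 100, which
 * are concatenated into @data before the total length is returned.
 */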

static int tb_xdp_properties_response(struct tb *tb, struct tb_ctl *ctl,
	u64 route, u8 sequence, const uuid_t *src_uuid,
	const struct tb_xdp_properties *req)
{
	struct tb_xdp_properties_response *res;
	size_t total_size;
	u16 len;
	int ret;

	/*
	 * Currently we expect all requests to be directed to us. The
	 * protocol supports forwarding, though, which we might add
	 * support for later on.
	 */
	if (!uuid_equal(src_uuid, &req->dst_uuid)) {
		tb_xdp_error_response(ctl, route, sequence,
				      ERROR_UNKNOWN_DOMAIN);
		return 0;
	}

	mutex_lock(&xdomain_lock);

	if (req->offset >= xdomain_property_block_len) {
		mutex_unlock(&xdomain_lock);
		return -EINVAL;
	}

	len = xdomain_property_block_len - req->offset;
	len = min_t(u16, len, TB_XDP_PROPERTIES_MAX_DATA_LENGTH);
	total_size = sizeof(*res) + len * 4;

	res = kzalloc(total_size, GFP_KERNEL);
	if (!res) {
		mutex_unlock(&xdomain_lock);
		return -ENOMEM;
	}

	tb_xdp_fill_header(&res->hdr, route, sequence, PROPERTIES_RESPONSE,
			   total_size);
	res->generation = xdomain_property_block_gen;
	res->data_length = xdomain_property_block_len;
	res->offset = req->offset;
	uuid_copy(&res->src_uuid, src_uuid);
	uuid_copy(&res->dst_uuid, &req->src_uuid);
	memcpy(res->data, &xdomain_property_block[req->offset], len * 4);

	mutex_unlock(&xdomain_lock);

	ret = __tb_xdomain_response(ctl, res, total_size,
				    TB_CFG_PKG_XDOMAIN_RESP);

	kfree(res);
	return ret;
}

static int tb_xdp_properties_changed_request(struct tb_ctl *ctl, u64 route,
					     int retry, const uuid_t *uuid)
{
	struct tb_xdp_properties_changed_response res;
	struct tb_xdp_properties_changed req;
	int ret;

	memset(&req, 0, sizeof(req));
	tb_xdp_fill_header(&req.hdr, route, retry % 4,
			   PROPERTIES_CHANGED_REQUEST, sizeof(req));
	uuid_copy(&req.src_uuid, uuid);

	memset(&res, 0, sizeof(res));
	ret = __tb_xdomain_request(ctl, &req, sizeof(req),
				   TB_CFG_PKG_XDOMAIN_REQ, &res, sizeof(res),
				   TB_CFG_PKG_XDOMAIN_RESP,
				   XDOMAIN_DEFAULT_TIMEOUT);
	if (ret)
		return ret;

	return tb_xdp_handle_error(&res.hdr);
}

static int
tb_xdp_properties_changed_response(struct tb_ctl *ctl, u64 route, u8 sequence)
{
	struct tb_xdp_properties_changed_response res;

	memset(&res, 0, sizeof(res));
	tb_xdp_fill_header(&res.hdr, route, sequence,
			   PROPERTIES_CHANGED_RESPONSE, sizeof(res));
	return __tb_xdomain_response(ctl, &res, sizeof(res),
				     TB_CFG_PKG_XDOMAIN_RESP);
}

/**
 * tb_register_protocol_handler() - Register protocol handler
 * @handler: Handler to register
 *
 * This allows XDomain service drivers to hook into incoming XDomain
 * messages. After this function is called the service driver needs to
 * be able to handle calls to its callback whenever a packet with the
 * registered protocol is received.
 */
int tb_register_protocol_handler(struct tb_protocol_handler *handler)
{
	if (!handler->uuid || !handler->callback)
		return -EINVAL;
	if (uuid_equal(handler->uuid, &tb_xdp_uuid))
		return -EINVAL;

	mutex_lock(&xdomain_lock);
	list_add_tail(&handler->list, &protocol_handlers);
	mutex_unlock(&xdomain_lock);

	return 0;
}
EXPORT_SYMBOL_GPL(tb_register_protocol_handler);
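
/*
 * Example registration (a minimal sketch; my_proto_uuid, my_callback
 * and my_data are hypothetical):
 *
 *	static struct tb_protocol_handler my_handler = {
 *		.uuid = &my_proto_uuid,
 *		.callback = my_callback,
 *		.data = &my_data,
 *	};
 *
 *	ret = tb_register_protocol_handler(&my_handler);
 *
 * After this, my_callback() is invoked from tb_xdomain_handle_request()
 * for every received XDomain packet whose UUID matches my_proto_uuid.
 */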

/**
 * tb_unregister_protocol_handler() - Unregister protocol handler
 * @handler: Handler to unregister
 *
 * Removes the previously registered protocol handler.
 */
void tb_unregister_protocol_handler(struct tb_protocol_handler *handler)
{
	mutex_lock(&xdomain_lock);
	list_del_init(&handler->list);
	mutex_unlock(&xdomain_lock);
}
EXPORT_SYMBOL_GPL(tb_unregister_protocol_handler);

static void tb_xdp_handle_request(struct work_struct *work)
{
	struct xdomain_request_work *xw = container_of(work, typeof(*xw), work);
	const struct tb_xdp_header *pkg = xw->pkg;
	const struct tb_xdomain_header *xhdr = &pkg->xd_hdr;
	struct tb *tb = xw->tb;
	struct tb_ctl *ctl = tb->ctl;
	const uuid_t *uuid;
	int ret = 0;
	u32 sequence;
	u64 route;

	route = ((u64)xhdr->route_hi << 32 | xhdr->route_lo) & ~BIT_ULL(63);
	sequence = xhdr->length_sn & TB_XDOMAIN_SN_MASK;
	sequence >>= TB_XDOMAIN_SN_SHIFT;

	mutex_lock(&tb->lock);
	if (tb->root_switch)
		uuid = tb->root_switch->uuid;
	else
		uuid = NULL;
	mutex_unlock(&tb->lock);

	if (!uuid) {
		tb_xdp_error_response(ctl, route, sequence, ERROR_NOT_READY);
		goto out;
	}

	switch (pkg->type) {
	case PROPERTIES_REQUEST:
		ret = tb_xdp_properties_response(tb, ctl, route, sequence, uuid,
			(const struct tb_xdp_properties *)pkg);
		break;

	case PROPERTIES_CHANGED_REQUEST: {
		const struct tb_xdp_properties_changed *xchg =
			(const struct tb_xdp_properties_changed *)pkg;
		struct tb_xdomain *xd;

		ret = tb_xdp_properties_changed_response(ctl, route, sequence);

		/*
		 * Since the properties have been changed, let's update
		 * the xdomain related to this connection as well in
		 * case there is a change in services it offers.
		 */
		xd = tb_xdomain_find_by_uuid_locked(tb, &xchg->src_uuid);
		if (xd) {
			queue_delayed_work(tb->wq, &xd->get_properties_work,
					   msecs_to_jiffies(50));
			tb_xdomain_put(xd);
		}

		break;
	}

	case UUID_REQUEST_OLD:
	case UUID_REQUEST:
		ret = tb_xdp_uuid_response(ctl, route, sequence, uuid);
		break;

	default:
		tb_xdp_error_response(ctl, route, sequence,
				      ERROR_NOT_SUPPORTED);
		break;
	}

	if (ret) {
		tb_warn(tb, "failed to send XDomain response for %#x\n",
			pkg->type);
	}

out:
	kfree(xw->pkg);
	kfree(xw);

	tb_domain_put(tb);
}

static bool
tb_xdp_schedule_request(struct tb *tb, const struct tb_xdp_header *hdr,
			size_t size)
{
	struct xdomain_request_work *xw;

	xw = kmalloc(sizeof(*xw), GFP_KERNEL);
	if (!xw)
		return false;

	INIT_WORK(&xw->work, tb_xdp_handle_request);
	xw->pkg = kmemdup(hdr, size, GFP_KERNEL);
	if (!xw->pkg) {
		kfree(xw);
		return false;
	}
	xw->tb = tb_domain_get(tb);

	schedule_work(&xw->work);
	return true;
}

/**
 * tb_register_service_driver() - Register XDomain service driver
 * @drv: Driver to register
 *
 * Registers a new service driver @drv to the bus.
 */
int tb_register_service_driver(struct tb_service_driver *drv)
{
	drv->driver.bus = &tb_bus_type;
	return driver_register(&drv->driver);
}
EXPORT_SYMBOL_GPL(tb_register_service_driver);
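
/*
 * Example (a minimal sketch; the "network" key and the protocol ID
 * below are illustrative, and my_probe/my_remove are hypothetical):
 *
 *	static const struct tb_service_id my_ids[] = {
 *		{ TB_SERVICE("network", 1) },
 *		{ },
 *	};
 *
 *	static struct tb_service_driver my_driver = {
 *		.driver.name = "my-service",
 *		.probe = my_probe,
 *		.remove = my_remove,
 *		.id_table = my_ids,
 *	};
 *
 *	ret = tb_register_service_driver(&my_driver);
 */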

/**
 * tb_unregister_service_driver() - Unregister XDomain service driver
 * @drv: Driver to unregister
 *
 * Unregisters the XDomain service driver from the bus.
 */
void tb_unregister_service_driver(struct tb_service_driver *drv)
{
	driver_unregister(&drv->driver);
}
EXPORT_SYMBOL_GPL(tb_unregister_service_driver);

static ssize_t key_show(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	struct tb_service *svc = container_of(dev, struct tb_service, dev);

	/*
	 * It should be null terminated but anything else is pretty much
	 * allowed.
	 */
	return sprintf(buf, "%*pE\n", (int)strlen(svc->key), svc->key);
}
static DEVICE_ATTR_RO(key);

static int get_modalias(struct tb_service *svc, char *buf, size_t size)
{
	return snprintf(buf, size, "tbsvc:k%sp%08Xv%08Xr%08X", svc->key,
			svc->prtcid, svc->prtcvers, svc->prtcrevs);
}
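
/*
 * For example, a service with key "network", prtcid 1, prtcvers 1 and
 * prtcrevs 1 (illustrative values) formats as
 * "tbsvc:knetworkp00000001v00000001r00000001".
 */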

static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct tb_service *svc = container_of(dev, struct tb_service, dev);

	/* Full buffer size except new line and null termination */
	get_modalias(svc, buf, PAGE_SIZE - 2);
	return sprintf(buf, "%s\n", buf);
}
static DEVICE_ATTR_RO(modalias);

static ssize_t prtcid_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct tb_service *svc = container_of(dev, struct tb_service, dev);

	return sprintf(buf, "%u\n", svc->prtcid);
}
static DEVICE_ATTR_RO(prtcid);

static ssize_t prtcvers_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct tb_service *svc = container_of(dev, struct tb_service, dev);

	return sprintf(buf, "%u\n", svc->prtcvers);
}
static DEVICE_ATTR_RO(prtcvers);

static ssize_t prtcrevs_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct tb_service *svc = container_of(dev, struct tb_service, dev);

	return sprintf(buf, "%u\n", svc->prtcrevs);
}
static DEVICE_ATTR_RO(prtcrevs);

static ssize_t prtcstns_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct tb_service *svc = container_of(dev, struct tb_service, dev);

	return sprintf(buf, "0x%08x\n", svc->prtcstns);
}
static DEVICE_ATTR_RO(prtcstns);

static struct attribute *tb_service_attrs[] = {
	&dev_attr_key.attr,
	&dev_attr_modalias.attr,
	&dev_attr_prtcid.attr,
	&dev_attr_prtcvers.attr,
	&dev_attr_prtcrevs.attr,
	&dev_attr_prtcstns.attr,
	NULL,
};

static struct attribute_group tb_service_attr_group = {
	.attrs = tb_service_attrs,
};

static const struct attribute_group *tb_service_attr_groups[] = {
	&tb_service_attr_group,
	NULL,
};

static int tb_service_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	struct tb_service *svc = container_of(dev, struct tb_service, dev);
	char modalias[64];

	get_modalias(svc, modalias, sizeof(modalias));
	return add_uevent_var(env, "MODALIAS=%s", modalias);
}

static void tb_service_release(struct device *dev)
{
	struct tb_service *svc = container_of(dev, struct tb_service, dev);
	struct tb_xdomain *xd = tb_service_parent(svc);

	ida_simple_remove(&xd->service_ids, svc->id);
	kfree(svc->key);
	kfree(svc);
}

struct device_type tb_service_type = {
	.name = "thunderbolt_service",
	.groups = tb_service_attr_groups,
	.uevent = tb_service_uevent,
	.release = tb_service_release,
};
EXPORT_SYMBOL_GPL(tb_service_type);

static int remove_missing_service(struct device *dev, void *data)
{
	struct tb_xdomain *xd = data;
	struct tb_service *svc;

	svc = tb_to_service(dev);
	if (!svc)
		return 0;

	if (!tb_property_find(xd->properties, svc->key,
			      TB_PROPERTY_TYPE_DIRECTORY))
		device_unregister(dev);

	return 0;
}

static int find_service(struct device *dev, void *data)
{
	const struct tb_property *p = data;
	struct tb_service *svc;

	svc = tb_to_service(dev);
	if (!svc)
		return 0;

	return !strcmp(svc->key, p->key);
}

static int populate_service(struct tb_service *svc,
			    struct tb_property *property)
{
	struct tb_property_dir *dir = property->value.dir;
	struct tb_property *p;

	/* Fill in standard properties */
	p = tb_property_find(dir, "prtcid", TB_PROPERTY_TYPE_VALUE);
	if (p)
		svc->prtcid = p->value.immediate;
	p = tb_property_find(dir, "prtcvers", TB_PROPERTY_TYPE_VALUE);
	if (p)
		svc->prtcvers = p->value.immediate;
	p = tb_property_find(dir, "prtcrevs", TB_PROPERTY_TYPE_VALUE);
	if (p)
		svc->prtcrevs = p->value.immediate;
	p = tb_property_find(dir, "prtcstns", TB_PROPERTY_TYPE_VALUE);
	if (p)
		svc->prtcstns = p->value.immediate;

	svc->key = kstrdup(property->key, GFP_KERNEL);
	if (!svc->key)
		return -ENOMEM;

	return 0;
}

static void enumerate_services(struct tb_xdomain *xd)
{
	struct tb_service *svc;
	struct tb_property *p;
	struct device *dev;
	int id;

	/*
	 * First remove all services that are not available anymore in
	 * the updated property block.
	 */
	device_for_each_child_reverse(&xd->dev, xd, remove_missing_service);

	/* Then re-enumerate properties creating new services as we go */
	tb_property_for_each(xd->properties, p) {
		if (p->type != TB_PROPERTY_TYPE_DIRECTORY)
			continue;

		/* If the service exists already we are fine */
		dev = device_find_child(&xd->dev, p, find_service);
		if (dev) {
			put_device(dev);
			continue;
		}

		svc = kzalloc(sizeof(*svc), GFP_KERNEL);
		if (!svc)
			break;

		if (populate_service(svc, p)) {
			kfree(svc);
			break;
		}

		id = ida_simple_get(&xd->service_ids, 0, 0, GFP_KERNEL);
		if (id < 0) {
			kfree(svc);
			break;
		}
		svc->id = id;
		svc->dev.bus = &tb_bus_type;
		svc->dev.type = &tb_service_type;
		svc->dev.parent = &xd->dev;
		dev_set_name(&svc->dev, "%s.%d", dev_name(&xd->dev), svc->id);

		if (device_register(&svc->dev)) {
			put_device(&svc->dev);
			break;
		}
	}
}

static int populate_properties(struct tb_xdomain *xd,
			       struct tb_property_dir *dir)
{
	const struct tb_property *p;

	/* Required properties */
	p = tb_property_find(dir, "deviceid", TB_PROPERTY_TYPE_VALUE);
	if (!p)
		return -EINVAL;
	xd->device = p->value.immediate;

	p = tb_property_find(dir, "vendorid", TB_PROPERTY_TYPE_VALUE);
	if (!p)
		return -EINVAL;
	xd->vendor = p->value.immediate;

	kfree(xd->device_name);
	xd->device_name = NULL;
	kfree(xd->vendor_name);
	xd->vendor_name = NULL;

	/* Optional properties */
	p = tb_property_find(dir, "deviceid", TB_PROPERTY_TYPE_TEXT);
	if (p)
		xd->device_name = kstrdup(p->value.text, GFP_KERNEL);
	p = tb_property_find(dir, "vendorid", TB_PROPERTY_TYPE_TEXT);
	if (p)
		xd->vendor_name = kstrdup(p->value.text, GFP_KERNEL);

	return 0;
}

/* Called with @xd->lock held */
static void tb_xdomain_restore_paths(struct tb_xdomain *xd)
{
	if (!xd->resume)
		return;

	xd->resume = false;
	if (xd->transmit_path) {
		dev_dbg(&xd->dev, "re-establishing DMA path\n");
		tb_domain_approve_xdomain_paths(xd->tb, xd);
	}
}

static void tb_xdomain_get_uuid(struct work_struct *work)
{
	struct tb_xdomain *xd = container_of(work, typeof(*xd),
					     get_uuid_work.work);
	struct tb *tb = xd->tb;
	uuid_t uuid;
	int ret;

	ret = tb_xdp_uuid_request(tb->ctl, xd->route, xd->uuid_retries, &uuid);
	if (ret < 0) {
		if (xd->uuid_retries-- > 0) {
			queue_delayed_work(xd->tb->wq, &xd->get_uuid_work,
					   msecs_to_jiffies(100));
		} else {
			dev_dbg(&xd->dev, "failed to read remote UUID\n");
		}
		return;
	}

	if (uuid_equal(&uuid, xd->local_uuid)) {
		dev_dbg(&xd->dev, "intra-domain loop detected\n");
		return;
	}

	/*
	 * If the UUID is different, there is another domain connected
	 * so mark this one unplugged and wait for the connection
	 * manager to replace it.
	 */
	if (xd->remote_uuid && !uuid_equal(&uuid, xd->remote_uuid)) {
		dev_dbg(&xd->dev, "remote UUID is different, unplugging\n");
		xd->is_unplugged = true;
		return;
	}

	/* First time fill in the missing UUID */
	if (!xd->remote_uuid) {
		xd->remote_uuid = kmemdup(&uuid, sizeof(uuid_t), GFP_KERNEL);
		if (!xd->remote_uuid)
			return;
	}

	/* Now we can start the normal properties exchange */
	queue_delayed_work(xd->tb->wq, &xd->properties_changed_work,
			   msecs_to_jiffies(100));
	queue_delayed_work(xd->tb->wq, &xd->get_properties_work,
			   msecs_to_jiffies(1000));
}

static void tb_xdomain_get_properties(struct work_struct *work)
{
	struct tb_xdomain *xd = container_of(work, typeof(*xd),
					     get_properties_work.work);
	struct tb_property_dir *dir;
	struct tb *tb = xd->tb;
	bool update = false;
	u32 *block = NULL;
	u32 gen = 0;
	int ret;

	ret = tb_xdp_properties_request(tb->ctl, xd->route, xd->local_uuid,
					xd->remote_uuid, xd->properties_retries,
					&block, &gen);
	if (ret < 0) {
		if (xd->properties_retries-- > 0) {
			queue_delayed_work(xd->tb->wq, &xd->get_properties_work,
					   msecs_to_jiffies(1000));
		} else {
			/* Give up now */
			dev_err(&xd->dev,
				"failed to read XDomain properties from %pUb\n",
				xd->remote_uuid);
		}
		return;
	}

	xd->properties_retries = XDOMAIN_PROPERTIES_RETRIES;

	mutex_lock(&xd->lock);

	/* Only accept newer generation properties */
	if (xd->properties && gen <= xd->property_block_gen) {
		/*
		 * On resume it is likely that the properties block has
		 * not changed (unless the other end added or removed
		 * services). However, we need to make sure the existing
		 * DMA paths are restored properly.
		 */
		tb_xdomain_restore_paths(xd);
		goto err_free_block;
	}

	dir = tb_property_parse_dir(block, ret);
	if (!dir) {
		dev_err(&xd->dev, "failed to parse XDomain properties\n");
		goto err_free_block;
	}

	ret = populate_properties(xd, dir);
	if (ret) {
		dev_err(&xd->dev, "missing XDomain properties in response\n");
		goto err_free_dir;
	}

	/* Release the existing one */
	if (xd->properties) {
		tb_property_free_dir(xd->properties);
		update = true;
	}

	xd->properties = dir;
	xd->property_block_gen = gen;

	tb_xdomain_restore_paths(xd);

	mutex_unlock(&xd->lock);

	kfree(block);

	/*
	 * Now the device should be ready enough so we can add it to the
	 * bus and let userspace know about it. If the device is already
	 * registered, we notify the userspace that it has changed.
	 */
	if (!update) {
		if (device_add(&xd->dev)) {
			dev_err(&xd->dev, "failed to add XDomain device\n");
			return;
		}
	} else {
		kobject_uevent(&xd->dev.kobj, KOBJ_CHANGE);
	}

	enumerate_services(xd);
	return;

err_free_dir:
	tb_property_free_dir(dir);
err_free_block:
	kfree(block);
	mutex_unlock(&xd->lock);
}

static void tb_xdomain_properties_changed(struct work_struct *work)
{
	struct tb_xdomain *xd = container_of(work, typeof(*xd),
					     properties_changed_work.work);
	int ret;

	ret = tb_xdp_properties_changed_request(xd->tb->ctl, xd->route,
				xd->properties_changed_retries, xd->local_uuid);
	if (ret) {
		if (xd->properties_changed_retries-- > 0)
			queue_delayed_work(xd->tb->wq,
					   &xd->properties_changed_work,
					   msecs_to_jiffies(1000));
		return;
	}

	xd->properties_changed_retries = XDOMAIN_PROPERTIES_CHANGED_RETRIES;
}

static ssize_t device_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);

	return sprintf(buf, "%#x\n", xd->device);
}
static DEVICE_ATTR_RO(device);

static ssize_t
device_name_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);
	int ret;

	if (mutex_lock_interruptible(&xd->lock))
		return -ERESTARTSYS;
	ret = sprintf(buf, "%s\n", xd->device_name ? xd->device_name : "");
	mutex_unlock(&xd->lock);

	return ret;
}
static DEVICE_ATTR_RO(device_name);

static ssize_t vendor_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);

	return sprintf(buf, "%#x\n", xd->vendor);
}
static DEVICE_ATTR_RO(vendor);

static ssize_t
vendor_name_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);
	int ret;

	if (mutex_lock_interruptible(&xd->lock))
		return -ERESTARTSYS;
	ret = sprintf(buf, "%s\n", xd->vendor_name ? xd->vendor_name : "");
	mutex_unlock(&xd->lock);

	return ret;
}
static DEVICE_ATTR_RO(vendor_name);

static ssize_t unique_id_show(struct device *dev, struct device_attribute *attr,
			      char *buf)
{
	struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);

	return sprintf(buf, "%pUb\n", xd->remote_uuid);
}
static DEVICE_ATTR_RO(unique_id);

static struct attribute *xdomain_attrs[] = {
	&dev_attr_device.attr,
	&dev_attr_device_name.attr,
	&dev_attr_unique_id.attr,
	&dev_attr_vendor.attr,
	&dev_attr_vendor_name.attr,
	NULL,
};

static struct attribute_group xdomain_attr_group = {
	.attrs = xdomain_attrs,
};

static const struct attribute_group *xdomain_attr_groups[] = {
	&xdomain_attr_group,
	NULL,
};

static void tb_xdomain_release(struct device *dev)
{
	struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);

	put_device(xd->dev.parent);

	tb_property_free_dir(xd->properties);
	ida_destroy(&xd->service_ids);

	kfree(xd->local_uuid);
	kfree(xd->remote_uuid);
	kfree(xd->device_name);
	kfree(xd->vendor_name);
	kfree(xd);
}

static void start_handshake(struct tb_xdomain *xd)
{
	xd->uuid_retries = XDOMAIN_UUID_RETRIES;
	xd->properties_retries = XDOMAIN_PROPERTIES_RETRIES;
	xd->properties_changed_retries = XDOMAIN_PROPERTIES_CHANGED_RETRIES;

	if (xd->needs_uuid) {
		queue_delayed_work(xd->tb->wq, &xd->get_uuid_work,
				   msecs_to_jiffies(100));
	} else {
		/* Start exchanging properties with the other host */
		queue_delayed_work(xd->tb->wq, &xd->properties_changed_work,
				   msecs_to_jiffies(100));
		queue_delayed_work(xd->tb->wq, &xd->get_properties_work,
				   msecs_to_jiffies(1000));
	}
}

static void stop_handshake(struct tb_xdomain *xd)
{
	xd->uuid_retries = 0;
	xd->properties_retries = 0;
	xd->properties_changed_retries = 0;

	cancel_delayed_work_sync(&xd->get_uuid_work);
	cancel_delayed_work_sync(&xd->get_properties_work);
	cancel_delayed_work_sync(&xd->properties_changed_work);
}

static int __maybe_unused tb_xdomain_suspend(struct device *dev)
{
	stop_handshake(tb_to_xdomain(dev));
	return 0;
}

static int __maybe_unused tb_xdomain_resume(struct device *dev)
{
	struct tb_xdomain *xd = tb_to_xdomain(dev);

	/*
	 * Ask tb_xdomain_get_properties() to restore any existing DMA
	 * paths after the properties are re-read.
	 */
	xd->resume = true;
	start_handshake(xd);

	return 0;
}

static const struct dev_pm_ops tb_xdomain_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(tb_xdomain_suspend, tb_xdomain_resume)
};

struct device_type tb_xdomain_type = {
	.name = "thunderbolt_xdomain",
	.release = tb_xdomain_release,
	.pm = &tb_xdomain_pm_ops,
};
EXPORT_SYMBOL_GPL(tb_xdomain_type);

/**
 * tb_xdomain_alloc() - Allocate new XDomain object
 * @tb: Domain where the XDomain belongs
 * @parent: Parent device (the switch through which the connection to
 *	    the other domain is reached)
 * @route: Route string used to reach the other domain
 * @local_uuid: Our local domain UUID
 * @remote_uuid: UUID of the other domain (optional)
 *
 * Allocates a new XDomain structure and returns a pointer to it. The
 * object must be released by calling tb_xdomain_put().
 */
struct tb_xdomain *tb_xdomain_alloc(struct tb *tb, struct device *parent,
				    u64 route, const uuid_t *local_uuid,
				    const uuid_t *remote_uuid)
{
	struct tb_xdomain *xd;

	xd = kzalloc(sizeof(*xd), GFP_KERNEL);
	if (!xd)
		return NULL;

	xd->tb = tb;
	xd->route = route;
	ida_init(&xd->service_ids);
	mutex_init(&xd->lock);
	INIT_DELAYED_WORK(&xd->get_uuid_work, tb_xdomain_get_uuid);
	INIT_DELAYED_WORK(&xd->get_properties_work, tb_xdomain_get_properties);
	INIT_DELAYED_WORK(&xd->properties_changed_work,
			  tb_xdomain_properties_changed);

	xd->local_uuid = kmemdup(local_uuid, sizeof(uuid_t), GFP_KERNEL);
	if (!xd->local_uuid)
		goto err_free;

	if (remote_uuid) {
		xd->remote_uuid = kmemdup(remote_uuid, sizeof(uuid_t),
					  GFP_KERNEL);
		if (!xd->remote_uuid)
			goto err_free_local_uuid;
	} else {
		xd->needs_uuid = true;
	}

	device_initialize(&xd->dev);
	xd->dev.parent = get_device(parent);
	xd->dev.bus = &tb_bus_type;
	xd->dev.type = &tb_xdomain_type;
	xd->dev.groups = xdomain_attr_groups;
	dev_set_name(&xd->dev, "%u-%llx", tb->index, route);

	/*
	 * This keeps the DMA powered on as long as we have active
	 * connection to another host.
	 */
	pm_runtime_set_active(&xd->dev);
	pm_runtime_get_noresume(&xd->dev);
	pm_runtime_enable(&xd->dev);

	return xd;

err_free_local_uuid:
	kfree(xd->local_uuid);
err_free:
	kfree(xd);

	return NULL;
}
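
/*
 * Typical lifecycle from a connection manager's point of view (a
 * minimal sketch; @sw and @route would come from topology discovery):
 *
 *	xd = tb_xdomain_alloc(tb, &sw->dev, route, our_uuid, NULL);
 *	if (xd)
 *		tb_xdomain_add(xd);
 *
 * and later, when the connection goes away:
 *
 *	tb_xdomain_remove(xd);
 */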

/**
 * tb_xdomain_add() - Add XDomain to the bus
 * @xd: XDomain to add
 *
 * This function starts the XDomain discovery protocol handshake and
 * eventually adds the XDomain to the bus. After calling this function
 * the caller needs to call tb_xdomain_remove() in order to remove and
 * release the object regardless of whether the handshake succeeded or not.
 */
void tb_xdomain_add(struct tb_xdomain *xd)
{
	/* Start exchanging properties with the other host */
	start_handshake(xd);
}

static int unregister_service(struct device *dev, void *data)
{
	device_unregister(dev);
	return 0;
}

/**
 * tb_xdomain_remove() - Remove XDomain from the bus
 * @xd: XDomain to remove
 *
 * This will stop all ongoing configuration work and remove the XDomain
 * along with any services from the bus. When the last reference to @xd
 * is released the object will be released as well.
 */
void tb_xdomain_remove(struct tb_xdomain *xd)
{
	stop_handshake(xd);

	device_for_each_child_reverse(&xd->dev, xd, unregister_service);

	/*
	 * Undo runtime PM here explicitly because it is possible that
	 * the XDomain was never added to the bus and thus device_del()
	 * is not called for it (device_del() would handle this otherwise).
	 */
	pm_runtime_disable(&xd->dev);
	pm_runtime_put_noidle(&xd->dev);
	pm_runtime_set_suspended(&xd->dev);

	if (!device_is_registered(&xd->dev))
		put_device(&xd->dev);
	else
		device_unregister(&xd->dev);
}

/**
 * tb_xdomain_enable_paths() - Enable DMA paths for XDomain connection
 * @xd: XDomain connection
 * @transmit_path: HopID of the transmit path the other end is using to
 *		   send packets
 * @transmit_ring: DMA ring used to receive packets from the other end
 * @receive_path: HopID of the receive path the other end is using to
 *		  receive packets
 * @receive_ring: DMA ring used to send packets to the other end
 *
 * The function enables the DMA paths accordingly so that after a
 * successful return the caller can send and receive packets using the
 * high-speed DMA path.
 *
 * Return: %0 in case of success and negative errno in case of error
 */
int tb_xdomain_enable_paths(struct tb_xdomain *xd, u16 transmit_path,
			    u16 transmit_ring, u16 receive_path,
			    u16 receive_ring)
{
	int ret;

	mutex_lock(&xd->lock);

	if (xd->transmit_path) {
		ret = xd->transmit_path == transmit_path ? 0 : -EBUSY;
		goto exit_unlock;
	}

	xd->transmit_path = transmit_path;
	xd->transmit_ring = transmit_ring;
	xd->receive_path = receive_path;
	xd->receive_ring = receive_ring;

	ret = tb_domain_approve_xdomain_paths(xd->tb, xd);

exit_unlock:
	mutex_unlock(&xd->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(tb_xdomain_enable_paths);
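
/*
 * Example from a service driver (a minimal sketch; the hop IDs and
 * ring numbers are illustrative and would come from the service's own
 * protocol negotiation):
 *
 *	ret = tb_xdomain_enable_paths(xd, transmit_path, transmit_ring,
 *				      receive_path, receive_ring);
 *	if (ret)
 *		return ret;
 *
 * The matching tb_xdomain_disable_paths() below tears the paths down
 * once the driver is done with the rings.
 */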

/**
 * tb_xdomain_disable_paths() - Disable DMA paths for XDomain connection
 * @xd: XDomain connection
 *
 * This does the opposite of tb_xdomain_enable_paths(). After a call to
 * this function the caller is not expected to use the rings anymore.
 *
 * Return: %0 in case of success and negative errno in case of error
 */
int tb_xdomain_disable_paths(struct tb_xdomain *xd)
{
	int ret = 0;

	mutex_lock(&xd->lock);
	if (xd->transmit_path) {
		xd->transmit_path = 0;
		xd->transmit_ring = 0;
		xd->receive_path = 0;
		xd->receive_ring = 0;

		ret = tb_domain_disconnect_xdomain_paths(xd->tb, xd);
	}
	mutex_unlock(&xd->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(tb_xdomain_disable_paths);

struct tb_xdomain_lookup {
	const uuid_t *uuid;
	u8 link;
	u8 depth;
	u64 route;
};

static struct tb_xdomain *switch_find_xdomain(struct tb_switch *sw,
	const struct tb_xdomain_lookup *lookup)
{
	struct tb_port *port;

	tb_switch_for_each_port(sw, port) {
		struct tb_xdomain *xd;

		if (port->xdomain) {
			xd = port->xdomain;

			if (lookup->uuid) {
				if (xd->remote_uuid &&
				    uuid_equal(xd->remote_uuid, lookup->uuid))
					return xd;
			} else if (lookup->link &&
				   lookup->link == xd->link &&
				   lookup->depth == xd->depth) {
				return xd;
			} else if (lookup->route &&
				   lookup->route == xd->route) {
				return xd;
			}
		} else if (tb_port_has_remote(port)) {
			xd = switch_find_xdomain(port->remote->sw, lookup);
			if (xd)
				return xd;
		}
	}

	return NULL;
}

/**
 * tb_xdomain_find_by_uuid() - Find an XDomain by UUID
 * @tb: Domain where the XDomain belongs
 * @uuid: UUID to look for
 *
 * Finds XDomain by walking through the Thunderbolt topology below @tb.
 * The returned XDomain will have its reference count increased so the
 * caller needs to call tb_xdomain_put() when it is done with the
 * object.
 *
 * This will find all XDomains including the ones that are not yet added
 * to the bus (handshake is still in progress).
 *
 * The caller needs to hold @tb->lock.
 */
struct tb_xdomain *tb_xdomain_find_by_uuid(struct tb *tb, const uuid_t *uuid)
{
	struct tb_xdomain_lookup lookup;
	struct tb_xdomain *xd;

	memset(&lookup, 0, sizeof(lookup));
	lookup.uuid = uuid;

	xd = switch_find_xdomain(tb->root_switch, &lookup);
	return tb_xdomain_get(xd);
}
EXPORT_SYMBOL_GPL(tb_xdomain_find_by_uuid);
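
/*
 * Example lookup (a minimal sketch; note that @tb->lock must be held,
 * which is what locked variants such as tb_xdomain_find_by_uuid_locked()
 * do on the caller's behalf):
 *
 *	mutex_lock(&tb->lock);
 *	xd = tb_xdomain_find_by_uuid(tb, uuid);
 *	mutex_unlock(&tb->lock);
 *	if (xd) {
 *		...
 *		tb_xdomain_put(xd);
 *	}
 */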

/**
 * tb_xdomain_find_by_link_depth() - Find an XDomain by link and depth
 * @tb: Domain where the XDomain belongs
 * @link: Root switch link number
 * @depth: Depth in the link
 *
 * Finds XDomain by walking through the Thunderbolt topology below @tb.
 * The returned XDomain will have its reference count increased so the
 * caller needs to call tb_xdomain_put() when it is done with the
 * object.
 *
 * This will find all XDomains including the ones that are not yet added
 * to the bus (handshake is still in progress).
 *
 * The caller needs to hold @tb->lock.
 */
struct tb_xdomain *tb_xdomain_find_by_link_depth(struct tb *tb, u8 link,
						 u8 depth)
{
	struct tb_xdomain_lookup lookup;
	struct tb_xdomain *xd;

	memset(&lookup, 0, sizeof(lookup));
	lookup.link = link;
	lookup.depth = depth;

	xd = switch_find_xdomain(tb->root_switch, &lookup);
	return tb_xdomain_get(xd);
}

Radion Mirchevsky | 484cb15 | 2017-10-04 14:53:54 +0300 | [diff] [blame] | 1495 | /** |
| 1496 | * tb_xdomain_find_by_route() - Find an XDomain by route string |
| 1497 | * @tb: Domain where the XDomain belongs to |
| 1498 | * @route: XDomain route string |
| 1499 | * |
| 1500 | * Finds an XDomain by walking through the Thunderbolt topology below @tb. |
| 1501 | * The returned XDomain will have its reference count increased so the |
| 1502 | * caller needs to call tb_xdomain_put() when it is done with the |
| 1503 | * object. |
| 1504 | * |
| 1505 | * This will find all XDomains, including ones that are not yet added |
| 1506 | * to the bus because their handshake is still in progress. |
| 1507 | * |
| 1508 | * The caller needs to hold @tb->lock. |
| 1509 | */ |
| 1510 | struct tb_xdomain *tb_xdomain_find_by_route(struct tb *tb, u64 route) |
| 1511 | { |
| 1512 | struct tb_xdomain_lookup lookup; |
| 1513 | struct tb_xdomain *xd; |
| 1514 | |
| 1515 | memset(&lookup, 0, sizeof(lookup)); |
| 1516 | lookup.route = route; |
| 1517 | |
| 1518 | xd = switch_find_xdomain(tb->root_switch, &lookup); |
| 1519 | return tb_xdomain_get(xd); |
| 1520 | } |
| 1521 | EXPORT_SYMBOL_GPL(tb_xdomain_find_by_route); |
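/*
 * A hypothetical convenience wrapper (an assumption, not an API provided
 * here) showing how a caller can combine the @tb->lock requirement with
 * the lookup; the same pattern applies to all three find helpers above.
 */
static struct tb_xdomain *example_find_by_route_locked(struct tb *tb,
						       u64 route)
{
	struct tb_xdomain *xd;

	mutex_lock(&tb->lock);
	xd = tb_xdomain_find_by_route(tb, route);
	mutex_unlock(&tb->lock);

	return xd;
}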
| 1522 | |
Mika Westerberg | d1ff702 | 2017-10-02 13:38:34 +0300 | [diff] [blame] | 1523 | bool tb_xdomain_handle_request(struct tb *tb, enum tb_cfg_pkg_type type, |
| 1524 | const void *buf, size_t size) |
| 1525 | { |
| 1526 | const struct tb_protocol_handler *handler, *tmp; |
| 1527 | const struct tb_xdp_header *hdr = buf; |
| 1528 | unsigned int length; |
| 1529 | int ret = 0; |
| 1530 | |
| 1531 | /* We expect the packet to be at least the size of the header; returning true drops a malformed packet */ |
| 1532 | length = hdr->xd_hdr.length_sn & TB_XDOMAIN_LENGTH_MASK; |
| 1533 | if (length != size / 4 - sizeof(hdr->xd_hdr) / 4) |
| 1534 | return true; |
| 1535 | if (length < sizeof(*hdr) / 4 - sizeof(hdr->xd_hdr) / 4) |
| 1536 | return true; |
| 1537 | |
| 1538 | /* |
| 1539 | * Handle XDomain discovery protocol packets directly here. For |
| 1540 | * other protocols (based on their UUID) we call registered |
| 1541 | * handlers in turn. |
| 1542 | */ |
| 1543 | if (uuid_equal(&hdr->uuid, &tb_xdp_uuid)) { |
Aditya Pakki | 48f40b9 | 2019-03-20 11:47:20 -0500 | [diff] [blame] | 1544 | if (type == TB_CFG_PKG_XDOMAIN_REQ) |
| 1545 | return tb_xdp_schedule_request(tb, hdr, size); |
Mika Westerberg | d1ff702 | 2017-10-02 13:38:34 +0300 | [diff] [blame] | 1546 | return false; |
| 1547 | } |
| 1548 | |
| 1549 | mutex_lock(&xdomain_lock); |
| 1550 | list_for_each_entry_safe(handler, tmp, &protocol_handlers, list) { |
| 1551 | if (!uuid_equal(&hdr->uuid, handler->uuid)) |
| 1552 | continue; |
| 1553 | |
| 1554 | mutex_unlock(&xdomain_lock); |
| 1555 | ret = handler->callback(buf, size, handler->data); |
| 1556 | mutex_lock(&xdomain_lock); |
| 1557 | |
| 1558 | if (ret) |
| 1559 | break; |
| 1560 | } |
| 1561 | mutex_unlock(&xdomain_lock); |
| 1562 | |
| 1563 | return ret > 0; |
| 1564 | } |
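/*
 * A hedged sketch of plugging into the dispatch loop above. The UUID
 * value and the callback body are made up for illustration; the
 * registration entry point tb_register_protocol_handler() is the one
 * used with struct tb_protocol_handler elsewhere in this driver.
 */
static int example_protocol_callback(const void *buf, size_t size, void *data)
{
	/*
	 * Returning > 0 tells tb_xdomain_handle_request() the packet was
	 * handled; any non-zero value stops the handler walk.
	 */
	return 1;
}

static const uuid_t example_protocol_uuid =
	UUID_INIT(0x9e588f79, 0x308a, 0x4f33,
		  0x97, 0x03, 0xee, 0x06, 0xde, 0xad, 0xbe, 0xef);

static struct tb_protocol_handler example_protocol_handler = {
	.uuid = &example_protocol_uuid,
	.callback = example_protocol_callback,
};

static int __init example_init(void)
{
	return tb_register_protocol_handler(&example_protocol_handler);
}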
| 1565 | |
| 1566 | static int rebuild_property_block(void) |
| 1567 | { |
| 1568 | u32 *block, len; |
| 1569 | int ret; |
| 1570 | |
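	/* First pass: a NULL buffer just computes the required length in dwords */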
| 1571 | ret = tb_property_format_dir(xdomain_property_dir, NULL, 0); |
| 1572 | if (ret < 0) |
| 1573 | return ret; |
| 1574 | |
| 1575 | len = ret; |
| 1576 | |
| 1577 | block = kcalloc(len, sizeof(u32), GFP_KERNEL); |
| 1578 | if (!block) |
| 1579 | return -ENOMEM; |
| 1580 | |
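	/* Second pass: format the directory into the freshly allocated block */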
| 1581 | ret = tb_property_format_dir(xdomain_property_dir, block, len); |
| 1582 | if (ret) { |
| 1583 | kfree(block); |
| 1584 | return ret; |
| 1585 | } |
| 1586 | |
| 1587 | kfree(xdomain_property_block); |
| 1588 | xdomain_property_block = block; |
| 1589 | xdomain_property_block_len = len; |
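	/*
	 * Bump the generation; it is carried in the XDomain properties
	 * response so the remote end can detect that the block changed.
	 */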
| 1590 | xdomain_property_block_gen++; |
| 1591 | |
| 1592 | return 0; |
| 1593 | } |
| 1594 | |
| 1595 | static int update_xdomain(struct device *dev, void *data) |
| 1596 | { |
| 1597 | struct tb_xdomain *xd; |
| 1598 | |
| 1599 | xd = tb_to_xdomain(dev); |
| 1600 | if (xd) { |
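		/*
		 * The short delay is assumed here to coalesce bursts of
		 * property updates into a single notification.
		 */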
| 1601 | queue_delayed_work(xd->tb->wq, &xd->properties_changed_work, |
| 1602 | msecs_to_jiffies(50)); |
| 1603 | } |
| 1604 | |
| 1605 | return 0; |
| 1606 | } |
| 1607 | |
| 1608 | static void update_all_xdomains(void) |
| 1609 | { |
| 1610 | bus_for_each_dev(&tb_bus_type, NULL, NULL, update_xdomain); |
| 1611 | } |
| 1612 | |
| 1613 | static bool remove_directory(const char *key, const struct tb_property_dir *dir) |
| 1614 | { |
| 1615 | struct tb_property *p; |
| 1616 | |
| 1617 | p = tb_property_find(xdomain_property_dir, key, |
| 1618 | TB_PROPERTY_TYPE_DIRECTORY); |
| 1619 | if (p && p->value.dir == dir) { |
| 1620 | tb_property_remove(p); |
| 1621 | return true; |
| 1622 | } |
| 1623 | return false; |
| 1624 | } |
| 1625 | |
| 1626 | /** |
| 1627 | * tb_register_property_dir() - Register a property directory with the host |
| 1628 | * @key: Key (name) of the directory to add |
| 1629 | * @dir: Directory to add |
| 1630 | * |
| 1631 | * Service drivers can use this function to add a new property |
| 1632 | * directory to the properties this host exposes. The connected hosts |
| 1633 | * are notified so they can re-read the properties of this host if |
| 1634 | * they are interested. |
| 1635 | * |
| 1636 | * Return: %0 on success and negative errno on failure |
| 1637 | */ |
| 1638 | int tb_register_property_dir(const char *key, struct tb_property_dir *dir) |
| 1639 | { |
| 1640 | int ret; |
| 1641 | |
Mika Westerberg | acb40d8 | 2017-10-09 16:22:34 +0300 | [diff] [blame] | 1642 | if (WARN_ON(!xdomain_property_dir)) |
| 1643 | return -EAGAIN; |
| 1644 | |
Mika Westerberg | d1ff702 | 2017-10-02 13:38:34 +0300 | [diff] [blame] | 1645 | if (!key || strlen(key) > 8) |
| 1646 | return -EINVAL; |
| 1647 | |
| 1648 | mutex_lock(&xdomain_lock); |
| 1649 | if (tb_property_find(xdomain_property_dir, key, |
| 1650 | TB_PROPERTY_TYPE_DIRECTORY)) { |
| 1651 | ret = -EEXIST; |
| 1652 | goto err_unlock; |
| 1653 | } |
| 1654 | |
| 1655 | ret = tb_property_add_dir(xdomain_property_dir, key, dir); |
| 1656 | if (ret) |
| 1657 | goto err_unlock; |
| 1658 | |
| 1659 | ret = rebuild_property_block(); |
| 1660 | if (ret) { |
| 1661 | remove_directory(key, dir); |
| 1662 | goto err_unlock; |
| 1663 | } |
| 1664 | |
| 1665 | mutex_unlock(&xdomain_lock); |
| 1666 | update_all_xdomains(); |
| 1667 | return 0; |
| 1668 | |
| 1669 | err_unlock: |
| 1670 | mutex_unlock(&xdomain_lock); |
| 1671 | return ret; |
| 1672 | } |
| 1673 | EXPORT_SYMBOL_GPL(tb_register_property_dir); |
| 1674 | |
| 1675 | /** |
| 1676 | * tb_unregister_property_dir() - Remove a property directory from the host |
| 1677 | * @key: Key (name) of the directory |
| 1678 | * @dir: Directory to remove |
| 1679 | * |
| 1680 | * This will remove the directory from this host's properties and notify |
| 1681 | * the connected hosts about the change. |
| 1682 | */ |
| 1683 | void tb_unregister_property_dir(const char *key, struct tb_property_dir *dir) |
| 1684 | { |
| 1685 | int ret = 0; |
| 1686 | |
| 1687 | mutex_lock(&xdomain_lock); |
| 1688 | if (remove_directory(key, dir)) |
| 1689 | ret = rebuild_property_block(); |
| 1690 | mutex_unlock(&xdomain_lock); |
| 1691 | |
| 1692 | if (!ret) |
| 1693 | update_all_xdomains(); |
| 1694 | } |
| 1695 | EXPORT_SYMBOL_GPL(tb_unregister_property_dir); |
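/*
 * A minimal sketch of how a service driver might publish and later
 * remove its own directory. The key "exsvc" and the "prtcvers" property
 * are illustrative assumptions; only the tb_property_*() and
 * tb_*register_property_dir() calls are real APIs here. Keys are limited
 * to 8 characters (see the check in tb_register_property_dir() above).
 */
static struct tb_property_dir *example_dir;

static int example_publish_properties(void)
{
	int ret;

	example_dir = tb_property_create_dir(NULL);
	if (!example_dir)
		return -ENOMEM;

	tb_property_add_immediate(example_dir, "prtcvers", 1);

	ret = tb_register_property_dir("exsvc", example_dir);
	if (ret) {
		tb_property_free_dir(example_dir);
		example_dir = NULL;
	}
	return ret;
}

static void example_unpublish_properties(void)
{
	tb_unregister_property_dir("exsvc", example_dir);
	tb_property_free_dir(example_dir);
	example_dir = NULL;
}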
| 1696 | |
| 1697 | int tb_xdomain_init(void) |
| 1698 | { |
| 1699 | int ret; |
| 1700 | |
| 1701 | xdomain_property_dir = tb_property_create_dir(NULL); |
| 1702 | if (!xdomain_property_dir) |
| 1703 | return -ENOMEM; |
| 1704 | |
| 1705 | /* |
| 1706 | * Initialize the standard set of properties without any service |
| 1707 | * directories. Service drivers add their own directories when |
| 1708 | * they are loaded. |
| 1709 | */ |
| 1710 | tb_property_add_immediate(xdomain_property_dir, "vendorid", |
| 1711 | PCI_VENDOR_ID_INTEL); |
| 1712 | tb_property_add_text(xdomain_property_dir, "vendorid", "Intel Corp."); |
| 1713 | tb_property_add_immediate(xdomain_property_dir, "deviceid", 0x1); |
| 1714 | tb_property_add_text(xdomain_property_dir, "deviceid", |
| 1715 | utsname()->nodename); |
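	/* Keys are limited to 8 characters, hence "devicerv" (presumably device revision) */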
| 1716 | tb_property_add_immediate(xdomain_property_dir, "devicerv", 0x80000100); |
| 1717 | |
| 1718 | ret = rebuild_property_block(); |
| 1719 | if (ret) { |
| 1720 | tb_property_free_dir(xdomain_property_dir); |
| 1721 | xdomain_property_dir = NULL; |
| 1722 | } |
| 1723 | |
| 1724 | return ret; |
| 1725 | } |
| 1726 | |
| 1727 | void tb_xdomain_exit(void) |
| 1728 | { |
| 1729 | kfree(xdomain_property_block); |
| 1730 | tb_property_free_dir(xdomain_property_dir); |
| 1731 | } |