/*
 * Thunderbolt XDomain discovery protocol support
 *
 * Copyright (C) 2017, Intel Corporation
 * Authors: Michael Jamet <michael.jamet@intel.com>
 *          Mika Westerberg <mika.westerberg@linux.intel.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/device.h>
#include <linux/kmod.h>
#include <linux/module.h>
#include <linux/utsname.h>
#include <linux/uuid.h>
#include <linux/workqueue.h>

#include "tb.h"

#define XDOMAIN_DEFAULT_TIMEOUT			5000 /* ms */
#define XDOMAIN_PROPERTIES_RETRIES		60
#define XDOMAIN_PROPERTIES_CHANGED_RETRIES	10

struct xdomain_request_work {
	struct work_struct work;
	struct tb_xdp_header *pkg;
	struct tb *tb;
};

/* Serializes access to the properties and protocol handlers below */
static DEFINE_MUTEX(xdomain_lock);

/* Properties exposed to the remote domains */
static struct tb_property_dir *xdomain_property_dir;
static u32 *xdomain_property_block;
static u32 xdomain_property_block_len;
static u32 xdomain_property_block_gen;

/* Additional protocol handlers */
static LIST_HEAD(protocol_handlers);

/* UUID for XDomain discovery protocol: b638d70e-42ff-40bb-97c2-90e2c0b2ff07 */
static const uuid_t tb_xdp_uuid =
	UUID_INIT(0xb638d70e, 0x42ff, 0x40bb,
		  0x97, 0xc2, 0x90, 0xe2, 0xc0, 0xb2, 0xff, 0x07);

static bool tb_xdomain_match(const struct tb_cfg_request *req,
			     const struct ctl_pkg *pkg)
{
	switch (pkg->frame.eof) {
	case TB_CFG_PKG_ERROR:
		return true;

	case TB_CFG_PKG_XDOMAIN_RESP: {
		const struct tb_xdp_header *res_hdr = pkg->buffer;
		const struct tb_xdp_header *req_hdr = req->request;

		if (pkg->frame.size < req->response_size / 4)
			return false;

		/* Make sure route matches */
		if ((res_hdr->xd_hdr.route_hi & ~BIT(31)) !=
		     req_hdr->xd_hdr.route_hi)
			return false;
		if ((res_hdr->xd_hdr.route_lo) != req_hdr->xd_hdr.route_lo)
			return false;

		/* Check that the XDomain protocol matches */
		if (!uuid_equal(&res_hdr->uuid, &req_hdr->uuid))
			return false;

		return true;
	}

	default:
		return false;
	}
}

static bool tb_xdomain_copy(struct tb_cfg_request *req,
			    const struct ctl_pkg *pkg)
{
	memcpy(req->response, pkg->buffer, req->response_size);
	req->result.err = 0;
	return true;
}

static void response_ready(void *data)
{
	tb_cfg_request_put(data);
}

static int __tb_xdomain_response(struct tb_ctl *ctl, const void *response,
				 size_t size, enum tb_cfg_pkg_type type)
{
	struct tb_cfg_request *req;

	req = tb_cfg_request_alloc();
	if (!req)
		return -ENOMEM;

	req->match = tb_xdomain_match;
	req->copy = tb_xdomain_copy;
	req->request = response;
	req->request_size = size;
	req->request_type = type;

	return tb_cfg_request(ctl, req, response_ready, req);
}

/**
 * tb_xdomain_response() - Send an XDomain response message
 * @xd: XDomain to send the message to
 * @response: Response to send
 * @size: Size of the response
 * @type: PDF type of the response
 *
 * This can be used to send an XDomain response message to the other
 * domain. No response for the message is expected.
 *
 * Return: %0 in case of success and negative errno in case of failure
 */
int tb_xdomain_response(struct tb_xdomain *xd, const void *response,
			size_t size, enum tb_cfg_pkg_type type)
{
	return __tb_xdomain_response(xd->tb->ctl, response, size, type);
}
EXPORT_SYMBOL_GPL(tb_xdomain_response);
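/*
 * Illustrative sketch, not part of this file: a service driver that has
 * just handled an incoming request could reply with
 * tb_xdomain_response(). The struct my_svc_response layout below is
 * hypothetical.
 *
 *	struct my_svc_response reply = { .status = 0 };
 *	int ret;
 *
 *	ret = tb_xdomain_response(xd, &reply, sizeof(reply),
 *				  TB_CFG_PKG_XDOMAIN_RESP);
 *	if (ret)
 *		dev_warn(&xd->dev, "failed to send response\n");
 */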

static int __tb_xdomain_request(struct tb_ctl *ctl, const void *request,
	size_t request_size, enum tb_cfg_pkg_type request_type, void *response,
	size_t response_size, enum tb_cfg_pkg_type response_type,
	unsigned int timeout_msec)
{
	struct tb_cfg_request *req;
	struct tb_cfg_result res;

	req = tb_cfg_request_alloc();
	if (!req)
		return -ENOMEM;

	req->match = tb_xdomain_match;
	req->copy = tb_xdomain_copy;
	req->request = request;
	req->request_size = request_size;
	req->request_type = request_type;
	req->response = response;
	req->response_size = response_size;
	req->response_type = response_type;

	res = tb_cfg_request_sync(ctl, req, timeout_msec);

	tb_cfg_request_put(req);

	return res.err == 1 ? -EIO : res.err;
}

/**
 * tb_xdomain_request() - Send an XDomain request
 * @xd: XDomain to send the request to
 * @request: Request to send
 * @request_size: Size of the request in bytes
 * @request_type: PDF type of the request
 * @response: Response is copied here
 * @response_size: Expected size of the response in bytes
 * @response_type: Expected PDF type of the response
 * @timeout_msec: Timeout in milliseconds to wait for the response
 *
 * This function can be used to send XDomain control channel messages to
 * the other domain. The function waits until the response is received
 * or the timeout expires, whichever comes first.
 *
 * Return: %0 in case of success and negative errno in case of failure
 */
int tb_xdomain_request(struct tb_xdomain *xd, const void *request,
		       size_t request_size, enum tb_cfg_pkg_type request_type,
		       void *response, size_t response_size,
		       enum tb_cfg_pkg_type response_type, unsigned int timeout_msec)
{
	return __tb_xdomain_request(xd->tb->ctl, request, request_size,
				    request_type, response, response_size,
				    response_type, timeout_msec);
}
EXPORT_SYMBOL_GPL(tb_xdomain_request);
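/*
 * Illustrative sketch, not part of this file: sending a request and
 * waiting for the matching response. The request/response structures
 * and the MY_SVC_STATUS opcode below are hypothetical; a real protocol
 * defines its own payload layout on top of the XDomain header.
 *
 *	struct my_svc_request req = { .opcode = MY_SVC_STATUS };
 *	struct my_svc_response res;
 *	int ret;
 *
 *	ret = tb_xdomain_request(xd, &req, sizeof(req),
 *				 TB_CFG_PKG_XDOMAIN_REQ, &res, sizeof(res),
 *				 TB_CFG_PKG_XDOMAIN_RESP, 5000);
 *	if (ret)
 *		return ret;
 */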

static inline void tb_xdp_fill_header(struct tb_xdp_header *hdr, u64 route,
	u8 sequence, enum tb_xdp_type type, size_t size)
{
	u32 length_sn;

	length_sn = (size - sizeof(hdr->xd_hdr)) / 4;
	length_sn |= (sequence << TB_XDOMAIN_SN_SHIFT) & TB_XDOMAIN_SN_MASK;

	hdr->xd_hdr.route_hi = upper_32_bits(route);
	hdr->xd_hdr.route_lo = lower_32_bits(route);
	hdr->xd_hdr.length_sn = length_sn;
	hdr->type = type;
	memcpy(&hdr->uuid, &tb_xdp_uuid, sizeof(tb_xdp_uuid));
}

static int tb_xdp_handle_error(const struct tb_xdp_header *hdr)
{
	const struct tb_xdp_error_response *error;

	if (hdr->type != ERROR_RESPONSE)
		return 0;

	error = (const struct tb_xdp_error_response *)hdr;

	switch (error->error) {
	case ERROR_UNKNOWN_PACKET:
	case ERROR_UNKNOWN_DOMAIN:
		return -EIO;
	case ERROR_NOT_SUPPORTED:
		return -ENOTSUPP;
	case ERROR_NOT_READY:
		return -EAGAIN;
	default:
		break;
	}

	return 0;
}

static int tb_xdp_error_response(struct tb_ctl *ctl, u64 route, u8 sequence,
				 enum tb_xdp_error error)
{
	struct tb_xdp_error_response res;

	memset(&res, 0, sizeof(res));
	tb_xdp_fill_header(&res.hdr, route, sequence, ERROR_RESPONSE,
			   sizeof(res));
	res.error = error;

	return __tb_xdomain_response(ctl, &res, sizeof(res),
				     TB_CFG_PKG_XDOMAIN_RESP);
}

static int tb_xdp_properties_request(struct tb_ctl *ctl, u64 route,
	const uuid_t *src_uuid, const uuid_t *dst_uuid, int retry,
	u32 **block, u32 *generation)
{
	struct tb_xdp_properties_response *res;
	struct tb_xdp_properties req;
	u16 data_len, len;
	size_t total_size;
	u32 *data = NULL;
	int ret;

	total_size = sizeof(*res) + TB_XDP_PROPERTIES_MAX_DATA_LENGTH * 4;
	res = kzalloc(total_size, GFP_KERNEL);
	if (!res)
		return -ENOMEM;

	memset(&req, 0, sizeof(req));
	tb_xdp_fill_header(&req.hdr, route, retry % 4, PROPERTIES_REQUEST,
			   sizeof(req));
	memcpy(&req.src_uuid, src_uuid, sizeof(*src_uuid));
	memcpy(&req.dst_uuid, dst_uuid, sizeof(*dst_uuid));

	len = 0;
	data_len = 0;

	do {
		ret = __tb_xdomain_request(ctl, &req, sizeof(req),
					   TB_CFG_PKG_XDOMAIN_REQ, res,
					   total_size, TB_CFG_PKG_XDOMAIN_RESP,
					   XDOMAIN_DEFAULT_TIMEOUT);
		if (ret)
			goto err;

		ret = tb_xdp_handle_error(&res->hdr);
		if (ret)
			goto err;

		/*
		 * The packet length includes the whole payload without
		 * the XDomain header. First validate that the packet is
		 * at least the size of the response structure.
		 */
		len = res->hdr.xd_hdr.length_sn & TB_XDOMAIN_LENGTH_MASK;
		if (len < sizeof(*res) / 4) {
			ret = -EINVAL;
			goto err;
		}

		len += sizeof(res->hdr.xd_hdr) / 4;
		len -= sizeof(*res) / 4;

		if (res->offset != req.offset) {
			ret = -EINVAL;
			goto err;
		}

		/*
		 * On the first pass allocate a block that has enough
		 * space for the whole properties block.
		 */
		if (!data) {
			data_len = res->data_length;
			if (data_len > TB_XDP_PROPERTIES_MAX_LENGTH) {
				ret = -E2BIG;
				goto err;
			}

			data = kcalloc(data_len, sizeof(u32), GFP_KERNEL);
			if (!data) {
				ret = -ENOMEM;
				goto err;
			}
		}

		memcpy(data + req.offset, res->data, len * 4);
		req.offset += len;
	} while (!data_len || req.offset < data_len);

	*block = data;
	*generation = res->generation;

	kfree(res);

	return data_len;

err:
	kfree(data);
	kfree(res);

	return ret;
}

static int tb_xdp_properties_response(struct tb *tb, struct tb_ctl *ctl,
	u64 route, u8 sequence, const uuid_t *src_uuid,
	const struct tb_xdp_properties *req)
{
	struct tb_xdp_properties_response *res;
	size_t total_size;
	u16 len;
	int ret;

	/*
	 * Currently we expect all requests to be directed to us. The
	 * protocol supports forwarding, though, for which we might add
	 * support later on.
	 */
	if (!uuid_equal(src_uuid, &req->dst_uuid)) {
		tb_xdp_error_response(ctl, route, sequence,
				      ERROR_UNKNOWN_DOMAIN);
		return 0;
	}

	mutex_lock(&xdomain_lock);

	if (req->offset >= xdomain_property_block_len) {
		mutex_unlock(&xdomain_lock);
		return -EINVAL;
	}

	len = xdomain_property_block_len - req->offset;
	len = min_t(u16, len, TB_XDP_PROPERTIES_MAX_DATA_LENGTH);
	total_size = sizeof(*res) + len * 4;

	res = kzalloc(total_size, GFP_KERNEL);
	if (!res) {
		mutex_unlock(&xdomain_lock);
		return -ENOMEM;
	}

	tb_xdp_fill_header(&res->hdr, route, sequence, PROPERTIES_RESPONSE,
			   total_size);
	res->generation = xdomain_property_block_gen;
	res->data_length = xdomain_property_block_len;
	res->offset = req->offset;
	uuid_copy(&res->src_uuid, src_uuid);
	uuid_copy(&res->dst_uuid, &req->src_uuid);
	memcpy(res->data, &xdomain_property_block[req->offset], len * 4);

	mutex_unlock(&xdomain_lock);

	ret = __tb_xdomain_response(ctl, res, total_size,
				    TB_CFG_PKG_XDOMAIN_RESP);

	kfree(res);
	return ret;
}

static int tb_xdp_properties_changed_request(struct tb_ctl *ctl, u64 route,
					     int retry, const uuid_t *uuid)
{
	struct tb_xdp_properties_changed_response res;
	struct tb_xdp_properties_changed req;
	int ret;

	memset(&req, 0, sizeof(req));
	tb_xdp_fill_header(&req.hdr, route, retry % 4,
			   PROPERTIES_CHANGED_REQUEST, sizeof(req));
	uuid_copy(&req.src_uuid, uuid);

	memset(&res, 0, sizeof(res));
	ret = __tb_xdomain_request(ctl, &req, sizeof(req),
				   TB_CFG_PKG_XDOMAIN_REQ, &res, sizeof(res),
				   TB_CFG_PKG_XDOMAIN_RESP,
				   XDOMAIN_DEFAULT_TIMEOUT);
	if (ret)
		return ret;

	return tb_xdp_handle_error(&res.hdr);
}

static int
tb_xdp_properties_changed_response(struct tb_ctl *ctl, u64 route, u8 sequence)
{
	struct tb_xdp_properties_changed_response res;

	memset(&res, 0, sizeof(res));
	tb_xdp_fill_header(&res.hdr, route, sequence,
			   PROPERTIES_CHANGED_RESPONSE, sizeof(res));
	return __tb_xdomain_response(ctl, &res, sizeof(res),
				     TB_CFG_PKG_XDOMAIN_RESP);
}

/**
 * tb_register_protocol_handler() - Register protocol handler
 * @handler: Handler to register
 *
 * This allows XDomain service drivers to hook into incoming XDomain
 * messages. After this function is called the service driver needs to
 * be able to handle calls to the callback whenever a packet with the
 * registered protocol is received.
 */
int tb_register_protocol_handler(struct tb_protocol_handler *handler)
{
	if (!handler->uuid || !handler->callback)
		return -EINVAL;
	if (uuid_equal(handler->uuid, &tb_xdp_uuid))
		return -EINVAL;

	mutex_lock(&xdomain_lock);
	list_add_tail(&handler->list, &protocol_handlers);
	mutex_unlock(&xdomain_lock);

	return 0;
}
EXPORT_SYMBOL_GPL(tb_register_protocol_handler);
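/*
 * Illustrative sketch, not part of this file: hooking a custom protocol
 * UUID into the incoming message path. The UUID and the callback below
 * are hypothetical. The callback returns a positive value once the
 * packet has been consumed.
 *
 *	static int my_proto_callback(const void *buf, size_t size, void *data)
 *	{
 *		return 1;
 *	}
 *
 *	static struct tb_protocol_handler my_handler = {
 *		.uuid = &my_proto_uuid,
 *		.callback = my_proto_callback,
 *	};
 *
 *	ret = tb_register_protocol_handler(&my_handler);
 */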

/**
 * tb_unregister_protocol_handler() - Unregister protocol handler
 * @handler: Handler to unregister
 *
 * Removes the previously registered protocol handler.
 */
void tb_unregister_protocol_handler(struct tb_protocol_handler *handler)
{
	mutex_lock(&xdomain_lock);
	list_del_init(&handler->list);
	mutex_unlock(&xdomain_lock);
}
EXPORT_SYMBOL_GPL(tb_unregister_protocol_handler);

static void tb_xdp_handle_request(struct work_struct *work)
{
	struct xdomain_request_work *xw = container_of(work, typeof(*xw), work);
	const struct tb_xdp_header *pkg = xw->pkg;
	const struct tb_xdomain_header *xhdr = &pkg->xd_hdr;
	struct tb *tb = xw->tb;
	struct tb_ctl *ctl = tb->ctl;
	const uuid_t *uuid;
	int ret = 0;
	u32 sequence;
	u64 route;

	route = ((u64)xhdr->route_hi << 32 | xhdr->route_lo) & ~BIT_ULL(63);
	sequence = xhdr->length_sn & TB_XDOMAIN_SN_MASK;
	sequence >>= TB_XDOMAIN_SN_SHIFT;

	mutex_lock(&tb->lock);
	if (tb->root_switch)
		uuid = tb->root_switch->uuid;
	else
		uuid = NULL;
	mutex_unlock(&tb->lock);

	if (!uuid) {
		tb_xdp_error_response(ctl, route, sequence, ERROR_NOT_READY);
		goto out;
	}

	switch (pkg->type) {
	case PROPERTIES_REQUEST:
		ret = tb_xdp_properties_response(tb, ctl, route, sequence, uuid,
			(const struct tb_xdp_properties *)pkg);
		break;

	case PROPERTIES_CHANGED_REQUEST: {
		const struct tb_xdp_properties_changed *xchg =
			(const struct tb_xdp_properties_changed *)pkg;
		struct tb_xdomain *xd;

		ret = tb_xdp_properties_changed_response(ctl, route, sequence);

		/*
		 * Since the properties have been changed, let's update
		 * the xdomain related to this connection as well in
		 * case there is a change in services it offers.
		 */
		xd = tb_xdomain_find_by_uuid_locked(tb, &xchg->src_uuid);
		if (xd) {
			queue_delayed_work(tb->wq, &xd->get_properties_work,
					   msecs_to_jiffies(50));
			tb_xdomain_put(xd);
		}

		break;
	}

	default:
		break;
	}

	if (ret) {
		tb_warn(tb, "failed to send XDomain response for %#x\n",
			pkg->type);
	}

out:
	kfree(xw->pkg);
	kfree(xw);
}

static void
tb_xdp_schedule_request(struct tb *tb, const struct tb_xdp_header *hdr,
			size_t size)
{
	struct xdomain_request_work *xw;

	xw = kmalloc(sizeof(*xw), GFP_KERNEL);
	if (!xw)
		return;

	INIT_WORK(&xw->work, tb_xdp_handle_request);
	xw->pkg = kmemdup(hdr, size, GFP_KERNEL);
	xw->tb = tb;

	queue_work(tb->wq, &xw->work);
}

/**
 * tb_register_service_driver() - Register XDomain service driver
 * @drv: Driver to register
 *
 * Registers the new service driver @drv to the bus.
 */
int tb_register_service_driver(struct tb_service_driver *drv)
{
	drv->driver.bus = &tb_bus_type;
	return driver_register(&drv->driver);
}
EXPORT_SYMBOL_GPL(tb_register_service_driver);
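/*
 * Illustrative sketch, not part of this file: a minimal service driver
 * registration. The driver name, the probe/remove callbacks, the
 * TB_SERVICE() id entry and the "network" key are hypothetical; the key
 * corresponds to the service directory key exposed in the remote
 * properties.
 *
 *	static const struct tb_service_id my_service_ids[] = {
 *		{ TB_SERVICE("network", 1) },
 *		{ },
 *	};
 *
 *	static struct tb_service_driver my_driver = {
 *		.driver.name = "my-service",
 *		.probe = my_probe,
 *		.remove = my_remove,
 *		.id_table = my_service_ids,
 *	};
 *
 *	ret = tb_register_service_driver(&my_driver);
 */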

/**
 * tb_unregister_service_driver() - Unregister XDomain service driver
 * @drv: Driver to unregister
 *
 * Unregisters XDomain service driver from the bus.
 */
void tb_unregister_service_driver(struct tb_service_driver *drv)
{
	driver_unregister(&drv->driver);
}
EXPORT_SYMBOL_GPL(tb_unregister_service_driver);

static ssize_t key_show(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	struct tb_service *svc = container_of(dev, struct tb_service, dev);

	/*
	 * It should be null terminated but anything else is pretty much
	 * allowed.
	 */
	return sprintf(buf, "%*pEp\n", (int)strlen(svc->key), svc->key);
}
static DEVICE_ATTR_RO(key);

static int get_modalias(struct tb_service *svc, char *buf, size_t size)
{
	return snprintf(buf, size, "tbsvc:k%sp%08Xv%08Xr%08X", svc->key,
			svc->prtcid, svc->prtcvers, svc->prtcrevs);
}
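/*
 * For example, a service with key "network" and prtcid, prtcvers and
 * prtcrevs all set to 1 yields the modalias string (illustrative
 * values):
 *
 *	tbsvc:knetworkp00000001v00000001r00000001
 */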

static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct tb_service *svc = container_of(dev, struct tb_service, dev);

	/* Full buffer size except new line and null termination */
	get_modalias(svc, buf, PAGE_SIZE - 2);
	return strlen(strcat(buf, "\n"));
}
static DEVICE_ATTR_RO(modalias);

static ssize_t prtcid_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct tb_service *svc = container_of(dev, struct tb_service, dev);

	return sprintf(buf, "%u\n", svc->prtcid);
}
static DEVICE_ATTR_RO(prtcid);

static ssize_t prtcvers_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct tb_service *svc = container_of(dev, struct tb_service, dev);

	return sprintf(buf, "%u\n", svc->prtcvers);
}
static DEVICE_ATTR_RO(prtcvers);

static ssize_t prtcrevs_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct tb_service *svc = container_of(dev, struct tb_service, dev);

	return sprintf(buf, "%u\n", svc->prtcrevs);
}
static DEVICE_ATTR_RO(prtcrevs);

static ssize_t prtcstns_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct tb_service *svc = container_of(dev, struct tb_service, dev);

	return sprintf(buf, "0x%08x\n", svc->prtcstns);
}
static DEVICE_ATTR_RO(prtcstns);

static struct attribute *tb_service_attrs[] = {
	&dev_attr_key.attr,
	&dev_attr_modalias.attr,
	&dev_attr_prtcid.attr,
	&dev_attr_prtcvers.attr,
	&dev_attr_prtcrevs.attr,
	&dev_attr_prtcstns.attr,
	NULL,
};

static struct attribute_group tb_service_attr_group = {
	.attrs = tb_service_attrs,
};

static const struct attribute_group *tb_service_attr_groups[] = {
	&tb_service_attr_group,
	NULL,
};

static int tb_service_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	struct tb_service *svc = container_of(dev, struct tb_service, dev);
	char modalias[64];

	get_modalias(svc, modalias, sizeof(modalias));
	return add_uevent_var(env, "MODALIAS=%s", modalias);
}

static void tb_service_release(struct device *dev)
{
	struct tb_service *svc = container_of(dev, struct tb_service, dev);
	struct tb_xdomain *xd = tb_service_parent(svc);

	ida_simple_remove(&xd->service_ids, svc->id);
	kfree(svc->key);
	kfree(svc);
}

struct device_type tb_service_type = {
	.name = "thunderbolt_service",
	.groups = tb_service_attr_groups,
	.uevent = tb_service_uevent,
	.release = tb_service_release,
};
EXPORT_SYMBOL_GPL(tb_service_type);
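/*
 * Illustrative sketch, not part of this file: in a service driver probe
 * callback the device can be mapped back to the service and its parent
 * XDomain (the probe body below is hypothetical):
 *
 *	static int my_probe(struct tb_service *svc,
 *			    const struct tb_service_id *id)
 *	{
 *		struct tb_xdomain *xd = tb_service_parent(svc);
 *
 *		dev_info(&svc->dev, "remote %pUb\n", xd->remote_uuid);
 *		return 0;
 *	}
 */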

static int remove_missing_service(struct device *dev, void *data)
{
	struct tb_xdomain *xd = data;
	struct tb_service *svc;

	svc = tb_to_service(dev);
	if (!svc)
		return 0;

	if (!tb_property_find(xd->properties, svc->key,
			      TB_PROPERTY_TYPE_DIRECTORY))
		device_unregister(dev);

	return 0;
}

static int find_service(struct device *dev, void *data)
{
	const struct tb_property *p = data;
	struct tb_service *svc;

	svc = tb_to_service(dev);
	if (!svc)
		return 0;

	return !strcmp(svc->key, p->key);
}

static int populate_service(struct tb_service *svc,
			    struct tb_property *property)
{
	struct tb_property_dir *dir = property->value.dir;
	struct tb_property *p;

	/* Fill in standard properties */
	p = tb_property_find(dir, "prtcid", TB_PROPERTY_TYPE_VALUE);
	if (p)
		svc->prtcid = p->value.immediate;
	p = tb_property_find(dir, "prtcvers", TB_PROPERTY_TYPE_VALUE);
	if (p)
		svc->prtcvers = p->value.immediate;
	p = tb_property_find(dir, "prtcrevs", TB_PROPERTY_TYPE_VALUE);
	if (p)
		svc->prtcrevs = p->value.immediate;
	p = tb_property_find(dir, "prtcstns", TB_PROPERTY_TYPE_VALUE);
	if (p)
		svc->prtcstns = p->value.immediate;

	svc->key = kstrdup(property->key, GFP_KERNEL);
	if (!svc->key)
		return -ENOMEM;

	return 0;
}

static void enumerate_services(struct tb_xdomain *xd)
{
	struct tb_service *svc;
	struct tb_property *p;
	struct device *dev;

	/*
	 * First remove all services that are not available anymore in
	 * the updated property block.
	 */
	device_for_each_child_reverse(&xd->dev, xd, remove_missing_service);

	/* Then re-enumerate properties creating new services as we go */
	tb_property_for_each(xd->properties, p) {
		if (p->type != TB_PROPERTY_TYPE_DIRECTORY)
			continue;

		/* If the service exists already we are fine */
		dev = device_find_child(&xd->dev, p, find_service);
		if (dev) {
			put_device(dev);
			continue;
		}

		svc = kzalloc(sizeof(*svc), GFP_KERNEL);
		if (!svc)
			break;

		if (populate_service(svc, p)) {
			kfree(svc);
			break;
		}

		svc->id = ida_simple_get(&xd->service_ids, 0, 0, GFP_KERNEL);
		svc->dev.bus = &tb_bus_type;
		svc->dev.type = &tb_service_type;
		svc->dev.parent = &xd->dev;
		dev_set_name(&svc->dev, "%s.%d", dev_name(&xd->dev), svc->id);

		if (device_register(&svc->dev)) {
			put_device(&svc->dev);
			break;
		}
	}
}

static int populate_properties(struct tb_xdomain *xd,
			       struct tb_property_dir *dir)
{
	const struct tb_property *p;

	/* Required properties */
	p = tb_property_find(dir, "deviceid", TB_PROPERTY_TYPE_VALUE);
	if (!p)
		return -EINVAL;
	xd->device = p->value.immediate;

	p = tb_property_find(dir, "vendorid", TB_PROPERTY_TYPE_VALUE);
	if (!p)
		return -EINVAL;
	xd->vendor = p->value.immediate;

	kfree(xd->device_name);
	xd->device_name = NULL;
	kfree(xd->vendor_name);
	xd->vendor_name = NULL;

	/* Optional properties */
	p = tb_property_find(dir, "deviceid", TB_PROPERTY_TYPE_TEXT);
	if (p)
		xd->device_name = kstrdup(p->value.text, GFP_KERNEL);
	p = tb_property_find(dir, "vendorid", TB_PROPERTY_TYPE_TEXT);
	if (p)
		xd->vendor_name = kstrdup(p->value.text, GFP_KERNEL);

	return 0;
}

/* Called with @xd->lock held */
static void tb_xdomain_restore_paths(struct tb_xdomain *xd)
{
	if (!xd->resume)
		return;

	xd->resume = false;
	if (xd->transmit_path) {
		dev_dbg(&xd->dev, "re-establishing DMA path\n");
		tb_domain_approve_xdomain_paths(xd->tb, xd);
	}
}

static void tb_xdomain_get_properties(struct work_struct *work)
{
	struct tb_xdomain *xd = container_of(work, typeof(*xd),
					     get_properties_work.work);
	struct tb_property_dir *dir;
	struct tb *tb = xd->tb;
	bool update = false;
	u32 *block = NULL;
	u32 gen = 0;
	int ret;

	ret = tb_xdp_properties_request(tb->ctl, xd->route, xd->local_uuid,
					xd->remote_uuid, xd->properties_retries,
					&block, &gen);
	if (ret < 0) {
		if (xd->properties_retries-- > 0) {
			queue_delayed_work(xd->tb->wq, &xd->get_properties_work,
					   msecs_to_jiffies(1000));
		} else {
			/* Give up now */
			dev_err(&xd->dev,
				"failed to read XDomain properties from %pUb\n",
				xd->remote_uuid);
		}
		return;
	}

	xd->properties_retries = XDOMAIN_PROPERTIES_RETRIES;

	mutex_lock(&xd->lock);

	/* Only accept newer generation properties */
	if (xd->properties && gen <= xd->property_block_gen) {
		/*
		 * On resume it is likely that the properties block is
		 * not changed (unless the other end added or removed
		 * services). However, we need to make sure the existing
		 * DMA paths are restored properly.
		 */
		tb_xdomain_restore_paths(xd);
		goto err_free_block;
	}

	dir = tb_property_parse_dir(block, ret);
	if (!dir) {
		dev_err(&xd->dev, "failed to parse XDomain properties\n");
		goto err_free_block;
	}

	ret = populate_properties(xd, dir);
	if (ret) {
		dev_err(&xd->dev, "missing XDomain properties in response\n");
		goto err_free_dir;
	}

	/* Release the existing one */
	if (xd->properties) {
		tb_property_free_dir(xd->properties);
		update = true;
	}

	xd->properties = dir;
	xd->property_block_gen = gen;

	tb_xdomain_restore_paths(xd);

	mutex_unlock(&xd->lock);

	kfree(block);

	/*
	 * Now the device should be ready enough so we can add it to the
	 * bus and let userspace know about it. If the device is already
	 * registered, we notify userspace that it has changed.
	 */
	if (!update) {
		if (device_add(&xd->dev)) {
			dev_err(&xd->dev, "failed to add XDomain device\n");
			return;
		}
	} else {
		kobject_uevent(&xd->dev.kobj, KOBJ_CHANGE);
	}

	enumerate_services(xd);
	return;

err_free_dir:
	tb_property_free_dir(dir);
err_free_block:
	kfree(block);
	mutex_unlock(&xd->lock);
}

static void tb_xdomain_properties_changed(struct work_struct *work)
{
	struct tb_xdomain *xd = container_of(work, typeof(*xd),
					     properties_changed_work.work);
	int ret;

	ret = tb_xdp_properties_changed_request(xd->tb->ctl, xd->route,
				xd->properties_changed_retries, xd->local_uuid);
	if (ret) {
		if (xd->properties_changed_retries-- > 0)
			queue_delayed_work(xd->tb->wq,
					   &xd->properties_changed_work,
					   msecs_to_jiffies(1000));
		return;
	}

	xd->properties_changed_retries = XDOMAIN_PROPERTIES_CHANGED_RETRIES;
}

static ssize_t device_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);

	return sprintf(buf, "%#x\n", xd->device);
}
static DEVICE_ATTR_RO(device);

static ssize_t
device_name_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);
	int ret;

	if (mutex_lock_interruptible(&xd->lock))
		return -ERESTARTSYS;
	ret = sprintf(buf, "%s\n", xd->device_name ? xd->device_name : "");
	mutex_unlock(&xd->lock);

	return ret;
}
static DEVICE_ATTR_RO(device_name);

static ssize_t vendor_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);

	return sprintf(buf, "%#x\n", xd->vendor);
}
static DEVICE_ATTR_RO(vendor);

static ssize_t
vendor_name_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);
	int ret;

	if (mutex_lock_interruptible(&xd->lock))
		return -ERESTARTSYS;
	ret = sprintf(buf, "%s\n", xd->vendor_name ? xd->vendor_name : "");
	mutex_unlock(&xd->lock);

	return ret;
}
static DEVICE_ATTR_RO(vendor_name);

static ssize_t unique_id_show(struct device *dev, struct device_attribute *attr,
			      char *buf)
{
	struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);

	return sprintf(buf, "%pUb\n", xd->remote_uuid);
}
static DEVICE_ATTR_RO(unique_id);

static struct attribute *xdomain_attrs[] = {
	&dev_attr_device.attr,
	&dev_attr_device_name.attr,
	&dev_attr_unique_id.attr,
	&dev_attr_vendor.attr,
	&dev_attr_vendor_name.attr,
	NULL,
};

static struct attribute_group xdomain_attr_group = {
	.attrs = xdomain_attrs,
};

static const struct attribute_group *xdomain_attr_groups[] = {
	&xdomain_attr_group,
	NULL,
};

static void tb_xdomain_release(struct device *dev)
{
	struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);

	put_device(xd->dev.parent);

	tb_property_free_dir(xd->properties);
	ida_destroy(&xd->service_ids);

	kfree(xd->local_uuid);
	kfree(xd->remote_uuid);
	kfree(xd->device_name);
	kfree(xd->vendor_name);
	kfree(xd);
}

static void start_handshake(struct tb_xdomain *xd)
{
	xd->properties_retries = XDOMAIN_PROPERTIES_RETRIES;
	xd->properties_changed_retries = XDOMAIN_PROPERTIES_CHANGED_RETRIES;

	/* Start exchanging properties with the other host */
	queue_delayed_work(xd->tb->wq, &xd->properties_changed_work,
			   msecs_to_jiffies(100));
	queue_delayed_work(xd->tb->wq, &xd->get_properties_work,
			   msecs_to_jiffies(1000));
}

static void stop_handshake(struct tb_xdomain *xd)
{
	xd->properties_retries = 0;
	xd->properties_changed_retries = 0;

	cancel_delayed_work_sync(&xd->get_properties_work);
	cancel_delayed_work_sync(&xd->properties_changed_work);
}

static int __maybe_unused tb_xdomain_suspend(struct device *dev)
{
	stop_handshake(tb_to_xdomain(dev));
	return 0;
}

static int __maybe_unused tb_xdomain_resume(struct device *dev)
{
	struct tb_xdomain *xd = tb_to_xdomain(dev);

	/*
	 * Ask tb_xdomain_get_properties() to restore any existing DMA
	 * paths after the properties are re-read.
	 */
	xd->resume = true;
	start_handshake(xd);

	return 0;
}

static const struct dev_pm_ops tb_xdomain_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(tb_xdomain_suspend, tb_xdomain_resume)
};

struct device_type tb_xdomain_type = {
	.name = "thunderbolt_xdomain",
	.release = tb_xdomain_release,
	.pm = &tb_xdomain_pm_ops,
};
EXPORT_SYMBOL_GPL(tb_xdomain_type);

/**
 * tb_xdomain_alloc() - Allocate new XDomain object
 * @tb: Domain where the XDomain belongs
 * @parent: Parent device (the switch through which the connection to
 *	    the other domain is reached)
 * @route: Route string used to reach the other domain
 * @local_uuid: Our local domain UUID
 * @remote_uuid: UUID of the other domain
 *
 * Allocates a new XDomain structure and returns a pointer to it. The
 * object must be released by calling tb_xdomain_put().
 */
struct tb_xdomain *tb_xdomain_alloc(struct tb *tb, struct device *parent,
				    u64 route, const uuid_t *local_uuid,
				    const uuid_t *remote_uuid)
{
	struct tb_xdomain *xd;

	xd = kzalloc(sizeof(*xd), GFP_KERNEL);
	if (!xd)
		return NULL;

	xd->tb = tb;
	xd->route = route;
	ida_init(&xd->service_ids);
	mutex_init(&xd->lock);
	INIT_DELAYED_WORK(&xd->get_properties_work, tb_xdomain_get_properties);
	INIT_DELAYED_WORK(&xd->properties_changed_work,
			  tb_xdomain_properties_changed);

	xd->local_uuid = kmemdup(local_uuid, sizeof(uuid_t), GFP_KERNEL);
	if (!xd->local_uuid)
		goto err_free;

	xd->remote_uuid = kmemdup(remote_uuid, sizeof(uuid_t), GFP_KERNEL);
	if (!xd->remote_uuid)
		goto err_free_local_uuid;

	device_initialize(&xd->dev);
	xd->dev.parent = get_device(parent);
	xd->dev.bus = &tb_bus_type;
	xd->dev.type = &tb_xdomain_type;
	xd->dev.groups = xdomain_attr_groups;
	dev_set_name(&xd->dev, "%u-%llx", tb->index, route);

	return xd;

err_free_local_uuid:
	kfree(xd->local_uuid);
err_free:
	kfree(xd);

	return NULL;
}

/**
 * tb_xdomain_add() - Add XDomain to the bus
 * @xd: XDomain to add
 *
 * This function starts the XDomain discovery protocol handshake and
 * eventually adds the XDomain to the bus. After calling this function
 * the caller needs to call tb_xdomain_remove() in order to remove and
 * release the object regardless of whether the handshake succeeded or not.
 */
void tb_xdomain_add(struct tb_xdomain *xd)
{
	/* Start exchanging properties with the other host */
	start_handshake(xd);
}

static int unregister_service(struct device *dev, void *data)
{
	device_unregister(dev);
	return 0;
}

/**
 * tb_xdomain_remove() - Remove XDomain from the bus
 * @xd: XDomain to remove
 *
 * This will stop all ongoing configuration work and remove the XDomain
 * along with any services from the bus. When the last reference to @xd
 * is released the object will be released as well.
 */
void tb_xdomain_remove(struct tb_xdomain *xd)
{
	stop_handshake(xd);

	device_for_each_child_reverse(&xd->dev, xd, unregister_service);

	if (!device_is_registered(&xd->dev))
		put_device(&xd->dev);
	else
		device_unregister(&xd->dev);
}

/**
 * tb_xdomain_enable_paths() - Enable DMA paths for XDomain connection
 * @xd: XDomain connection
 * @transmit_path: HopID of the transmit path the other end is using to
 *		   send packets
 * @transmit_ring: DMA ring used to receive packets from the other end
 * @receive_path: HopID of the receive path the other end is using to
 *		  receive packets
 * @receive_ring: DMA ring used to send packets to the other end
 *
 * The function enables the DMA paths accordingly so that after a
 * successful return the caller can send and receive packets using the
 * high-speed DMA path.
 *
 * Return: %0 in case of success and negative errno in case of error
 */
int tb_xdomain_enable_paths(struct tb_xdomain *xd, u16 transmit_path,
			    u16 transmit_ring, u16 receive_path,
			    u16 receive_ring)
{
	int ret;

	mutex_lock(&xd->lock);

	if (xd->transmit_path) {
		ret = xd->transmit_path == transmit_path ? 0 : -EBUSY;
		goto exit_unlock;
	}

	xd->transmit_path = transmit_path;
	xd->transmit_ring = transmit_ring;
	xd->receive_path = receive_path;
	xd->receive_ring = receive_ring;

	ret = tb_domain_approve_xdomain_paths(xd->tb, xd);

exit_unlock:
	mutex_unlock(&xd->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(tb_xdomain_enable_paths);
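/*
 * Illustrative sketch, not part of this file: once both ends have
 * agreed on HopIDs and allocated their DMA rings (the variables below
 * are hypothetical), the paths are enabled and later torn down again:
 *
 *	ret = tb_xdomain_enable_paths(xd, transmit_path, transmit_ring,
 *				      receive_path, receive_ring);
 *	if (ret)
 *		return ret;
 *
 *	... exchange packets over the rings ...
 *
 *	tb_xdomain_disable_paths(xd);
 */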

/**
 * tb_xdomain_disable_paths() - Disable DMA paths for XDomain connection
 * @xd: XDomain connection
 *
 * This does the opposite of tb_xdomain_enable_paths(). After a call to
 * this function the caller is not expected to use the rings anymore.
 *
 * Return: %0 in case of success and negative errno in case of error
 */
int tb_xdomain_disable_paths(struct tb_xdomain *xd)
{
	int ret = 0;

	mutex_lock(&xd->lock);
	if (xd->transmit_path) {
		xd->transmit_path = 0;
		xd->transmit_ring = 0;
		xd->receive_path = 0;
		xd->receive_ring = 0;

		ret = tb_domain_disconnect_xdomain_paths(xd->tb, xd);
	}
	mutex_unlock(&xd->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(tb_xdomain_disable_paths);

struct tb_xdomain_lookup {
	const uuid_t *uuid;
	u8 link;
	u8 depth;
	u64 route;
};

static struct tb_xdomain *switch_find_xdomain(struct tb_switch *sw,
	const struct tb_xdomain_lookup *lookup)
{
	int i;

	for (i = 1; i <= sw->config.max_port_number; i++) {
		struct tb_port *port = &sw->ports[i];
		struct tb_xdomain *xd;

		if (tb_is_upstream_port(port))
			continue;

		if (port->xdomain) {
			xd = port->xdomain;

			if (lookup->uuid) {
				if (uuid_equal(xd->remote_uuid, lookup->uuid))
					return xd;
			} else if (lookup->link &&
				   lookup->link == xd->link &&
				   lookup->depth == xd->depth) {
				return xd;
			} else if (lookup->route &&
				   lookup->route == xd->route) {
				return xd;
			}
		} else if (port->remote) {
			xd = switch_find_xdomain(port->remote->sw, lookup);
			if (xd)
				return xd;
		}
	}

	return NULL;
}

/**
 * tb_xdomain_find_by_uuid() - Find an XDomain by UUID
 * @tb: Domain where the XDomain belongs to
 * @uuid: UUID to look for
 *
 * Finds XDomain by walking through the Thunderbolt topology below @tb.
 * The returned XDomain will have its reference count increased so the
 * caller needs to call tb_xdomain_put() when it is done with the
 * object.
 *
 * This will find all XDomains including the ones that are not yet added
 * to the bus (handshake is still in progress).
 *
 * The caller needs to hold @tb->lock.
 */
struct tb_xdomain *tb_xdomain_find_by_uuid(struct tb *tb, const uuid_t *uuid)
{
	struct tb_xdomain_lookup lookup;
	struct tb_xdomain *xd;

	memset(&lookup, 0, sizeof(lookup));
	lookup.uuid = uuid;

	xd = switch_find_xdomain(tb->root_switch, &lookup);
	return tb_xdomain_get(xd);
}
EXPORT_SYMBOL_GPL(tb_xdomain_find_by_uuid);
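/*
 * Illustrative sketch, not part of this file: looking up the XDomain
 * behind a remote UUID. Note the @tb->lock requirement and the
 * reference that must be dropped when done:
 *
 *	mutex_lock(&tb->lock);
 *	xd = tb_xdomain_find_by_uuid(tb, &remote_uuid);
 *	mutex_unlock(&tb->lock);
 *	if (xd) {
 *		... use xd ...
 *		tb_xdomain_put(xd);
 *	}
 */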

/**
 * tb_xdomain_find_by_link_depth() - Find an XDomain by link and depth
 * @tb: Domain where the XDomain belongs to
 * @link: Root switch link number
 * @depth: Depth in the link
 *
 * Finds XDomain by walking through the Thunderbolt topology below @tb.
 * The returned XDomain will have its reference count increased so the
 * caller needs to call tb_xdomain_put() when it is done with the
 * object.
 *
 * This will find all XDomains including the ones that are not yet added
 * to the bus (handshake is still in progress).
 *
 * The caller needs to hold @tb->lock.
 */
struct tb_xdomain *tb_xdomain_find_by_link_depth(struct tb *tb, u8 link,
						 u8 depth)
{
	struct tb_xdomain_lookup lookup;
	struct tb_xdomain *xd;

	memset(&lookup, 0, sizeof(lookup));
	lookup.link = link;
	lookup.depth = depth;

	xd = switch_find_xdomain(tb->root_switch, &lookup);
	return tb_xdomain_get(xd);
}

/**
 * tb_xdomain_find_by_route() - Find an XDomain by route string
 * @tb: Domain where the XDomain belongs to
 * @route: XDomain route string
 *
 * Finds XDomain by walking through the Thunderbolt topology below @tb.
 * The returned XDomain will have its reference count increased so the
 * caller needs to call tb_xdomain_put() when it is done with the
 * object.
 *
 * This will find all XDomains including the ones that are not yet added
 * to the bus (handshake is still in progress).
 *
 * The caller needs to hold @tb->lock.
 */
struct tb_xdomain *tb_xdomain_find_by_route(struct tb *tb, u64 route)
{
	struct tb_xdomain_lookup lookup;
	struct tb_xdomain *xd;

	memset(&lookup, 0, sizeof(lookup));
	lookup.route = route;

	xd = switch_find_xdomain(tb->root_switch, &lookup);
	return tb_xdomain_get(xd);
}
EXPORT_SYMBOL_GPL(tb_xdomain_find_by_route);

bool tb_xdomain_handle_request(struct tb *tb, enum tb_cfg_pkg_type type,
			       const void *buf, size_t size)
{
	const struct tb_protocol_handler *handler, *tmp;
	const struct tb_xdp_header *hdr = buf;
	unsigned int length;
	int ret = 0;

	/* We expect the packet to be at least the size of the header */
	length = hdr->xd_hdr.length_sn & TB_XDOMAIN_LENGTH_MASK;
	if (length != size / 4 - sizeof(hdr->xd_hdr) / 4)
		return true;
	if (length < sizeof(*hdr) / 4 - sizeof(hdr->xd_hdr) / 4)
		return true;

	/*
	 * Handle XDomain discovery protocol packets directly here. For
	 * other protocols (based on their UUID) we call registered
	 * handlers in turn.
	 */
	if (uuid_equal(&hdr->uuid, &tb_xdp_uuid)) {
		if (type == TB_CFG_PKG_XDOMAIN_REQ) {
			tb_xdp_schedule_request(tb, hdr, size);
			return true;
		}
		return false;
	}

	mutex_lock(&xdomain_lock);
	list_for_each_entry_safe(handler, tmp, &protocol_handlers, list) {
		if (!uuid_equal(&hdr->uuid, handler->uuid))
			continue;

		mutex_unlock(&xdomain_lock);
		ret = handler->callback(buf, size, handler->data);
		mutex_lock(&xdomain_lock);

		if (ret)
			break;
	}
	mutex_unlock(&xdomain_lock);

	return ret > 0;
}

static int rebuild_property_block(void)
{
	u32 *block, len;
	int ret;

	ret = tb_property_format_dir(xdomain_property_dir, NULL, 0);
	if (ret < 0)
		return ret;

	len = ret;

	block = kcalloc(len, sizeof(u32), GFP_KERNEL);
	if (!block)
		return -ENOMEM;

	ret = tb_property_format_dir(xdomain_property_dir, block, len);
	if (ret) {
		kfree(block);
		return ret;
	}

	kfree(xdomain_property_block);
	xdomain_property_block = block;
	xdomain_property_block_len = len;
	xdomain_property_block_gen++;

	return 0;
}

static int update_xdomain(struct device *dev, void *data)
{
	struct tb_xdomain *xd;

	xd = tb_to_xdomain(dev);
	if (xd) {
		queue_delayed_work(xd->tb->wq, &xd->properties_changed_work,
				   msecs_to_jiffies(50));
	}

	return 0;
}

static void update_all_xdomains(void)
{
	bus_for_each_dev(&tb_bus_type, NULL, NULL, update_xdomain);
}

static bool remove_directory(const char *key, const struct tb_property_dir *dir)
{
	struct tb_property *p;

	p = tb_property_find(xdomain_property_dir, key,
			     TB_PROPERTY_TYPE_DIRECTORY);
	if (p && p->value.dir == dir) {
		tb_property_remove(p);
		return true;
	}
	return false;
}

/**
 * tb_register_property_dir() - Register property directory to the host
 * @key: Key (name) of the directory to add
 * @dir: Directory to add
 *
 * Service drivers can use this function to add a new property directory
 * to the host's available properties. The other connected hosts are
 * notified so they can re-read the properties of this host if they are
 * interested.
 *
 * Return: %0 on success and negative errno on failure
 */
int tb_register_property_dir(const char *key, struct tb_property_dir *dir)
{
	int ret;

	if (WARN_ON(!xdomain_property_dir))
		return -EAGAIN;

	if (!key || strlen(key) > 8)
		return -EINVAL;

	mutex_lock(&xdomain_lock);
	if (tb_property_find(xdomain_property_dir, key,
			     TB_PROPERTY_TYPE_DIRECTORY)) {
		ret = -EEXIST;
		goto err_unlock;
	}

	ret = tb_property_add_dir(xdomain_property_dir, key, dir);
	if (ret)
		goto err_unlock;

	ret = rebuild_property_block();
	if (ret) {
		remove_directory(key, dir);
		goto err_unlock;
	}

	mutex_unlock(&xdomain_lock);
	update_all_xdomains();
	return 0;

err_unlock:
	mutex_unlock(&xdomain_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(tb_register_property_dir);
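/*
 * Illustrative sketch, not part of this file: a service driver exposing
 * its own directory in the host properties. The directory UUID, key and
 * property values below are hypothetical.
 *
 *	struct tb_property_dir *dir;
 *	int ret;
 *
 *	dir = tb_property_create_dir(&my_dir_uuid);
 *	if (!dir)
 *		return -ENOMEM;
 *	tb_property_add_immediate(dir, "prtcid", 1);
 *	tb_property_add_immediate(dir, "prtcvers", 1);
 *
 *	ret = tb_register_property_dir("network", dir);
 *	if (ret)
 *		tb_property_free_dir(dir);
 */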

/**
 * tb_unregister_property_dir() - Removes property directory from host
 * @key: Key (name) of the directory
 * @dir: Directory to remove
 *
 * This will remove the existing directory from this host and notify the
 * connected hosts about the change.
 */
void tb_unregister_property_dir(const char *key, struct tb_property_dir *dir)
{
	int ret = 0;

	mutex_lock(&xdomain_lock);
	if (remove_directory(key, dir))
		ret = rebuild_property_block();
	mutex_unlock(&xdomain_lock);

	if (!ret)
		update_all_xdomains();
}
EXPORT_SYMBOL_GPL(tb_unregister_property_dir);

int tb_xdomain_init(void)
{
	int ret;

	xdomain_property_dir = tb_property_create_dir(NULL);
	if (!xdomain_property_dir)
		return -ENOMEM;

	/*
	 * Initialize standard set of properties without any service
	 * directories. Those will be added by service drivers
	 * themselves when they are loaded.
	 */
	tb_property_add_immediate(xdomain_property_dir, "vendorid",
				  PCI_VENDOR_ID_INTEL);
	tb_property_add_text(xdomain_property_dir, "vendorid", "Intel Corp.");
	tb_property_add_immediate(xdomain_property_dir, "deviceid", 0x1);
	tb_property_add_text(xdomain_property_dir, "deviceid",
			     utsname()->nodename);
	tb_property_add_immediate(xdomain_property_dir, "devicerv", 0x80000100);

	ret = rebuild_property_block();
	if (ret) {
		tb_property_free_dir(xdomain_property_dir);
		xdomain_property_dir = NULL;
	}

	return ret;
}

void tb_xdomain_exit(void)
{
	kfree(xdomain_property_block);
	tb_property_free_dir(xdomain_property_dir);
}