// SPDX-License-Identifier: GPL-2.0
/*
 * Thunderbolt bus support
 *
 * Copyright (C) 2017, Intel Corporation
 * Author: Mika Westerberg <mika.westerberg@linux.intel.com>
 */

#include <linux/device.h>
#include <linux/dmar.h>
#include <linux/idr.h>
#include <linux/iommu.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <crypto/hash.h>

#include "tb.h"

static DEFINE_IDA(tb_domain_ida);

static bool match_service_id(const struct tb_service_id *id,
			     const struct tb_service *svc)
{
	if (id->match_flags & TBSVC_MATCH_PROTOCOL_KEY) {
		if (strcmp(id->protocol_key, svc->key))
			return false;
	}

	if (id->match_flags & TBSVC_MATCH_PROTOCOL_ID) {
		if (id->protocol_id != svc->prtcid)
			return false;
	}

	if (id->match_flags & TBSVC_MATCH_PROTOCOL_VERSION) {
		if (id->protocol_version != svc->prtcvers)
			return false;
	}

	if (id->match_flags & TBSVC_MATCH_PROTOCOL_REVISION) {
		if (id->protocol_revision != svc->prtcrevs)
			return false;
	}

	return true;
}

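/*
 * For illustration, a service driver matching the network protocol could
 * declare its ID table as below (a sketch modeled on the Thunderbolt
 * networking driver; TB_SERVICE() fills in TBSVC_MATCH_PROTOCOL_KEY |
 * TBSVC_MATCH_PROTOCOL_ID along with the key and ID):
 *
 *	static const struct tb_service_id tbnet_ids[] = {
 *		{ TB_SERVICE("network", 1) },
 *		{ },
 *	};
 *	MODULE_DEVICE_TABLE(tbsvc, tbnet_ids);
 */
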
static const struct tb_service_id *__tb_service_match(struct device *dev,
						      struct device_driver *drv)
{
	struct tb_service_driver *driver;
	const struct tb_service_id *ids;
	struct tb_service *svc;

	svc = tb_to_service(dev);
	if (!svc)
		return NULL;

	driver = container_of(drv, struct tb_service_driver, driver);
	if (!driver->id_table)
		return NULL;

	for (ids = driver->id_table; ids->match_flags != 0; ids++) {
		if (match_service_id(ids, svc))
			return ids;
	}

	return NULL;
}

static int tb_service_match(struct device *dev, struct device_driver *drv)
{
	return !!__tb_service_match(dev, drv);
}

static int tb_service_probe(struct device *dev)
{
	struct tb_service *svc = tb_to_service(dev);
	struct tb_service_driver *driver;
	const struct tb_service_id *id;

	driver = container_of(dev->driver, struct tb_service_driver, driver);
	id = __tb_service_match(dev, &driver->driver);

	return driver->probe(svc, id);
}

static int tb_service_remove(struct device *dev)
{
	struct tb_service *svc = tb_to_service(dev);
	struct tb_service_driver *driver;

	driver = container_of(dev->driver, struct tb_service_driver, driver);
	if (driver->remove)
		driver->remove(svc);

	return 0;
}

static void tb_service_shutdown(struct device *dev)
{
	struct tb_service_driver *driver;
	struct tb_service *svc;

	svc = tb_to_service(dev);
	if (!svc || !dev->driver)
		return;

	driver = container_of(dev->driver, struct tb_service_driver, driver);
	if (driver->shutdown)
		driver->shutdown(svc);
}

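/*
 * A service driver ties an ID table to these callbacks. A hedged sketch
 * modeled on the Thunderbolt networking driver (the tbnet_* names are
 * placeholders here, not defined in this file):
 *
 *	static struct tb_service_driver tbnet_driver = {
 *		.driver = {
 *			.owner = THIS_MODULE,
 *			.name = "thunderbolt-net",
 *		},
 *		.probe = tbnet_probe,
 *		.remove = tbnet_remove,
 *		.id_table = tbnet_ids,
 *	};
 *
 * It is registered with tb_register_service_driver(&tbnet_driver) and
 * torn down with tb_unregister_service_driver() on module exit.
 */
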
static const char * const tb_security_names[] = {
	[TB_SECURITY_NONE] = "none",
	[TB_SECURITY_USER] = "user",
	[TB_SECURITY_SECURE] = "secure",
	[TB_SECURITY_DPONLY] = "dponly",
	[TB_SECURITY_USBONLY] = "usbonly",
};

static ssize_t boot_acl_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct tb *tb = container_of(dev, struct tb, dev);
	uuid_t *uuids;
	ssize_t ret;
	int i;

	uuids = kcalloc(tb->nboot_acl, sizeof(uuid_t), GFP_KERNEL);
	if (!uuids)
		return -ENOMEM;

	pm_runtime_get_sync(&tb->dev);

	if (mutex_lock_interruptible(&tb->lock)) {
		ret = -ERESTARTSYS;
		goto out;
	}
	ret = tb->cm_ops->get_boot_acl(tb, uuids, tb->nboot_acl);
	if (ret) {
		mutex_unlock(&tb->lock);
		goto out;
	}
	mutex_unlock(&tb->lock);

	for (ret = 0, i = 0; i < tb->nboot_acl; i++) {
		if (!uuid_is_null(&uuids[i]))
			ret += scnprintf(buf + ret, PAGE_SIZE - ret, "%pUb",
					 &uuids[i]);

		ret += scnprintf(buf + ret, PAGE_SIZE - ret, "%s",
				 i < tb->nboot_acl - 1 ? "," : "\n");
	}

out:
	pm_runtime_mark_last_busy(&tb->dev);
	pm_runtime_put_autosuspend(&tb->dev);
	kfree(uuids);

	return ret;
}

static ssize_t boot_acl_store(struct device *dev, struct device_attribute *attr,
			      const char *buf, size_t count)
{
	struct tb *tb = container_of(dev, struct tb, dev);
	char *str, *s, *uuid_str;
	ssize_t ret = 0;
	uuid_t *acl;
	int i = 0;

	/*
	 * Make sure the value is not bigger than tb->nboot_acl * UUID
	 * length + commas and optional "\n". Also the smallest allowable
	 * string is tb->nboot_acl * ",".
	 */
	if (count > (UUID_STRING_LEN + 1) * tb->nboot_acl + 1)
		return -EINVAL;
	if (count < tb->nboot_acl - 1)
		return -EINVAL;

	str = kstrdup(buf, GFP_KERNEL);
	if (!str)
		return -ENOMEM;

	acl = kcalloc(tb->nboot_acl, sizeof(uuid_t), GFP_KERNEL);
	if (!acl) {
		ret = -ENOMEM;
		goto err_free_str;
	}

	uuid_str = strim(str);
	while ((s = strsep(&uuid_str, ",")) != NULL && i < tb->nboot_acl) {
		size_t len = strlen(s);

		if (len) {
			if (len != UUID_STRING_LEN) {
				ret = -EINVAL;
				goto err_free_acl;
			}
			ret = uuid_parse(s, &acl[i]);
			if (ret)
				goto err_free_acl;
		}

		i++;
	}

	if (s || i < tb->nboot_acl) {
		ret = -EINVAL;
		goto err_free_acl;
	}

	pm_runtime_get_sync(&tb->dev);

	if (mutex_lock_interruptible(&tb->lock)) {
		ret = -ERESTARTSYS;
		goto err_rpm_put;
	}
	ret = tb->cm_ops->set_boot_acl(tb, acl, tb->nboot_acl);
	if (!ret) {
		/* Notify userspace about the change */
		kobject_uevent(&tb->dev.kobj, KOBJ_CHANGE);
	}
	mutex_unlock(&tb->lock);

err_rpm_put:
	pm_runtime_mark_last_busy(&tb->dev);
	pm_runtime_put_autosuspend(&tb->dev);
err_free_acl:
	kfree(acl);
err_free_str:
	kfree(str);

	return ret ?: count;
}
static DEVICE_ATTR_RW(boot_acl);

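/*
 * The accepted boot_acl format is a comma-separated list with exactly
 * tb->nboot_acl slots; an empty slot clears that entry. A hypothetical
 * example assuming a two-slot ACL (the UUID below is made up):
 *
 *	$ echo "cafe1234-5678-9abc-def0-123456789abc," > \
 *		/sys/bus/thunderbolt/devices/domain0/boot_acl
 */
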
static ssize_t iommu_dma_protection_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	/*
	 * Kernel DMA protection is a feature where Thunderbolt security
	 * is handled natively using the IOMMU. It is enabled when the
	 * IOMMU is enabled and the ACPI DMAR table has
	 * DMAR_PLATFORM_OPT_IN set.
	 */
	return sprintf(buf, "%d\n",
		       iommu_present(&pci_bus_type) && dmar_platform_optin());
}
static DEVICE_ATTR_RO(iommu_dma_protection);

static ssize_t security_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct tb *tb = container_of(dev, struct tb, dev);
	const char *name = "unknown";

	if (tb->security_level < ARRAY_SIZE(tb_security_names))
		name = tb_security_names[tb->security_level];

	return sprintf(buf, "%s\n", name);
}
static DEVICE_ATTR_RO(security);

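/*
 * Userspace tools such as boltd read this attribute to decide how
 * device authorization must be performed. A sketch of the expected
 * interaction (output depends on the BIOS-configured level):
 *
 *	$ cat /sys/bus/thunderbolt/devices/domain0/security
 *	user
 */
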
static struct attribute *domain_attrs[] = {
	&dev_attr_boot_acl.attr,
	&dev_attr_iommu_dma_protection.attr,
	&dev_attr_security.attr,
	NULL,
};

static umode_t domain_attr_is_visible(struct kobject *kobj,
				      struct attribute *attr, int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct tb *tb = container_of(dev, struct tb, dev);

	if (attr == &dev_attr_boot_acl.attr) {
		if (tb->nboot_acl &&
		    tb->cm_ops->get_boot_acl &&
		    tb->cm_ops->set_boot_acl)
			return attr->mode;
		return 0;
	}

	return attr->mode;
}

static struct attribute_group domain_attr_group = {
	.is_visible = domain_attr_is_visible,
	.attrs = domain_attrs,
};

static const struct attribute_group *domain_attr_groups[] = {
	&domain_attr_group,
	NULL,
};

struct bus_type tb_bus_type = {
	.name = "thunderbolt",
	.match = tb_service_match,
	.probe = tb_service_probe,
	.remove = tb_service_remove,
	.shutdown = tb_service_shutdown,
};

static void tb_domain_release(struct device *dev)
{
	struct tb *tb = container_of(dev, struct tb, dev);

	tb_ctl_free(tb->ctl);
	destroy_workqueue(tb->wq);
	ida_simple_remove(&tb_domain_ida, tb->index);
	mutex_destroy(&tb->lock);
	kfree(tb);
}

struct device_type tb_domain_type = {
	.name = "thunderbolt_domain",
	.release = tb_domain_release,
};

/**
 * tb_domain_alloc() - Allocate a domain
 * @nhi: Pointer to the host controller
 * @privsize: Size of the connection manager private data
 *
 * Allocates and initializes a new Thunderbolt domain. Connection
 * managers are expected to call this and then fill in @cm_ops
 * accordingly.
 *
 * Call tb_domain_put() to release the domain before it has been added
 * to the system.
 *
 * Return: allocated domain structure or %NULL in case of error
 */
struct tb *tb_domain_alloc(struct tb_nhi *nhi, size_t privsize)
{
	struct tb *tb;

	/*
	 * Make sure the structure sizes match what the hardware expects
	 * because bit-fields are being used.
	 */
	BUILD_BUG_ON(sizeof(struct tb_regs_switch_header) != 5 * 4);
	BUILD_BUG_ON(sizeof(struct tb_regs_port_header) != 8 * 4);
	BUILD_BUG_ON(sizeof(struct tb_regs_hop) != 2 * 4);

	tb = kzalloc(sizeof(*tb) + privsize, GFP_KERNEL);
	if (!tb)
		return NULL;

	tb->nhi = nhi;
	mutex_init(&tb->lock);

	tb->index = ida_simple_get(&tb_domain_ida, 0, 0, GFP_KERNEL);
	if (tb->index < 0)
		goto err_free;

	tb->wq = alloc_ordered_workqueue("thunderbolt%d", 0, tb->index);
	if (!tb->wq)
		goto err_remove_ida;

	tb->dev.parent = &nhi->pdev->dev;
	tb->dev.bus = &tb_bus_type;
	tb->dev.type = &tb_domain_type;
	tb->dev.groups = domain_attr_groups;
	dev_set_name(&tb->dev, "domain%d", tb->index);
	device_initialize(&tb->dev);

	return tb;

err_remove_ida:
	ida_simple_remove(&tb_domain_ida, tb->index);
err_free:
	kfree(tb);

	return NULL;
}

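/*
 * A connection manager typically drives the domain lifecycle as in the
 * sketch below; my_cm_ops and MY_CM_PRIVSIZE are hypothetical, while
 * the tb_domain_*() calls are the API implemented in this file:
 *
 *	struct tb *tb;
 *	int ret;
 *
 *	tb = tb_domain_alloc(nhi, MY_CM_PRIVSIZE);
 *	if (!tb)
 *		return -ENOMEM;
 *
 *	tb->cm_ops = &my_cm_ops;
 *	ret = tb_domain_add(tb);
 *	if (ret) {
 *		tb_domain_put(tb);	// not added yet, just drop the ref
 *		return ret;
 *	}
 *	...
 *	tb_domain_remove(tb);		// on teardown
 */
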
static bool tb_domain_event_cb(void *data, enum tb_cfg_pkg_type type,
			       const void *buf, size_t size)
{
	struct tb *tb = data;

	if (!tb->cm_ops->handle_event) {
		tb_warn(tb, "domain does not have event handler\n");
		return true;
	}

	switch (type) {
	case TB_CFG_PKG_XDOMAIN_REQ:
	case TB_CFG_PKG_XDOMAIN_RESP:
		return tb_xdomain_handle_request(tb, type, buf, size);

	default:
		tb->cm_ops->handle_event(tb, type, buf, size);
	}

	return true;
}

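/*
 * Any packet that is not an XDomain request/response is passed on to
 * the connection manager. A minimal, hypothetical handle_event hook
 * (my_cm_handle_event and queue_hotplug_work are illustrative names,
 * not part of this driver):
 *
 *	static void my_cm_handle_event(struct tb *tb, enum tb_cfg_pkg_type type,
 *				       const void *buf, size_t size)
 *	{
 *		if (type == TB_CFG_PKG_EVENT)
 *			queue_hotplug_work(tb, buf, size);
 *	}
 */
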
/**
 * tb_domain_add() - Add domain to the system
 * @tb: Domain to add
 *
 * Starts the domain and adds it to the system. Hotplugging devices will
 * work after this function has returned successfully. In order to remove
 * and release the domain after this function has been called, call
 * tb_domain_remove().
 *
 * Return: %0 in case of success and negative errno in case of error
 */
int tb_domain_add(struct tb *tb)
{
	int ret;

	if (WARN_ON(!tb->cm_ops))
		return -EINVAL;

	mutex_lock(&tb->lock);

	tb->ctl = tb_ctl_alloc(tb->nhi, tb_domain_event_cb, tb);
	if (!tb->ctl) {
		ret = -ENOMEM;
		goto err_unlock;
	}

	/*
	 * tb_schedule_hotplug_handler may be called as soon as the config
	 * channel is started. That's why we have to hold the lock here.
	 */
	tb_ctl_start(tb->ctl);

	if (tb->cm_ops->driver_ready) {
		ret = tb->cm_ops->driver_ready(tb);
		if (ret)
			goto err_ctl_stop;
	}

	ret = device_add(&tb->dev);
	if (ret)
		goto err_ctl_stop;

	/* Start the domain */
	if (tb->cm_ops->start) {
		ret = tb->cm_ops->start(tb);
		if (ret)
			goto err_domain_del;
	}

	/* This starts event processing */
	mutex_unlock(&tb->lock);

	pm_runtime_no_callbacks(&tb->dev);
	pm_runtime_set_active(&tb->dev);
	pm_runtime_enable(&tb->dev);
	pm_runtime_set_autosuspend_delay(&tb->dev, TB_AUTOSUSPEND_DELAY);
	pm_runtime_mark_last_busy(&tb->dev);
	pm_runtime_use_autosuspend(&tb->dev);

	return 0;

err_domain_del:
	device_del(&tb->dev);
err_ctl_stop:
	tb_ctl_stop(tb->ctl);
err_unlock:
	mutex_unlock(&tb->lock);

	return ret;
}

/**
 * tb_domain_remove() - Removes and releases a domain
 * @tb: Domain to remove
 *
 * Stops the domain, removes it from the system and releases all
 * resources once the last reference has been released.
 */
void tb_domain_remove(struct tb *tb)
{
	mutex_lock(&tb->lock);
	if (tb->cm_ops->stop)
		tb->cm_ops->stop(tb);
	/* Stop the domain control traffic */
	tb_ctl_stop(tb->ctl);
	mutex_unlock(&tb->lock);

	flush_workqueue(tb->wq);
	device_unregister(&tb->dev);
}

/**
 * tb_domain_suspend_noirq() - Suspend a domain
 * @tb: Domain to suspend
 *
 * Suspends all devices in the domain and stops the control channel.
 */
int tb_domain_suspend_noirq(struct tb *tb)
{
	int ret = 0;

	/*
	 * The control channel interrupt is left enabled during suspend
	 * and taking the lock here prevents any events happening before
	 * we actually have stopped the domain and the control channel.
	 */
	mutex_lock(&tb->lock);
	if (tb->cm_ops->suspend_noirq)
		ret = tb->cm_ops->suspend_noirq(tb);
	if (!ret)
		tb_ctl_stop(tb->ctl);
	mutex_unlock(&tb->lock);

	return ret;
}

/**
 * tb_domain_resume_noirq() - Resume a domain
 * @tb: Domain to resume
 *
 * Restarts the control channel and resumes all devices connected to
 * the domain.
 */
int tb_domain_resume_noirq(struct tb *tb)
{
	int ret = 0;

	mutex_lock(&tb->lock);
	tb_ctl_start(tb->ctl);
	if (tb->cm_ops->resume_noirq)
		ret = tb->cm_ops->resume_noirq(tb);
	mutex_unlock(&tb->lock);

	return ret;
}

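/*
 * These hooks are meant to be wired into the NHI PCI driver's
 * dev_pm_ops. A hedged sketch of that wiring (nhi_suspend_noirq and
 * nhi_resume_noirq are illustrative wrappers, not defined here):
 *
 *	static int nhi_suspend_noirq(struct device *dev)
 *	{
 *		struct pci_dev *pdev = to_pci_dev(dev);
 *		struct tb *tb = pci_get_drvdata(pdev);
 *
 *		return tb_domain_suspend_noirq(tb);
 *	}
 *
 *	static const struct dev_pm_ops nhi_pm_ops = {
 *		.suspend_noirq = nhi_suspend_noirq,
 *		.resume_noirq = nhi_resume_noirq,
 *	};
 */
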
int tb_domain_suspend(struct tb *tb)
{
	return tb->cm_ops->suspend ? tb->cm_ops->suspend(tb) : 0;
}

void tb_domain_complete(struct tb *tb)
{
	if (tb->cm_ops->complete)
		tb->cm_ops->complete(tb);
}

int tb_domain_runtime_suspend(struct tb *tb)
{
	if (tb->cm_ops->runtime_suspend) {
		int ret = tb->cm_ops->runtime_suspend(tb);
		if (ret)
			return ret;
	}
	tb_ctl_stop(tb->ctl);
	return 0;
}

int tb_domain_runtime_resume(struct tb *tb)
{
	tb_ctl_start(tb->ctl);
	if (tb->cm_ops->runtime_resume) {
		int ret = tb->cm_ops->runtime_resume(tb);
		if (ret)
			return ret;
	}
	return 0;
}

/**
 * tb_domain_approve_switch() - Approve switch
 * @tb: Domain the switch belongs to
 * @sw: Switch to approve
 *
 * This will approve the switch by connection manager specific means. In
 * case of success the connection manager will create tunnels for all
 * supported protocols.
 *
 * Return: %0 on success and negative errno in case of failure.
 */
int tb_domain_approve_switch(struct tb *tb, struct tb_switch *sw)
{
	struct tb_switch *parent_sw;

	if (!tb->cm_ops->approve_switch)
		return -EPERM;

	/* The parent switch must be authorized before this one */
	parent_sw = tb_to_switch(sw->dev.parent);
	if (!parent_sw || !parent_sw->authorized)
		return -EINVAL;

	return tb->cm_ops->approve_switch(tb, sw);
}

/**
 * tb_domain_approve_switch_key() - Approve switch and add key
 * @tb: Domain the switch belongs to
 * @sw: Switch to approve
 *
 * For switches that support secure connect, this function first adds the
 * key to the switch NVM using connection manager specific means. If
 * adding the key is successful, the switch is approved and connected.
 *
 * Return: %0 on success and negative errno in case of failure.
 */
int tb_domain_approve_switch_key(struct tb *tb, struct tb_switch *sw)
{
	struct tb_switch *parent_sw;
	int ret;

	if (!tb->cm_ops->approve_switch || !tb->cm_ops->add_switch_key)
		return -EPERM;

	/* The parent switch must be authorized before this one */
	parent_sw = tb_to_switch(sw->dev.parent);
	if (!parent_sw || !parent_sw->authorized)
		return -EINVAL;

	ret = tb->cm_ops->add_switch_key(tb, sw);
	if (ret)
		return ret;

	return tb->cm_ops->approve_switch(tb, sw);
}

/**
 * tb_domain_challenge_switch_key() - Challenge and approve switch
 * @tb: Domain the switch belongs to
 * @sw: Switch to approve
 *
 * For switches that support secure connect, this function generates a
 * random challenge and sends it to the switch. The switch responds to
 * this and if the response matches our random challenge, the switch is
 * approved and connected.
 *
 * Return: %0 on success and negative errno in case of failure.
 */
int tb_domain_challenge_switch_key(struct tb *tb, struct tb_switch *sw)
{
	u8 challenge[TB_SWITCH_KEY_SIZE];
	u8 response[TB_SWITCH_KEY_SIZE];
	u8 hmac[TB_SWITCH_KEY_SIZE];
	struct tb_switch *parent_sw;
	struct crypto_shash *tfm;
	struct shash_desc *shash;
	int ret;

	if (!tb->cm_ops->approve_switch || !tb->cm_ops->challenge_switch_key)
		return -EPERM;

	/* The parent switch must be authorized before this one */
	parent_sw = tb_to_switch(sw->dev.parent);
	if (!parent_sw || !parent_sw->authorized)
		return -EINVAL;

	get_random_bytes(challenge, sizeof(challenge));
	ret = tb->cm_ops->challenge_switch_key(tb, sw, challenge, response);
	if (ret)
		return ret;

	tfm = crypto_alloc_shash("hmac(sha256)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	ret = crypto_shash_setkey(tfm, sw->key, TB_SWITCH_KEY_SIZE);
	if (ret)
		goto err_free_tfm;

	shash = kzalloc(sizeof(*shash) + crypto_shash_descsize(tfm),
			GFP_KERNEL);
	if (!shash) {
		ret = -ENOMEM;
		goto err_free_tfm;
	}

	shash->tfm = tfm;

	memset(hmac, 0, sizeof(hmac));
	ret = crypto_shash_digest(shash, challenge, sizeof(hmac), hmac);
	if (ret)
		goto err_free_shash;

	/* The returned HMAC must match the one we calculated */
	if (memcmp(response, hmac, sizeof(hmac))) {
		ret = -EKEYREJECTED;
		goto err_free_shash;
	}

	crypto_free_shash(tfm);
	kfree(shash);

	return tb->cm_ops->approve_switch(tb, sw);

err_free_shash:
	kfree(shash);
err_free_tfm:
	crypto_free_shash(tfm);

	return ret;
}

/**
 * tb_domain_disconnect_pcie_paths() - Disconnect all PCIe paths
 * @tb: Domain whose PCIe paths to disconnect
 *
 * This needs to be called in preparation for NVM upgrade of the host
 * controller. Makes sure all PCIe paths are disconnected.
 *
 * Return: %0 on success and negative errno in case of error.
 */
int tb_domain_disconnect_pcie_paths(struct tb *tb)
{
	if (!tb->cm_ops->disconnect_pcie_paths)
		return -EPERM;

	return tb->cm_ops->disconnect_pcie_paths(tb);
}

/**
 * tb_domain_approve_xdomain_paths() - Enable DMA paths for XDomain
 * @tb: Domain enabling the DMA paths
 * @xd: XDomain DMA paths are created to
 *
 * Calls connection manager specific method to enable DMA paths to the
 * XDomain in question.
 *
 * Return: %0 in case of success and negative errno otherwise. In
 * particular returns %-ENOTSUPP if the connection manager
 * implementation does not support XDomains.
 */
int tb_domain_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
{
	if (!tb->cm_ops->approve_xdomain_paths)
		return -ENOTSUPP;

	return tb->cm_ops->approve_xdomain_paths(tb, xd);
}

/**
 * tb_domain_disconnect_xdomain_paths() - Disable DMA paths for XDomain
 * @tb: Domain disabling the DMA paths
 * @xd: XDomain whose DMA paths are disconnected
 *
 * Calls connection manager specific method to disconnect DMA paths to
 * the XDomain in question.
 *
 * Return: %0 in case of success and negative errno otherwise. In
 * particular returns %-ENOTSUPP if the connection manager
 * implementation does not support XDomains.
 */
int tb_domain_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
{
	if (!tb->cm_ops->disconnect_xdomain_paths)
		return -ENOTSUPP;

	return tb->cm_ops->disconnect_xdomain_paths(tb, xd);
}

static int disconnect_xdomain(struct device *dev, void *data)
{
	struct tb_xdomain *xd;
	struct tb *tb = data;
	int ret = 0;

	xd = tb_to_xdomain(dev);
	if (xd && xd->tb == tb)
		ret = tb_xdomain_disable_paths(xd);

	return ret;
}

/**
 * tb_domain_disconnect_all_paths() - Disconnect all paths for the domain
 * @tb: Domain whose paths are disconnected
 *
 * This function can be used to disconnect all paths (PCIe, XDomain) for
 * example in preparation for host NVM firmware upgrade. After this is
 * called the paths cannot be established without resetting the switch.
 *
 * Return: %0 in case of success and negative errno otherwise.
 */
int tb_domain_disconnect_all_paths(struct tb *tb)
{
	int ret;

	ret = tb_domain_disconnect_pcie_paths(tb);
	if (ret)
		return ret;

	return bus_for_each_dev(&tb_bus_type, NULL, tb, disconnect_xdomain);
}

int tb_domain_init(void)
{
	int ret;

	ret = tb_xdomain_init();
	if (ret)
		return ret;
	ret = bus_register(&tb_bus_type);
	if (ret)
		tb_xdomain_exit();

	return ret;
}

void tb_domain_exit(void)
{
	bus_unregister(&tb_bus_type);
	ida_destroy(&tb_domain_ida);
	tb_switch_exit();
	tb_xdomain_exit();
}