// SPDX-License-Identifier: GPL-2.0
/*
 * Debugfs interface
 *
 * Copyright (C) 2020, Intel Corporation
 * Authors: Gil Fine <gil.fine@intel.com>
 *          Mika Westerberg <mika.westerberg@linux.intel.com>
 */

#include <linux/debugfs.h>
#include <linux/pm_runtime.h>
#include <linux/uaccess.h>

#include "tb.h"

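/*
 * The entries below are created under the "thunderbolt" debugfs directory
 * (typically /sys/kernel/debug/thunderbolt when debugfs is mounted in the
 * usual place). Each router gets its own sub-directory holding a "regs"
 * file, plus a "port<N>" directory per adapter with "regs", "path" and
 * optionally "counters". For example (the device name here is only
 * illustrative):
 *
 *   # cat /sys/kernel/debug/thunderbolt/0-1/port1/regs
 *   # echo "<offset> <value>" > /sys/kernel/debug/thunderbolt/0-1/port1/regs
 *
 * The accepted write format is described in parse_line() below. Register
 * writes through "regs" are only available when CONFIG_USB4_DEBUGFS_WRITE
 * is enabled.
 */
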
#define PORT_CAP_PCIE_LEN	1
#define PORT_CAP_POWER_LEN	2
#define PORT_CAP_LANE_LEN	3
#define PORT_CAP_USB3_LEN	5
#define PORT_CAP_DP_LEN		8
#define PORT_CAP_TMU_LEN	8
#define PORT_CAP_BASIC_LEN	9
#define PORT_CAP_USB4_LEN	20

#define SWITCH_CAP_TMU_LEN	26
#define SWITCH_CAP_BASIC_LEN	27

#define PATH_LEN		2

#define COUNTER_SET_LEN		3

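/*
 * DEBUGFS_ATTR() generates the single_open() wrapper and the
 * file_operations for a seq_file based debugfs attribute. The _RO and _RW
 * variants below differ only in whether a <name>_write() handler is wired
 * to the ->write callback.
 */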
#define DEBUGFS_ATTR(__space, __write)					\
static int __space ## _open(struct inode *inode, struct file *file)	\
{									\
	return single_open(file, __space ## _show, inode->i_private);	\
}									\
									\
static const struct file_operations __space ## _fops = {		\
	.owner = THIS_MODULE,						\
	.open = __space ## _open,					\
	.release = single_release,					\
	.read = seq_read,						\
	.write = __write,						\
	.llseek = seq_lseek,						\
}

#define DEBUGFS_ATTR_RO(__space)					\
	DEBUGFS_ATTR(__space, NULL)

#define DEBUGFS_ATTR_RW(__space)					\
	DEBUGFS_ATTR(__space, __space ## _write)

static struct dentry *tb_debugfs_root;

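/*
 * Copies at most one page of data from user space into a freshly zeroed
 * page. On success @count is updated to the number of bytes copied and
 * the buffer is returned; the caller releases it with free_page(). On
 * failure an ERR_PTR() is returned instead.
 */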
static void *validate_and_copy_from_user(const void __user *user_buf,
					 size_t *count)
{
	size_t nbytes;
	void *buf;

	if (!*count)
		return ERR_PTR(-EINVAL);

	if (!access_ok(user_buf, *count))
		return ERR_PTR(-EFAULT);

	buf = (void *)get_zeroed_page(GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	nbytes = min_t(size_t, *count, PAGE_SIZE);
	if (copy_from_user(buf, user_buf, nbytes)) {
		free_page((unsigned long)buf);
		return ERR_PTR(-EFAULT);
	}

	*count = nbytes;
	return buf;
}

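/*
 * Parses the next line from @line and returns the offset/value pair in
 * @offs and @val. Both the short format written by the user and the long
 * format produced by the corresponding read side are accepted, so a dump
 * can be edited and written back as-is. Returns false when there are no
 * more parseable lines.
 */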
static bool parse_line(char **line, u32 *offs, u32 *val, int short_fmt_len,
		       int long_fmt_len)
{
	char *token;
	u32 v[5];
	int ret;

	token = strsep(line, "\n");
	if (!token)
		return false;

	/*
	 * For Adapter/Router configuration space:
	 * Short format is: offset value\n
	 *		    v[0]   v[1]
	 * Long format as produced from the read side:
	 * offset relative_offset cap_id vs_cap_id value\n
	 * v[0]   v[1]            v[2]   v[3]      v[4]
	 *
	 * For Counter configuration space:
	 * Short format is: offset\n
	 *		    v[0]
	 * Long format as produced from the read side:
	 * offset relative_offset counter_id value\n
	 * v[0]   v[1]            v[2]       v[3]
	 */
	ret = sscanf(token, "%i %i %i %i %i", &v[0], &v[1], &v[2], &v[3], &v[4]);
	/* In case of Counters, clear counter, "val" content is NA */
	if (ret == short_fmt_len) {
		*offs = v[0];
		*val = v[short_fmt_len - 1];
		return true;
	} else if (ret == long_fmt_len) {
		*offs = v[0];
		*val = v[long_fmt_len - 1];
		return true;
	}

	return false;
}

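/*
 * When CONFIG_USB4_DEBUGFS_WRITE is enabled the "regs" files are created
 * writable (mode 0600) and the handlers below let user space write router
 * and adapter configuration space registers directly. Such writes taint
 * the kernel with TAINT_USER since they change hardware state behind the
 * driver's back.
 */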
#if IS_ENABLED(CONFIG_USB4_DEBUGFS_WRITE)
static ssize_t regs_write(struct tb_switch *sw, struct tb_port *port,
			  const char __user *user_buf, size_t count,
			  loff_t *ppos)
{
	struct tb *tb = sw->tb;
	char *line, *buf;
	u32 val, offset;
	int ret = 0;

	buf = validate_and_copy_from_user(user_buf, &count);
	if (IS_ERR(buf))
		return PTR_ERR(buf);

	pm_runtime_get_sync(&sw->dev);

	if (mutex_lock_interruptible(&tb->lock)) {
		ret = -ERESTARTSYS;
		goto out;
	}

	/* User did hardware changes behind the driver's back */
	add_taint(TAINT_USER, LOCKDEP_STILL_OK);

	line = buf;
	while (parse_line(&line, &offset, &val, 2, 5)) {
		if (port)
			ret = tb_port_write(port, &val, TB_CFG_PORT, offset, 1);
		else
			ret = tb_sw_write(sw, &val, TB_CFG_SWITCH, offset, 1);
		if (ret)
			break;
	}

	mutex_unlock(&tb->lock);

out:
	pm_runtime_mark_last_busy(&sw->dev);
	pm_runtime_put_autosuspend(&sw->dev);
	free_page((unsigned long)buf);

	return ret < 0 ? ret : count;
}

static ssize_t port_regs_write(struct file *file, const char __user *user_buf,
			       size_t count, loff_t *ppos)
{
	struct seq_file *s = file->private_data;
	struct tb_port *port = s->private;

	return regs_write(port->sw, port, user_buf, count, ppos);
}

static ssize_t switch_regs_write(struct file *file, const char __user *user_buf,
				 size_t count, loff_t *ppos)
{
	struct seq_file *s = file->private_data;
	struct tb_switch *sw = s->private;

	return regs_write(sw, NULL, user_buf, count, ppos);
}
#define DEBUGFS_MODE		0600
#else
#define port_regs_write		NULL
#define switch_regs_write	NULL
#define DEBUGFS_MODE		0400
#endif

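/*
 * Zeroes the whole counter configuration space of the adapter in a single
 * write, clearing every counter set at once.
 */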
static int port_clear_all_counters(struct tb_port *port)
{
	u32 *buf;
	int ret;

	buf = kcalloc(COUNTER_SET_LEN * port->config.max_counters, sizeof(u32),
		      GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	ret = tb_port_write(port, buf, TB_CFG_COUNTERS, 0,
			    COUNTER_SET_LEN * port->config.max_counters);
	kfree(buf);

	return ret;
}

static ssize_t counters_write(struct file *file, const char __user *user_buf,
			      size_t count, loff_t *ppos)
{
	struct seq_file *s = file->private_data;
	struct tb_port *port = s->private;
	struct tb_switch *sw = port->sw;
	struct tb *tb = port->sw->tb;
	char *buf;
	int ret;

	buf = validate_and_copy_from_user(user_buf, &count);
	if (IS_ERR(buf))
		return PTR_ERR(buf);

	pm_runtime_get_sync(&sw->dev);

	if (mutex_lock_interruptible(&tb->lock)) {
		ret = -ERESTARTSYS;
		goto out;
	}

	/* If written delimiter only, clear all counters in one shot */
	if (buf[0] == '\n') {
		ret = port_clear_all_counters(port);
	} else {
		char *line = buf;
		u32 val, offset;

		ret = -EINVAL;
		while (parse_line(&line, &offset, &val, 1, 4)) {
			ret = tb_port_write(port, &val, TB_CFG_COUNTERS,
					    offset, 1);
			if (ret)
				break;
		}
	}

	mutex_unlock(&tb->lock);

out:
	pm_runtime_mark_last_busy(&sw->dev);
	pm_runtime_put_autosuspend(&sw->dev);
	free_page((unsigned long)buf);

	return ret < 0 ? ret : count;
}

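/*
 * Dumps @dwords registers of a capability one dword at a time. Used as a
 * fallback when a block read fails so that registers which are not
 * accessible are reported individually instead of aborting the whole dump.
 */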
static void cap_show_by_dw(struct seq_file *s, struct tb_switch *sw,
			   struct tb_port *port, unsigned int cap,
			   unsigned int offset, u8 cap_id, u8 vsec_id,
			   int dwords)
{
	int i, ret;
	u32 data;

	for (i = 0; i < dwords; i++) {
		if (port)
			ret = tb_port_read(port, &data, TB_CFG_PORT, cap + offset + i, 1);
		else
			ret = tb_sw_read(sw, &data, TB_CFG_SWITCH, cap + offset + i, 1);
		if (ret) {
			seq_printf(s, "0x%04x <not accessible>\n", cap + offset + i);
			continue;
		}

		seq_printf(s, "0x%04x %4d 0x%02x 0x%02x 0x%08x\n", cap + offset + i,
			   offset + i, cap_id, vsec_id, data);
	}
}

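/*
 * Dumps a capability in TB_MAX_CONFIG_RW_LENGTH sized block reads. If a
 * block read fails the remainder is dumped through cap_show_by_dw() so
 * individual inaccessible registers are still reported.
 */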
static void cap_show(struct seq_file *s, struct tb_switch *sw,
		     struct tb_port *port, unsigned int cap, u8 cap_id,
		     u8 vsec_id, int length)
{
	int ret, offset = 0;

	while (length > 0) {
		int i, dwords = min(length, TB_MAX_CONFIG_RW_LENGTH);
		u32 data[TB_MAX_CONFIG_RW_LENGTH];

		if (port)
			ret = tb_port_read(port, data, TB_CFG_PORT, cap + offset,
					   dwords);
		else
			ret = tb_sw_read(sw, data, TB_CFG_SWITCH, cap + offset, dwords);
		if (ret) {
			cap_show_by_dw(s, sw, port, cap, offset, cap_id, vsec_id, length);
			return;
		}

		for (i = 0; i < dwords; i++) {
			seq_printf(s, "0x%04x %4d 0x%02x 0x%02x 0x%08x\n",
				   cap + offset + i, offset + i,
				   cap_id, vsec_id, data[i]);
		}

		length -= dwords;
		offset += dwords;
	}
}

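/*
 * Determines the dump length of the capability at @cap from its capability
 * ID and dumps it. Vendor specific capabilities carry their length in the
 * (possibly two dword) extended header; unknown capabilities are reported
 * but not dumped.
 */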
static void port_cap_show(struct tb_port *port, struct seq_file *s,
			  unsigned int cap)
{
	struct tb_cap_any header;
	u8 vsec_id = 0;
	size_t length;
	int ret;

	ret = tb_port_read(port, &header, TB_CFG_PORT, cap, 1);
	if (ret) {
		seq_printf(s, "0x%04x <capability read failed>\n", cap);
		return;
	}

	switch (header.basic.cap) {
	case TB_PORT_CAP_PHY:
		length = PORT_CAP_LANE_LEN;
		break;

	case TB_PORT_CAP_TIME1:
		length = PORT_CAP_TMU_LEN;
		break;

	case TB_PORT_CAP_POWER:
		length = PORT_CAP_POWER_LEN;
		break;

	case TB_PORT_CAP_ADAP:
		if (tb_port_is_pcie_down(port) || tb_port_is_pcie_up(port)) {
			length = PORT_CAP_PCIE_LEN;
		} else if (tb_port_is_dpin(port) || tb_port_is_dpout(port)) {
			length = PORT_CAP_DP_LEN;
		} else if (tb_port_is_usb3_down(port) ||
			   tb_port_is_usb3_up(port)) {
			length = PORT_CAP_USB3_LEN;
		} else {
			seq_printf(s, "0x%04x <unsupported capability 0x%02x>\n",
				   cap, header.basic.cap);
			return;
		}
		break;

	case TB_PORT_CAP_VSE:
		if (!header.extended_short.length) {
			ret = tb_port_read(port, (u32 *)&header + 1, TB_CFG_PORT,
					   cap + 1, 1);
			if (ret) {
				seq_printf(s, "0x%04x <capability read failed>\n",
					   cap + 1);
				return;
			}
			length = header.extended_long.length;
			vsec_id = header.extended_short.vsec_id;
		} else {
			length = header.extended_short.length;
			vsec_id = header.extended_short.vsec_id;
		}
		break;

	case TB_PORT_CAP_USB4:
		length = PORT_CAP_USB4_LEN;
		break;

	default:
		seq_printf(s, "0x%04x <unsupported capability 0x%02x>\n",
			   cap, header.basic.cap);
		return;
	}

	cap_show(s, NULL, port, cap, header.basic.cap, vsec_id, length);
}

static void port_caps_show(struct tb_port *port, struct seq_file *s)
{
	int cap;

	cap = tb_port_next_cap(port, 0);
	while (cap > 0) {
		port_cap_show(port, s, cap);
		cap = tb_port_next_cap(port, cap);
	}
}

static int port_basic_regs_show(struct tb_port *port, struct seq_file *s)
{
	u32 data[PORT_CAP_BASIC_LEN];
	int ret, i;

	ret = tb_port_read(port, data, TB_CFG_PORT, 0, ARRAY_SIZE(data));
	if (ret)
		return ret;

	for (i = 0; i < ARRAY_SIZE(data); i++)
		seq_printf(s, "0x%04x %4d 0x00 0x00 0x%08x\n", i, i, data[i]);

	return 0;
}

static int port_regs_show(struct seq_file *s, void *not_used)
{
	struct tb_port *port = s->private;
	struct tb_switch *sw = port->sw;
	struct tb *tb = sw->tb;
	int ret;

	pm_runtime_get_sync(&sw->dev);

	if (mutex_lock_interruptible(&tb->lock)) {
		ret = -ERESTARTSYS;
		goto out_rpm_put;
	}

	seq_puts(s, "# offset relative_offset cap_id vs_cap_id value\n");

	ret = port_basic_regs_show(port, s);
	if (ret)
		goto out_unlock;

	port_caps_show(port, s);

out_unlock:
	mutex_unlock(&tb->lock);
out_rpm_put:
	pm_runtime_mark_last_busy(&sw->dev);
	pm_runtime_put_autosuspend(&sw->dev);

	return ret;
}
DEBUGFS_ATTR_RW(port_regs);

static void switch_cap_show(struct tb_switch *sw, struct seq_file *s,
			    unsigned int cap)
{
	struct tb_cap_any header;
	int ret, length;
	u8 vsec_id = 0;

	ret = tb_sw_read(sw, &header, TB_CFG_SWITCH, cap, 1);
	if (ret) {
		seq_printf(s, "0x%04x <capability read failed>\n", cap);
		return;
	}

	if (header.basic.cap == TB_SWITCH_CAP_VSE) {
		if (!header.extended_short.length) {
			ret = tb_sw_read(sw, (u32 *)&header + 1, TB_CFG_SWITCH,
					 cap + 1, 1);
			if (ret) {
				seq_printf(s, "0x%04x <capability read failed>\n",
					   cap + 1);
				return;
			}
			length = header.extended_long.length;
		} else {
			length = header.extended_short.length;
		}
		vsec_id = header.extended_short.vsec_id;
	} else {
		if (header.basic.cap == TB_SWITCH_CAP_TMU) {
			length = SWITCH_CAP_TMU_LEN;
		} else {
			seq_printf(s, "0x%04x <unknown capability 0x%02x>\n",
				   cap, header.basic.cap);
			return;
		}
	}

	cap_show(s, sw, NULL, cap, header.basic.cap, vsec_id, length);
}

static void switch_caps_show(struct tb_switch *sw, struct seq_file *s)
{
	int cap;

	cap = tb_switch_next_cap(sw, 0);
	while (cap > 0) {
		switch_cap_show(sw, s, cap);
		cap = tb_switch_next_cap(sw, cap);
	}
}

static int switch_basic_regs_show(struct tb_switch *sw, struct seq_file *s)
{
	u32 data[SWITCH_CAP_BASIC_LEN];
	size_t dwords;
	int ret, i;

	/* Only USB4 has the additional registers */
	if (tb_switch_is_usb4(sw))
		dwords = ARRAY_SIZE(data);
	else
		dwords = 7;

	ret = tb_sw_read(sw, data, TB_CFG_SWITCH, 0, dwords);
	if (ret)
		return ret;

	for (i = 0; i < dwords; i++)
		seq_printf(s, "0x%04x %4d 0x00 0x00 0x%08x\n", i, i, data[i]);

	return 0;
}

static int switch_regs_show(struct seq_file *s, void *not_used)
{
	struct tb_switch *sw = s->private;
	struct tb *tb = sw->tb;
	int ret;

	pm_runtime_get_sync(&sw->dev);

	if (mutex_lock_interruptible(&tb->lock)) {
		ret = -ERESTARTSYS;
		goto out_rpm_put;
	}

	seq_puts(s, "# offset relative_offset cap_id vs_cap_id value\n");

	ret = switch_basic_regs_show(sw, s);
	if (ret)
		goto out_unlock;

	switch_caps_show(sw, s);

out_unlock:
	mutex_unlock(&tb->lock);
out_rpm_put:
	pm_runtime_mark_last_busy(&sw->dev);
	pm_runtime_put_autosuspend(&sw->dev);

	return ret;
}
DEBUGFS_ATTR_RW(switch_regs);

static int path_show_one(struct tb_port *port, struct seq_file *s, int hopid)
{
	u32 data[PATH_LEN];
	int ret, i;

	ret = tb_port_read(port, data, TB_CFG_HOPS, hopid * PATH_LEN,
			   ARRAY_SIZE(data));
	if (ret) {
		seq_printf(s, "0x%04x <not accessible>\n", hopid * PATH_LEN);
		return ret;
	}

	for (i = 0; i < ARRAY_SIZE(data); i++) {
		seq_printf(s, "0x%04x %4d 0x%02x 0x%08x\n",
			   hopid * PATH_LEN + i, i, hopid, data[i]);
	}

	return 0;
}

static int path_show(struct seq_file *s, void *not_used)
{
	struct tb_port *port = s->private;
	struct tb_switch *sw = port->sw;
	struct tb *tb = sw->tb;
	int start, i, ret = 0;

	pm_runtime_get_sync(&sw->dev);

	if (mutex_lock_interruptible(&tb->lock)) {
		ret = -ERESTARTSYS;
		goto out_rpm_put;
	}

	seq_puts(s, "# offset relative_offset in_hop_id value\n");

	/* NHI and lane adapters have entry for path 0 */
	if (tb_port_is_null(port) || tb_port_is_nhi(port)) {
		ret = path_show_one(port, s, 0);
		if (ret)
			goto out_unlock;
	}

	start = tb_port_is_nhi(port) ? 1 : TB_PATH_MIN_HOPID;

	for (i = start; i <= port->config.max_in_hop_id; i++) {
		ret = path_show_one(port, s, i);
		if (ret)
			break;
	}

out_unlock:
	mutex_unlock(&tb->lock);
out_rpm_put:
	pm_runtime_mark_last_busy(&sw->dev);
	pm_runtime_put_autosuspend(&sw->dev);

	return ret;
}
DEBUGFS_ATTR_RO(path);

static int counter_set_regs_show(struct tb_port *port, struct seq_file *s,
				 int counter)
{
	u32 data[COUNTER_SET_LEN];
	int ret, i;

	ret = tb_port_read(port, data, TB_CFG_COUNTERS,
			   counter * COUNTER_SET_LEN, ARRAY_SIZE(data));
	if (ret) {
		seq_printf(s, "0x%04x <not accessible>\n",
			   counter * COUNTER_SET_LEN);
		return ret;
	}

	for (i = 0; i < ARRAY_SIZE(data); i++) {
		seq_printf(s, "0x%04x %4d 0x%02x 0x%08x\n",
			   counter * COUNTER_SET_LEN + i, i, counter, data[i]);
	}

	return 0;
}

static int counters_show(struct seq_file *s, void *not_used)
{
	struct tb_port *port = s->private;
	struct tb_switch *sw = port->sw;
	struct tb *tb = sw->tb;
	int i, ret = 0;

	pm_runtime_get_sync(&sw->dev);

	if (mutex_lock_interruptible(&tb->lock)) {
		ret = -ERESTARTSYS;
		goto out;
	}

	seq_puts(s, "# offset relative_offset counter_id value\n");

	for (i = 0; i < port->config.max_counters; i++) {
		ret = counter_set_regs_show(port, s, i);
		if (ret)
			break;
	}

	mutex_unlock(&tb->lock);

out:
	pm_runtime_mark_last_busy(&sw->dev);
	pm_runtime_put_autosuspend(&sw->dev);

	return ret;
}
DEBUGFS_ATTR_RW(counters);

/**
 * tb_switch_debugfs_init() - Add debugfs entries for router
 * @sw: Pointer to the router
 *
 * Adds debugfs directories and files for given router.
 */
void tb_switch_debugfs_init(struct tb_switch *sw)
{
	struct dentry *debugfs_dir;
	struct tb_port *port;

	debugfs_dir = debugfs_create_dir(dev_name(&sw->dev), tb_debugfs_root);
	sw->debugfs_dir = debugfs_dir;
	debugfs_create_file("regs", DEBUGFS_MODE, debugfs_dir, sw,
			    &switch_regs_fops);

	tb_switch_for_each_port(sw, port) {
		struct dentry *debugfs_dir;
		char dir_name[10];

		if (port->disabled)
			continue;
		if (port->config.type == TB_TYPE_INACTIVE)
			continue;

		snprintf(dir_name, sizeof(dir_name), "port%d", port->port);
		debugfs_dir = debugfs_create_dir(dir_name, sw->debugfs_dir);
		debugfs_create_file("regs", DEBUGFS_MODE, debugfs_dir,
				    port, &port_regs_fops);
		debugfs_create_file("path", 0400, debugfs_dir, port,
				    &path_fops);
		if (port->config.counters_support)
			debugfs_create_file("counters", 0600, debugfs_dir, port,
					    &counters_fops);
	}
}

/**
 * tb_switch_debugfs_remove() - Remove all router debugfs entries
 * @sw: Pointer to the router
 *
 * Removes all previously added debugfs entries under this router.
 */
void tb_switch_debugfs_remove(struct tb_switch *sw)
{
	debugfs_remove_recursive(sw->debugfs_dir);
}

/**
 * tb_service_debugfs_init() - Add debugfs directory for service
 * @svc: Thunderbolt service pointer
 *
 * Adds debugfs directory for service.
 */
void tb_service_debugfs_init(struct tb_service *svc)
{
	svc->debugfs_dir = debugfs_create_dir(dev_name(&svc->dev),
					      tb_debugfs_root);
}

/**
 * tb_service_debugfs_remove() - Remove service debugfs directory
 * @svc: Thunderbolt service pointer
 *
 * Removes the previously created debugfs directory for @svc.
 */
void tb_service_debugfs_remove(struct tb_service *svc)
{
	debugfs_remove_recursive(svc->debugfs_dir);
	svc->debugfs_dir = NULL;
}

void tb_debugfs_init(void)
{
	tb_debugfs_root = debugfs_create_dir("thunderbolt", NULL);
}

void tb_debugfs_exit(void)
{
	debugfs_remove_recursive(tb_debugfs_root);
}