// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
//
// This file is provided under a dual BSD/GPLv2 license. When using or
// redistributing this file, you may do so under either license.
//
// Copyright(c) 2018 Intel Corporation. All rights reserved.
//
// Author: Liam Girdwood <liam.r.girdwood@linux.intel.com>
//
// Generic debug routines used to export DSP MMIO and memories to userspace
// for firmware debugging.
//

#include <linux/debugfs.h>
#include <linux/io.h>
#include <linux/pm_runtime.h>
#include <sound/sof/ext_manifest.h>
#include <sound/sof/debug.h>
#include "sof-priv.h"
#include "ops.h"

#if IS_ENABLED(CONFIG_SND_SOC_SOF_DEBUG_PROBES)
#include "sof-probes.h"

/**
 * strsplit_u32 - Split string into sequence of u32 tokens
 * @buf: String to split into tokens.
 * @delim: String containing delimiter characters.
 * @tkns: Returned u32 sequence pointer.
 * @num_tkns: Returned number of tokens obtained.
 */
static int
strsplit_u32(char **buf, const char *delim, u32 **tkns, size_t *num_tkns)
{
	char *s;
	u32 *data, *tmp;
	size_t count = 0;
	size_t cap = 32;
	int ret = 0;

	*tkns = NULL;
	*num_tkns = 0;
	data = kcalloc(cap, sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	while ((s = strsep(buf, delim)) != NULL) {
		ret = kstrtouint(s, 0, data + count);
		if (ret)
			goto exit;
		if (++count >= cap) {
			cap *= 2;
			tmp = krealloc(data, cap * sizeof(*data), GFP_KERNEL);
			if (!tmp) {
				ret = -ENOMEM;
				goto exit;
			}
			data = tmp;
		}
	}

	if (!count)
		goto exit;
	*tkns = kmemdup(data, count * sizeof(*data), GFP_KERNEL);
	if (*tkns == NULL) {
		ret = -ENOMEM;
		goto exit;
	}
	*num_tkns = count;

exit:
	kfree(data);
	return ret;
}
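
/*
 * Example (illustrative): with *buf pointing at the string "1,0x10,256"
 * and delim ",", strsplit_u32() returns a kmemdup'ed array { 1, 16, 256 }
 * through @tkns and sets @num_tkns to 3. The base is auto-detected by
 * kstrtouint() (base 0), so decimal, octal and 0x-prefixed hex all parse.
 */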

static int tokenize_input(const char __user *from, size_t count,
			  loff_t *ppos, u32 **tkns, size_t *num_tkns)
{
	char *buf;
	int ret;

	buf = kmalloc(count + 1, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	ret = simple_write_to_buffer(buf, count, ppos, from, count);
	if (ret != count) {
		ret = ret >= 0 ? -EIO : ret;
		goto exit;
	}

	buf[count] = '\0';
	ret = strsplit_u32((char **)&buf, ",", tkns, num_tkns);
exit:
	kfree(buf);
	return ret;
}

static ssize_t probe_points_read(struct file *file,
				 char __user *to, size_t count, loff_t *ppos)
{
	struct snd_sof_dfsentry *dfse = file->private_data;
	struct snd_sof_dev *sdev = dfse->sdev;
	struct sof_probe_point_desc *desc;
	size_t num_desc, len = 0;
	char *buf;
	int i, ret;

	if (sdev->extractor_stream_tag == SOF_PROBE_INVALID_NODE_ID) {
		dev_warn(sdev->dev, "no extractor stream running\n");
		return -ENOENT;
	}

	buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	ret = sof_ipc_probe_points_info(sdev, &desc, &num_desc);
	if (ret < 0)
		goto exit;

	for (i = 0; i < num_desc; i++) {
		ret = snprintf(buf + len, PAGE_SIZE - len,
			       "Id: %#010x Purpose: %d Node id: %#x\n",
			       desc[i].buffer_id, desc[i].purpose, desc[i].stream_tag);
		if (ret < 0)
			goto free_desc;
		len += ret;
	}

	ret = simple_read_from_buffer(to, count, ppos, buf, len);
free_desc:
	kfree(desc);
exit:
	kfree(buf);
	return ret;
}

static ssize_t probe_points_write(struct file *file,
				  const char __user *from, size_t count, loff_t *ppos)
{
	struct snd_sof_dfsentry *dfse = file->private_data;
	struct snd_sof_dev *sdev = dfse->sdev;
	struct sof_probe_point_desc *desc;
	size_t num_tkns, bytes;
	u32 *tkns;
	int ret;

	if (sdev->extractor_stream_tag == SOF_PROBE_INVALID_NODE_ID) {
		dev_warn(sdev->dev, "no extractor stream running\n");
		return -ENOENT;
	}

	ret = tokenize_input(from, count, ppos, &tkns, &num_tkns);
	if (ret < 0)
		return ret;
	bytes = sizeof(*tkns) * num_tkns;
	if (!num_tkns || (bytes % sizeof(*desc))) {
		ret = -EINVAL;
		goto exit;
	}

	desc = (struct sof_probe_point_desc *)tkns;
	ret = sof_ipc_probe_points_add(sdev,
				       desc, bytes / sizeof(*desc));
	if (!ret)
		ret = count;
exit:
	kfree(tkns);
	return ret;
}

static const struct file_operations probe_points_fops = {
	.open = simple_open,
	.read = probe_points_read,
	.write = probe_points_write,
	.llseek = default_llseek,
};

static ssize_t probe_points_remove_write(struct file *file,
					 const char __user *from, size_t count, loff_t *ppos)
{
	struct snd_sof_dfsentry *dfse = file->private_data;
	struct snd_sof_dev *sdev = dfse->sdev;
	size_t num_tkns;
	u32 *tkns;
	int ret;

	if (sdev->extractor_stream_tag == SOF_PROBE_INVALID_NODE_ID) {
		dev_warn(sdev->dev, "no extractor stream running\n");
		return -ENOENT;
	}

	ret = tokenize_input(from, count, ppos, &tkns, &num_tkns);
	if (ret < 0)
		return ret;
	if (!num_tkns) {
		ret = -EINVAL;
		goto exit;
	}

	ret = sof_ipc_probe_points_remove(sdev, tkns, num_tkns);
	if (!ret)
		ret = count;
exit:
	kfree(tkns);
	return ret;
}

static const struct file_operations probe_points_remove_fops = {
	.open = simple_open,
	.write = probe_points_remove_write,
	.llseek = default_llseek,
};
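
/*
 * Example usage (illustrative sketch; assumes debugfs is mounted at
 * /sys/kernel/debug and an extractor stream is already running):
 *
 *   # connect a probe point: one struct sof_probe_point_desc worth of
 *   # comma-separated u32 tokens (values here are hypothetical)
 *   echo 0x1000,1,3 > /sys/kernel/debug/sof/probe_points
 *   # list the currently connected probe points
 *   cat /sys/kernel/debug/sof/probe_points
 *   # disconnect probe points again
 *   echo 0x1000 > /sys/kernel/debug/sof/probe_points_remove
 *
 * The tokens written to probe_points must add up to whole struct
 * sof_probe_point_desc entries (partial entries are rejected with
 * -EINVAL); probe_points_remove takes a plain list of u32 ids.
 */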

static int snd_sof_debugfs_probe_item(struct snd_sof_dev *sdev,
				      const char *name, mode_t mode,
				      const struct file_operations *fops)
{
	struct snd_sof_dfsentry *dfse;

	dfse = devm_kzalloc(sdev->dev, sizeof(*dfse), GFP_KERNEL);
	if (!dfse)
		return -ENOMEM;

	dfse->type = SOF_DFSENTRY_TYPE_BUF;
	dfse->sdev = sdev;

	debugfs_create_file(name, mode, sdev->debugfs_root, dfse, fops);
	/* add to dfsentry list */
	list_add(&dfse->list, &sdev->dfsentry_list);

	return 0;
}
#endif

#if IS_ENABLED(CONFIG_SND_SOC_SOF_DEBUG_IPC_FLOOD_TEST)
#define MAX_IPC_FLOOD_DURATION_MS 1000
#define MAX_IPC_FLOOD_COUNT 10000
#define IPC_FLOOD_TEST_RESULT_LEN 512

static int sof_debug_ipc_flood_test(struct snd_sof_dev *sdev,
				    struct snd_sof_dfsentry *dfse,
				    bool flood_duration_test,
				    unsigned long ipc_duration_ms,
				    unsigned long ipc_count)
{
	struct sof_ipc_cmd_hdr hdr;
	struct sof_ipc_reply reply;
	u64 min_response_time = U64_MAX;
	ktime_t start, end, test_end;
	u64 avg_response_time = 0;
	u64 max_response_time = 0;
	u64 ipc_response_time;
	int i = 0;
	int ret;

	/* configure test IPC */
	hdr.cmd = SOF_IPC_GLB_TEST_MSG | SOF_IPC_TEST_IPC_FLOOD;
	hdr.size = sizeof(hdr);

	/* set test end time for duration flood test */
	if (flood_duration_test)
		test_end = ktime_get_ns() + ipc_duration_ms * NSEC_PER_MSEC;

	/* send test IPCs */
	while (1) {
		start = ktime_get();
		ret = sof_ipc_tx_message(sdev->ipc, hdr.cmd, &hdr, hdr.size,
					 &reply, sizeof(reply));
		end = ktime_get();

		if (ret < 0)
			break;

		/* compute min and max response times */
		ipc_response_time = ktime_to_ns(ktime_sub(end, start));
		min_response_time = min(min_response_time, ipc_response_time);
		max_response_time = max(max_response_time, ipc_response_time);

		/* sum up response times */
		avg_response_time += ipc_response_time;
		i++;

		/* test complete? */
		if (flood_duration_test) {
			if (ktime_to_ns(end) >= test_end)
				break;
		} else {
			if (i == ipc_count)
				break;
		}
	}

	if (ret < 0)
		dev_err(sdev->dev,
			"error: ipc flood test failed at %d iterations\n", i);

	/* return if the first IPC fails */
	if (!i)
		return ret;

	/* compute average response time */
	do_div(avg_response_time, i);

	/* clear previous test output */
	memset(dfse->cache_buf, 0, IPC_FLOOD_TEST_RESULT_LEN);

	if (flood_duration_test) {
		dev_dbg(sdev->dev, "IPC Flood test duration: %lums\n",
			ipc_duration_ms);
		snprintf(dfse->cache_buf, IPC_FLOOD_TEST_RESULT_LEN,
			 "IPC Flood test duration: %lums\n", ipc_duration_ms);
	}

	dev_dbg(sdev->dev,
		"IPC Flood count: %d, Avg response time: %lluns\n",
		i, avg_response_time);
	dev_dbg(sdev->dev, "Max response time: %lluns\n",
		max_response_time);
	dev_dbg(sdev->dev, "Min response time: %lluns\n",
		min_response_time);

	/* format output string */
	snprintf(dfse->cache_buf + strlen(dfse->cache_buf),
		 IPC_FLOOD_TEST_RESULT_LEN - strlen(dfse->cache_buf),
		 "IPC Flood count: %d\nAvg response time: %lluns\n",
		 i, avg_response_time);

	snprintf(dfse->cache_buf + strlen(dfse->cache_buf),
		 IPC_FLOOD_TEST_RESULT_LEN - strlen(dfse->cache_buf),
		 "Max response time: %lluns\nMin response time: %lluns\n",
		 max_response_time, min_response_time);

	return ret;
}
#endif

static ssize_t sof_dfsentry_write(struct file *file, const char __user *buffer,
				  size_t count, loff_t *ppos)
{
#if IS_ENABLED(CONFIG_SND_SOC_SOF_DEBUG_IPC_FLOOD_TEST)
	struct snd_sof_dfsentry *dfse = file->private_data;
	struct snd_sof_dev *sdev = dfse->sdev;
	unsigned long ipc_duration_ms = 0;
	bool flood_duration_test = false;
	unsigned long ipc_count = 0;
	struct dentry *dentry;
	int err;
#endif
	size_t size;
	char *string;
	int ret;

	string = kzalloc(count + 1, GFP_KERNEL);
	if (!string)
		return -ENOMEM;

	size = simple_write_to_buffer(string, count, ppos, buffer, count);
	ret = size;

#if IS_ENABLED(CONFIG_SND_SOC_SOF_DEBUG_IPC_FLOOD_TEST)
	/*
	 * The write op is currently only supported for the ipc_flood_count
	 * and ipc_flood_duration_ms debugfs entries.
	 * ipc_flood_count floods the DSP with the specified number of IPCs.
	 * ipc_flood_duration_ms floods the DSP for the duration (in ms)
	 * written to the debugfs entry.
	 */
	dentry = file->f_path.dentry;
	if (strcmp(dentry->d_name.name, "ipc_flood_count") &&
	    strcmp(dentry->d_name.name, "ipc_flood_duration_ms")) {
		ret = -EINVAL;
		goto out;
	}

	if (!strcmp(dentry->d_name.name, "ipc_flood_duration_ms"))
		flood_duration_test = true;

	/* test completion criterion */
	if (flood_duration_test)
		ret = kstrtoul(string, 0, &ipc_duration_ms);
	else
		ret = kstrtoul(string, 0, &ipc_count);
	if (ret < 0)
		goto out;

	/* limit max duration/ipc count for flood test */
	if (flood_duration_test) {
		if (!ipc_duration_ms) {
			ret = size;
			goto out;
		}

		/* find the minimum. min() is not used to avoid warnings */
		if (ipc_duration_ms > MAX_IPC_FLOOD_DURATION_MS)
			ipc_duration_ms = MAX_IPC_FLOOD_DURATION_MS;
	} else {
		if (!ipc_count) {
			ret = size;
			goto out;
		}

		/* find the minimum. min() is not used to avoid warnings */
		if (ipc_count > MAX_IPC_FLOOD_COUNT)
			ipc_count = MAX_IPC_FLOOD_COUNT;
	}

	ret = pm_runtime_get_sync(sdev->dev);
	if (ret < 0 && ret != -EACCES) {
		dev_err_ratelimited(sdev->dev,
				    "error: debugfs write failed to resume %d\n",
				    ret);
		pm_runtime_put_noidle(sdev->dev);
		goto out;
	}

	/* flood test */
	ret = sof_debug_ipc_flood_test(sdev, dfse, flood_duration_test,
				       ipc_duration_ms, ipc_count);

	pm_runtime_mark_last_busy(sdev->dev);
	err = pm_runtime_put_autosuspend(sdev->dev);
	if (err < 0)
		dev_err_ratelimited(sdev->dev,
				    "error: debugfs write failed to idle %d\n",
				    err);

	/* return size if test is successful */
	if (ret >= 0)
		ret = size;
out:
#endif
	kfree(string);
	return ret;
}
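
/*
 * Example usage (illustrative sketch; assumes debugfs is mounted at
 * /sys/kernel/debug and CONFIG_SND_SOC_SOF_DEBUG_IPC_FLOOD_TEST is set):
 *
 *   # flood the DSP with 1000 test IPCs
 *   echo 1000 > /sys/kernel/debug/sof/ipc_flood_count
 *   # or flood it for 500 ms
 *   echo 500 > /sys/kernel/debug/sof/ipc_flood_duration_ms
 *   # read back the iteration count and min/avg/max response times
 *   cat /sys/kernel/debug/sof/ipc_flood_count
 *
 * Written values are clamped to MAX_IPC_FLOOD_COUNT and
 * MAX_IPC_FLOOD_DURATION_MS; the formatted results of the last run are
 * kept in dfse->cache_buf and returned by sof_dfsentry_read().
 */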

static ssize_t sof_dfsentry_read(struct file *file, char __user *buffer,
				 size_t count, loff_t *ppos)
{
	struct snd_sof_dfsentry *dfse = file->private_data;
	struct snd_sof_dev *sdev = dfse->sdev;
	loff_t pos = *ppos;
	size_t size_ret;
	int skip = 0;
	int size;
	u8 *buf;
#if IS_ENABLED(CONFIG_SND_SOC_SOF_DEBUG_IPC_FLOOD_TEST)
	struct dentry *dentry;

	dentry = file->f_path.dentry;
	if ((!strcmp(dentry->d_name.name, "ipc_flood_count") ||
	     !strcmp(dentry->d_name.name, "ipc_flood_duration_ms"))) {
		if (*ppos)
			return 0;

		count = strlen(dfse->cache_buf);
		size_ret = copy_to_user(buffer, dfse->cache_buf, count);
		if (size_ret)
			return -EFAULT;

		*ppos += count;
		return count;
	}
#endif
	size = dfse->size;

	/* validate position & count */
	if (pos < 0)
		return -EINVAL;
	if (pos >= size || !count)
		return 0;
	/* find the minimum. min() is not used since it adds sparse warnings */
	if (count > size - pos)
		count = size - pos;

	/* align io read start to u32 multiple */
	pos = ALIGN_DOWN(pos, 4);

	/* intermediate buffer size must be u32 multiple */
	size = ALIGN(count, 4);

	/* if start position is unaligned, read extra u32 */
	if (unlikely(pos != *ppos)) {
		skip = *ppos - pos;
		if (pos + size + 4 < dfse->size)
			size += 4;
	}

	buf = kzalloc(size, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (dfse->type == SOF_DFSENTRY_TYPE_IOMEM) {
#if IS_ENABLED(CONFIG_SND_SOC_SOF_DEBUG_ENABLE_DEBUGFS_CACHE)
		/*
		 * If the DSP is active: copy from IO.
		 * If the DSP is suspended:
		 *	- Copy from IO if the memory is always accessible.
		 *	- Otherwise, copy from cached buffer.
		 */
		if (pm_runtime_active(sdev->dev) ||
		    dfse->access_type == SOF_DEBUGFS_ACCESS_ALWAYS) {
			memcpy_fromio(buf, dfse->io_mem + pos, size);
		} else {
			dev_info(sdev->dev,
				 "Copying cached debugfs data\n");
			memcpy(buf, dfse->cache_buf + pos, size);
		}
#else
		/* if the DSP is in D3 */
		if (!pm_runtime_active(sdev->dev) &&
		    dfse->access_type == SOF_DEBUGFS_ACCESS_D0_ONLY) {
			dev_err(sdev->dev,
				"error: debugfs entry cannot be read in DSP D3\n");
			kfree(buf);
			return -EINVAL;
		}

		memcpy_fromio(buf, dfse->io_mem + pos, size);
#endif
	} else {
		memcpy(buf, ((u8 *)(dfse->buf) + pos), size);
	}

	/* copy to userspace */
	size_ret = copy_to_user(buffer, buf + skip, count);

	kfree(buf);

	/* update count & position if copy succeeded */
	if (size_ret)
		return -EFAULT;

	*ppos = pos + count;

	return count;
}

static const struct file_operations sof_dfs_fops = {
	.open = simple_open,
	.read = sof_dfsentry_read,
	.llseek = default_llseek,
	.write = sof_dfsentry_write,
};

/* create FS entry for debug files that can expose DSP memories, registers */
static int snd_sof_debugfs_io_item(struct snd_sof_dev *sdev,
				   void __iomem *base, size_t size,
				   const char *name,
				   enum sof_debugfs_access_type access_type)
{
	struct snd_sof_dfsentry *dfse;

	if (!sdev)
		return -EINVAL;

	dfse = devm_kzalloc(sdev->dev, sizeof(*dfse), GFP_KERNEL);
	if (!dfse)
		return -ENOMEM;

	dfse->type = SOF_DFSENTRY_TYPE_IOMEM;
	dfse->io_mem = base;
	dfse->size = size;
	dfse->sdev = sdev;
	dfse->access_type = access_type;

#if IS_ENABLED(CONFIG_SND_SOC_SOF_DEBUG_ENABLE_DEBUGFS_CACHE)
	/*
	 * allocate cache buffer that will be used to save the mem window
	 * contents prior to suspend
	 */
	if (access_type == SOF_DEBUGFS_ACCESS_D0_ONLY) {
		dfse->cache_buf = devm_kzalloc(sdev->dev, size, GFP_KERNEL);
		if (!dfse->cache_buf)
			return -ENOMEM;
	}
#endif

	debugfs_create_file(name, 0444, sdev->debugfs_root, dfse,
			    &sof_dfs_fops);

	/* add to dfsentry list */
	list_add(&dfse->list, &sdev->dfsentry_list);

	return 0;
}

int snd_sof_debugfs_add_region_item_iomem(struct snd_sof_dev *sdev,
					  enum snd_sof_fw_blk_type blk_type, u32 offset,
					  size_t size, const char *name,
					  enum sof_debugfs_access_type access_type)
{
	int bar = snd_sof_dsp_get_bar_index(sdev, blk_type);

	if (bar < 0)
		return bar;

	return snd_sof_debugfs_io_item(sdev, sdev->bar[bar] + offset, size, name,
				       access_type);
}
EXPORT_SYMBOL_GPL(snd_sof_debugfs_add_region_item_iomem);

/* create FS entry for debug files to expose kernel memory */
int snd_sof_debugfs_buf_item(struct snd_sof_dev *sdev,
			     void *base, size_t size,
			     const char *name, mode_t mode)
{
	struct snd_sof_dfsentry *dfse;

	if (!sdev)
		return -EINVAL;

	dfse = devm_kzalloc(sdev->dev, sizeof(*dfse), GFP_KERNEL);
	if (!dfse)
		return -ENOMEM;

	dfse->type = SOF_DFSENTRY_TYPE_BUF;
	dfse->buf = base;
	dfse->size = size;
	dfse->sdev = sdev;

#if IS_ENABLED(CONFIG_SND_SOC_SOF_DEBUG_IPC_FLOOD_TEST)
	if (!strncmp(name, "ipc_flood", strlen("ipc_flood"))) {
		/*
		 * cache_buf is unused for SOF_DFSENTRY_TYPE_BUF debugfs entries.
		 * So, use it to save the results of the last IPC flood test.
		 */
		dfse->cache_buf = devm_kzalloc(sdev->dev, IPC_FLOOD_TEST_RESULT_LEN,
					       GFP_KERNEL);
		if (!dfse->cache_buf)
			return -ENOMEM;
	}
#endif

	debugfs_create_file(name, mode, sdev->debugfs_root, dfse,
			    &sof_dfs_fops);
	/* add to dfsentry list */
	list_add(&dfse->list, &sdev->dfsentry_list);

	return 0;
}
EXPORT_SYMBOL_GPL(snd_sof_debugfs_buf_item);

static int memory_info_update(struct snd_sof_dev *sdev, char *buf, size_t buff_size)
{
	struct sof_ipc_cmd_hdr msg = {
		.size = sizeof(struct sof_ipc_cmd_hdr),
		.cmd = SOF_IPC_GLB_DEBUG | SOF_IPC_DEBUG_MEM_USAGE,
	};
	struct sof_ipc_dbg_mem_usage *reply;
	int len;
	int ret;
	int i;

	reply = kmalloc(SOF_IPC_MSG_MAX_SIZE, GFP_KERNEL);
	if (!reply)
		return -ENOMEM;

	ret = pm_runtime_get_sync(sdev->dev);
	if (ret < 0 && ret != -EACCES) {
		pm_runtime_put_noidle(sdev->dev);
		dev_err(sdev->dev, "error: enabling device failed: %d\n", ret);
		goto error;
	}

	ret = sof_ipc_tx_message(sdev->ipc, msg.cmd, &msg, msg.size, reply, SOF_IPC_MSG_MAX_SIZE);
	pm_runtime_mark_last_busy(sdev->dev);
	pm_runtime_put_autosuspend(sdev->dev);
	if (ret < 0 || reply->rhdr.error < 0) {
		ret = min(ret, reply->rhdr.error);
		dev_err(sdev->dev, "error: reading memory info failed, %d\n", ret);
		goto error;
	}

	if (struct_size(reply, elems, reply->num_elems) != reply->rhdr.hdr.size) {
		dev_err(sdev->dev, "error: invalid memory info ipc struct size, %d\n",
			reply->rhdr.hdr.size);
		ret = -EINVAL;
		goto error;
	}

	for (i = 0, len = 0; i < reply->num_elems; i++) {
		ret = snprintf(buf + len, buff_size - len, "zone %d.%d used %#8x free %#8x\n",
			       reply->elems[i].zone, reply->elems[i].id,
			       reply->elems[i].used, reply->elems[i].free);
		if (ret < 0)
			goto error;
		len += ret;
	}

	ret = len;
error:
	kfree(reply);
	return ret;
}

static ssize_t memory_info_read(struct file *file, char __user *to, size_t count, loff_t *ppos)
{
	struct snd_sof_dfsentry *dfse = file->private_data;
	struct snd_sof_dev *sdev = dfse->sdev;
	int data_length;

	/* read memory info from FW only once for each file read */
	if (!*ppos) {
		dfse->buf_data_size = 0;
		data_length = memory_info_update(sdev, dfse->buf, dfse->size);
		if (data_length < 0)
			return data_length;
		dfse->buf_data_size = data_length;
	}

	return simple_read_from_buffer(to, count, ppos, dfse->buf, dfse->buf_data_size);
}

static int memory_info_open(struct inode *inode, struct file *file)
{
	struct snd_sof_dfsentry *dfse = inode->i_private;
	struct snd_sof_dev *sdev = dfse->sdev;

	file->private_data = dfse;

	/* allocate buffer memory only on the first open, to save memory when unused */
	if (!dfse->buf) {
		dfse->buf = devm_kmalloc(sdev->dev, PAGE_SIZE, GFP_KERNEL);
		if (!dfse->buf)
			return -ENOMEM;
		dfse->size = PAGE_SIZE;
	}

	return 0;
}

static const struct file_operations memory_info_fops = {
	.open = memory_info_open,
	.read = memory_info_read,
	.llseek = default_llseek,
};

int snd_sof_dbg_memory_info_init(struct snd_sof_dev *sdev)
{
	struct snd_sof_dfsentry *dfse;

	dfse = devm_kzalloc(sdev->dev, sizeof(*dfse), GFP_KERNEL);
	if (!dfse)
		return -ENOMEM;

	/* don't allocate buffer before first usage, to save memory when unused */
	dfse->type = SOF_DFSENTRY_TYPE_BUF;
	dfse->sdev = sdev;

	debugfs_create_file("memory_info", 0444, sdev->debugfs_root, dfse, &memory_info_fops);

	/* add to dfsentry list */
	list_add(&dfse->list, &sdev->dfsentry_list);
	return 0;
}
EXPORT_SYMBOL_GPL(snd_sof_dbg_memory_info_init);
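
/*
 * Example usage (illustrative sketch; assumes debugfs is mounted at
 * /sys/kernel/debug):
 *
 *   cat /sys/kernel/debug/sof/memory_info
 *
 * Each read queries the firmware via SOF_IPC_DEBUG_MEM_USAGE and prints
 * one "zone <zone>.<id> used <hex> free <hex>" line per reported memory
 * zone, as formatted by memory_info_update().
 */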

int snd_sof_dbg_init(struct snd_sof_dev *sdev)
{
	const struct snd_sof_dsp_ops *ops = sof_ops(sdev);
	const struct snd_sof_debugfs_map *map;
	int i;
	int err;

	/* use "sof" as top level debugFS dir */
	sdev->debugfs_root = debugfs_create_dir("sof", NULL);

	/* init dfsentry list */
	INIT_LIST_HEAD(&sdev->dfsentry_list);

	/* create debugFS files for platform specific MMIO/DSP memories */
	for (i = 0; i < ops->debug_map_count; i++) {
		map = &ops->debug_map[i];

		err = snd_sof_debugfs_io_item(sdev, sdev->bar[map->bar] +
					      map->offset, map->size,
					      map->name, map->access_type);
		/* errors are only due to memory allocation, not debugfs */
		if (err < 0)
			return err;
	}

#if IS_ENABLED(CONFIG_SND_SOC_SOF_DEBUG_PROBES)
	err = snd_sof_debugfs_probe_item(sdev, "probe_points",
					 0644, &probe_points_fops);
	if (err < 0)
		return err;
	err = snd_sof_debugfs_probe_item(sdev, "probe_points_remove",
					 0200, &probe_points_remove_fops);
	if (err < 0)
		return err;
#endif

#if IS_ENABLED(CONFIG_SND_SOC_SOF_DEBUG_IPC_FLOOD_TEST)
	/* create read-write ipc_flood_count debugfs entry */
	err = snd_sof_debugfs_buf_item(sdev, NULL, 0,
				       "ipc_flood_count", 0666);

	/* errors are only due to memory allocation, not debugfs */
	if (err < 0)
		return err;

	/* create read-write ipc_flood_duration_ms debugfs entry */
	err = snd_sof_debugfs_buf_item(sdev, NULL, 0,
				       "ipc_flood_duration_ms", 0666);

	/* errors are only due to memory allocation, not debugfs */
	if (err < 0)
		return err;
#endif

	return 0;
}
EXPORT_SYMBOL_GPL(snd_sof_dbg_init);

void snd_sof_free_debug(struct snd_sof_dev *sdev)
{
	debugfs_remove_recursive(sdev->debugfs_root);
}
EXPORT_SYMBOL_GPL(snd_sof_free_debug);

static const struct soc_fw_state_info {
	enum snd_sof_fw_state state;
	const char *name;
} fw_state_dbg[] = {
	{SOF_FW_BOOT_NOT_STARTED, "SOF_FW_BOOT_NOT_STARTED"},
	{SOF_FW_BOOT_PREPARE, "SOF_FW_BOOT_PREPARE"},
	{SOF_FW_BOOT_IN_PROGRESS, "SOF_FW_BOOT_IN_PROGRESS"},
	{SOF_FW_BOOT_FAILED, "SOF_FW_BOOT_FAILED"},
	{SOF_FW_BOOT_READY_FAILED, "SOF_FW_BOOT_READY_FAILED"},
	{SOF_FW_BOOT_COMPLETE, "SOF_FW_BOOT_COMPLETE"},
};

static void snd_sof_dbg_print_fw_state(struct snd_sof_dev *sdev)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(fw_state_dbg); i++) {
		if (sdev->fw_state == fw_state_dbg[i].state) {
			dev_err(sdev->dev, "fw_state: %s (%d)\n", fw_state_dbg[i].name, i);
			return;
		}
	}

	dev_err(sdev->dev, "fw_state: UNKNOWN (%d)\n", sdev->fw_state);
}

void snd_sof_dsp_dbg_dump(struct snd_sof_dev *sdev, u32 flags)
{
	bool print_all = !!(sof_core_debug & SOF_DBG_PRINT_ALL_DUMPS);

	if (flags & SOF_DBG_DUMP_OPTIONAL && !print_all)
		return;

	if (sof_ops(sdev)->dbg_dump && !sdev->dbg_dump_printed) {
		dev_err(sdev->dev, "------------[ DSP dump start ]------------\n");
		snd_sof_dbg_print_fw_state(sdev);
		sof_ops(sdev)->dbg_dump(sdev, flags);
		dev_err(sdev->dev, "------------[ DSP dump end ]------------\n");
		if (!print_all)
			sdev->dbg_dump_printed = true;
	}
}
EXPORT_SYMBOL(snd_sof_dsp_dbg_dump);

static void snd_sof_ipc_dump(struct snd_sof_dev *sdev)
{
	if (sof_ops(sdev)->ipc_dump && !sdev->ipc_dump_printed) {
		dev_err(sdev->dev, "------------[ IPC dump start ]------------\n");
		sof_ops(sdev)->ipc_dump(sdev);
		dev_err(sdev->dev, "------------[ IPC dump end ]------------\n");
		if (!(sof_core_debug & SOF_DBG_PRINT_ALL_DUMPS))
			sdev->ipc_dump_printed = true;
	}
}

void snd_sof_handle_fw_exception(struct snd_sof_dev *sdev)
{
	if (IS_ENABLED(CONFIG_SND_SOC_SOF_DEBUG_RETAIN_DSP_CONTEXT) ||
	    (sof_core_debug & SOF_DBG_RETAIN_CTX)) {
		/* should we prevent DSP entering D3 ? */
		if (!sdev->ipc_dump_printed)
			dev_info(sdev->dev,
				 "preventing DSP entering D3 state to preserve context\n");
		pm_runtime_get_noresume(sdev->dev);
	}

	/* dump vital information to the logs */
	snd_sof_ipc_dump(sdev);
	snd_sof_dsp_dbg_dump(sdev, SOF_DBG_DUMP_REGS | SOF_DBG_DUMP_MBOX);
	snd_sof_trace_notify_for_error(sdev);
}
EXPORT_SYMBOL(snd_sof_handle_fw_exception);