// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
//
// This file is provided under a dual BSD/GPLv2 license. When using or
// redistributing this file, you may do so under either license.
//
// Copyright(c) 2018 Intel Corporation. All rights reserved.
//
// Author: Liam Girdwood <liam.r.girdwood@linux.intel.com>
//
// Generic debug routines used to export DSP MMIO and memories to userspace
// for firmware debugging.
//

#include <linux/debugfs.h>
#include <linux/io.h>
#include <linux/pm_runtime.h>
#include <sound/sof/ext_manifest.h>
#include <sound/sof/debug.h>
#include "sof-priv.h"
#include "ops.h"

#if IS_ENABLED(CONFIG_SND_SOC_SOF_DEBUG_PROBES)
#include "probe.h"

/**
 * strsplit_u32 - Split string into sequence of u32 tokens
 * @buf: String to split into tokens.
 * @delim: String containing delimiter characters.
 * @tkns: Returned u32 sequence pointer.
 * @num_tkns: Returned number of tokens obtained.
 */
static int
strsplit_u32(char **buf, const char *delim, u32 **tkns, size_t *num_tkns)
{
	char *s;
	u32 *data, *tmp;
	size_t count = 0;
	size_t cap = 32;
	int ret = 0;

	*tkns = NULL;
	*num_tkns = 0;
	data = kcalloc(cap, sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	while ((s = strsep(buf, delim)) != NULL) {
		ret = kstrtouint(s, 0, data + count);
		if (ret)
			goto exit;
		if (++count >= cap) {
			cap *= 2;
			tmp = krealloc(data, cap * sizeof(*data), GFP_KERNEL);
			if (!tmp) {
				ret = -ENOMEM;
				goto exit;
			}
			data = tmp;
		}
	}

	if (!count)
		goto exit;
	*tkns = kmemdup(data, count * sizeof(*data), GFP_KERNEL);
	if (*tkns == NULL) {
		ret = -ENOMEM;
		goto exit;
	}
	*num_tkns = count;

exit:
	kfree(data);
	return ret;
}
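
/*
 * Illustrative example: with *buf pointing to the string "1,0x10,20" and
 * delim ",", strsplit_u32() is expected to return 0 with *num_tkns == 3 and
 * *tkns == {1, 0x10, 20}; kstrtouint() with base 0 accepts decimal, hex and
 * octal forms. The returned array is allocated with kmemdup() and must be
 * kfree()d by the caller.
 */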

static int tokenize_input(const char __user *from, size_t count,
			  loff_t *ppos, u32 **tkns, size_t *num_tkns)
{
	char *buf, *tkn_str;
	int ret;

	buf = kmalloc(count + 1, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	ret = simple_write_to_buffer(buf, count, ppos, from, count);
	if (ret != count) {
		ret = ret >= 0 ? -EIO : ret;
		goto exit;
	}

	buf[count] = '\0';
	/*
	 * strsplit_u32() advances the pointer it is given via strsep(), so
	 * pass a copy and keep buf intact for the kfree() below.
	 */
	tkn_str = buf;
	ret = strsplit_u32(&tkn_str, ",", tkns, num_tkns);
exit:
	kfree(buf);
	return ret;
}

static ssize_t probe_points_read(struct file *file,
				 char __user *to, size_t count, loff_t *ppos)
{
	struct snd_sof_dfsentry *dfse = file->private_data;
	struct snd_sof_dev *sdev = dfse->sdev;
	struct sof_probe_point_desc *desc;
	size_t num_desc, len = 0;
	char *buf;
	int i, ret;

	if (sdev->extractor_stream_tag == SOF_PROBE_INVALID_NODE_ID) {
		dev_warn(sdev->dev, "no extractor stream running\n");
		return -ENOENT;
	}

	buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	ret = sof_ipc_probe_points_info(sdev, &desc, &num_desc);
	if (ret < 0)
		goto exit;

	for (i = 0; i < num_desc; i++) {
		ret = snprintf(buf + len, PAGE_SIZE - len,
			       "Id: %#010x Purpose: %d Node id: %#x\n",
			       desc[i].buffer_id, desc[i].purpose,
			       desc[i].stream_tag);
		if (ret < 0)
			goto free_desc;
		len += ret;
	}

	ret = simple_read_from_buffer(to, count, ppos, buf, len);
free_desc:
	kfree(desc);
exit:
	kfree(buf);
	return ret;
}

static ssize_t probe_points_write(struct file *file,
				  const char __user *from, size_t count,
				  loff_t *ppos)
{
	struct snd_sof_dfsentry *dfse = file->private_data;
	struct snd_sof_dev *sdev = dfse->sdev;
	struct sof_probe_point_desc *desc;
	size_t num_tkns, bytes;
	u32 *tkns;
	int ret;

	if (sdev->extractor_stream_tag == SOF_PROBE_INVALID_NODE_ID) {
		dev_warn(sdev->dev, "no extractor stream running\n");
		return -ENOENT;
	}

	ret = tokenize_input(from, count, ppos, &tkns, &num_tkns);
	if (ret < 0)
		return ret;
	bytes = sizeof(*tkns) * num_tkns;
	if (!num_tkns || (bytes % sizeof(*desc))) {
		ret = -EINVAL;
		goto exit;
	}

	desc = (struct sof_probe_point_desc *)tkns;
	ret = sof_ipc_probe_points_add(sdev,
				       desc, bytes / sizeof(*desc));
	if (!ret)
		ret = count;
exit:
	kfree(tkns);
	return ret;
}

static const struct file_operations probe_points_fops = {
	.open = simple_open,
	.read = probe_points_read,
	.write = probe_points_write,
	.llseek = default_llseek,
};
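
/*
 * Illustrative usage, assuming debugfs is mounted at /sys/kernel/debug and an
 * extractor stream is already running (values are examples only). A write
 * takes comma-separated u32s, three per probe point, laid out like
 * struct sof_probe_point_desc (the buffer_id, purpose and stream_tag fields
 * printed by probe_points_read() above, assuming that field order); a read
 * lists the probe points currently active in the firmware:
 *
 *   echo 0x1000,1,3 > /sys/kernel/debug/sof/probe_points
 *   cat /sys/kernel/debug/sof/probe_points
 */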

static ssize_t probe_points_remove_write(struct file *file,
					 const char __user *from, size_t count,
					 loff_t *ppos)
{
	struct snd_sof_dfsentry *dfse = file->private_data;
	struct snd_sof_dev *sdev = dfse->sdev;
	size_t num_tkns;
	u32 *tkns;
	int ret;

	if (sdev->extractor_stream_tag == SOF_PROBE_INVALID_NODE_ID) {
		dev_warn(sdev->dev, "no extractor stream running\n");
		return -ENOENT;
	}

	ret = tokenize_input(from, count, ppos, &tkns, &num_tkns);
	if (ret < 0)
		return ret;
	if (!num_tkns) {
		ret = -EINVAL;
		goto exit;
	}

	ret = sof_ipc_probe_points_remove(sdev, tkns, num_tkns);
	if (!ret)
		ret = count;
exit:
	kfree(tkns);
	return ret;
}

static const struct file_operations probe_points_remove_fops = {
	.open = simple_open,
	.write = probe_points_remove_write,
	.llseek = default_llseek,
};
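
/*
 * Illustrative usage, assuming debugfs is mounted at /sys/kernel/debug (the
 * buffer id is an example only): writing comma-separated buffer ids asks the
 * firmware to remove the corresponding probe points, e.g.
 *
 *   echo 0x1000 > /sys/kernel/debug/sof/probe_points_remove
 */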

static int snd_sof_debugfs_probe_item(struct snd_sof_dev *sdev,
				      const char *name, mode_t mode,
				      const struct file_operations *fops)
{
	struct snd_sof_dfsentry *dfse;

	dfse = devm_kzalloc(sdev->dev, sizeof(*dfse), GFP_KERNEL);
	if (!dfse)
		return -ENOMEM;

	dfse->type = SOF_DFSENTRY_TYPE_BUF;
	dfse->sdev = sdev;

	debugfs_create_file(name, mode, sdev->debugfs_root, dfse, fops);
	/* add to dfsentry list */
	list_add(&dfse->list, &sdev->dfsentry_list);

	return 0;
}
#endif
236
Ranjani Sridharan091c12e2019-06-03 11:18:20 -0500237#if IS_ENABLED(CONFIG_SND_SOC_SOF_DEBUG_IPC_FLOOD_TEST)
238#define MAX_IPC_FLOOD_DURATION_MS 1000
239#define MAX_IPC_FLOOD_COUNT 10000
240#define IPC_FLOOD_TEST_RESULT_LEN 512
241
242static int sof_debug_ipc_flood_test(struct snd_sof_dev *sdev,
243 struct snd_sof_dfsentry *dfse,
244 bool flood_duration_test,
245 unsigned long ipc_duration_ms,
246 unsigned long ipc_count)
247{
248 struct sof_ipc_cmd_hdr hdr;
249 struct sof_ipc_reply reply;
250 u64 min_response_time = U64_MAX;
251 ktime_t start, end, test_end;
252 u64 avg_response_time = 0;
253 u64 max_response_time = 0;
254 u64 ipc_response_time;
255 int i = 0;
256 int ret;
257
258 /* configure test IPC */
259 hdr.cmd = SOF_IPC_GLB_TEST_MSG | SOF_IPC_TEST_IPC_FLOOD;
260 hdr.size = sizeof(hdr);
261
262 /* set test end time for duration flood test */
263 if (flood_duration_test)
264 test_end = ktime_get_ns() + ipc_duration_ms * NSEC_PER_MSEC;
265
266 /* send test IPC's */
267 while (1) {
268 start = ktime_get();
269 ret = sof_ipc_tx_message(sdev->ipc, hdr.cmd, &hdr, hdr.size,
270 &reply, sizeof(reply));
271 end = ktime_get();
272
273 if (ret < 0)
274 break;
275
276 /* compute min and max response times */
277 ipc_response_time = ktime_to_ns(ktime_sub(end, start));
278 min_response_time = min(min_response_time, ipc_response_time);
279 max_response_time = max(max_response_time, ipc_response_time);
280
281 /* sum up response times */
282 avg_response_time += ipc_response_time;
283 i++;
284
285 /* test complete? */
286 if (flood_duration_test) {
287 if (ktime_to_ns(end) >= test_end)
288 break;
289 } else {
290 if (i == ipc_count)
291 break;
292 }
293 }
294
295 if (ret < 0)
296 dev_err(sdev->dev,
297 "error: ipc flood test failed at %d iterations\n", i);
298
299 /* return if the first IPC fails */
300 if (!i)
301 return ret;
302
303 /* compute average response time */
304 do_div(avg_response_time, i);
305
306 /* clear previous test output */
307 memset(dfse->cache_buf, 0, IPC_FLOOD_TEST_RESULT_LEN);
308
309 if (flood_duration_test) {
310 dev_dbg(sdev->dev, "IPC Flood test duration: %lums\n",
311 ipc_duration_ms);
312 snprintf(dfse->cache_buf, IPC_FLOOD_TEST_RESULT_LEN,
313 "IPC Flood test duration: %lums\n", ipc_duration_ms);
314 }
315
316 dev_dbg(sdev->dev,
317 "IPC Flood count: %d, Avg response time: %lluns\n",
318 i, avg_response_time);
319 dev_dbg(sdev->dev, "Max response time: %lluns\n",
320 max_response_time);
321 dev_dbg(sdev->dev, "Min response time: %lluns\n",
322 min_response_time);
323
324 /* format output string */
325 snprintf(dfse->cache_buf + strlen(dfse->cache_buf),
326 IPC_FLOOD_TEST_RESULT_LEN - strlen(dfse->cache_buf),
327 "IPC Flood count: %d\nAvg response time: %lluns\n",
328 i, avg_response_time);
329
330 snprintf(dfse->cache_buf + strlen(dfse->cache_buf),
331 IPC_FLOOD_TEST_RESULT_LEN - strlen(dfse->cache_buf),
332 "Max response time: %lluns\nMin response time: %lluns\n",
333 max_response_time, min_response_time);
334
335 return ret;
336}
337#endif

static ssize_t sof_dfsentry_write(struct file *file, const char __user *buffer,
				  size_t count, loff_t *ppos)
{
#if IS_ENABLED(CONFIG_SND_SOC_SOF_DEBUG_IPC_FLOOD_TEST)
	struct snd_sof_dfsentry *dfse = file->private_data;
	struct snd_sof_dev *sdev = dfse->sdev;
	unsigned long ipc_duration_ms = 0;
	bool flood_duration_test = false;
	unsigned long ipc_count = 0;
	struct dentry *dentry;
	int err;
#endif
	size_t size;
	char *string;
	int ret;

	string = kzalloc(count + 1, GFP_KERNEL);
	if (!string)
		return -ENOMEM;

	size = simple_write_to_buffer(string, count, ppos, buffer, count);
	ret = size;

#if IS_ENABLED(CONFIG_SND_SOC_SOF_DEBUG_IPC_FLOOD_TEST)
	/*
	 * The write op is currently only supported for the ipc_flood_count
	 * and ipc_flood_duration_ms debugfs entries.
	 * ipc_flood_count floods the DSP with the number of IPCs specified.
	 * ipc_flood_duration_ms floods the DSP for the duration specified
	 * in the debugfs entry.
	 */
	dentry = file->f_path.dentry;
	if (strcmp(dentry->d_name.name, "ipc_flood_count") &&
	    strcmp(dentry->d_name.name, "ipc_flood_duration_ms")) {
		ret = -EINVAL;
		goto out;
	}

	if (!strcmp(dentry->d_name.name, "ipc_flood_duration_ms"))
		flood_duration_test = true;

	/* test completion criterion */
	if (flood_duration_test)
		ret = kstrtoul(string, 0, &ipc_duration_ms);
	else
		ret = kstrtoul(string, 0, &ipc_count);
	if (ret < 0)
		goto out;

	/* limit max duration/ipc count for flood test */
	if (flood_duration_test) {
		if (!ipc_duration_ms) {
			ret = size;
			goto out;
		}

		/* find the minimum. min() is not used to avoid warnings */
		if (ipc_duration_ms > MAX_IPC_FLOOD_DURATION_MS)
			ipc_duration_ms = MAX_IPC_FLOOD_DURATION_MS;
	} else {
		if (!ipc_count) {
			ret = size;
			goto out;
		}

		/* find the minimum. min() is not used to avoid warnings */
		if (ipc_count > MAX_IPC_FLOOD_COUNT)
			ipc_count = MAX_IPC_FLOOD_COUNT;
	}

	ret = pm_runtime_get_sync(sdev->dev);
	if (ret < 0 && ret != -EACCES) {
		dev_err_ratelimited(sdev->dev,
				    "error: debugfs write failed to resume %d\n",
				    ret);
		pm_runtime_put_noidle(sdev->dev);
		goto out;
	}

	/* flood test */
	ret = sof_debug_ipc_flood_test(sdev, dfse, flood_duration_test,
				       ipc_duration_ms, ipc_count);

	pm_runtime_mark_last_busy(sdev->dev);
	err = pm_runtime_put_autosuspend(sdev->dev);
	if (err < 0)
		dev_err_ratelimited(sdev->dev,
				    "error: debugfs write failed to idle %d\n",
				    err);

	/* return size if test is successful */
	if (ret >= 0)
		ret = size;
out:
#endif
	kfree(string);
	return ret;
}
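
/*
 * Illustrative usage of the IPC flood test entries, assuming debugfs is
 * mounted at /sys/kernel/debug (the values are examples only):
 *
 *   echo 100 > /sys/kernel/debug/sof/ipc_flood_count        # flood with 100 IPCs
 *   echo 500 > /sys/kernel/debug/sof/ipc_flood_duration_ms  # flood for 500 ms
 *   cat /sys/kernel/debug/sof/ipc_flood_count               # results of last test
 *
 * Reads return the output of the most recent test, which
 * sof_debug_ipc_flood_test() stores in dfse->cache_buf.
 */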

static ssize_t sof_dfsentry_read(struct file *file, char __user *buffer,
				 size_t count, loff_t *ppos)
{
	struct snd_sof_dfsentry *dfse = file->private_data;
	struct snd_sof_dev *sdev = dfse->sdev;
	loff_t pos = *ppos;
	size_t size_ret;
	int skip = 0;
	int size;
	u8 *buf;

#if IS_ENABLED(CONFIG_SND_SOC_SOF_DEBUG_IPC_FLOOD_TEST)
	struct dentry *dentry;

	dentry = file->f_path.dentry;
	if ((!strcmp(dentry->d_name.name, "ipc_flood_count") ||
	     !strcmp(dentry->d_name.name, "ipc_flood_duration_ms")) &&
	    dfse->cache_buf) {
		if (*ppos)
			return 0;

		count = strlen(dfse->cache_buf);
		size_ret = copy_to_user(buffer, dfse->cache_buf, count);
		if (size_ret)
			return -EFAULT;

		*ppos += count;
		return count;
	}
#endif
	size = dfse->size;

	/* validate position & count */
	if (pos < 0)
		return -EINVAL;
	if (pos >= size || !count)
		return 0;
	/* find the minimum. min() is not used since it adds sparse warnings */
	if (count > size - pos)
		count = size - pos;

	/* align io read start to u32 multiple */
	pos = ALIGN_DOWN(pos, 4);

	/* intermediate buffer size must be u32 multiple */
	size = ALIGN(count, 4);

	/* if start position is unaligned, read extra u32 */
	if (unlikely(pos != *ppos)) {
		skip = *ppos - pos;
		if (pos + size + 4 < dfse->size)
			size += 4;
	}

	buf = kzalloc(size, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (dfse->type == SOF_DFSENTRY_TYPE_IOMEM) {
#if IS_ENABLED(CONFIG_SND_SOC_SOF_DEBUG_ENABLE_DEBUGFS_CACHE)
		/*
		 * If the DSP is active: copy from IO.
		 * If the DSP is suspended:
		 *  - Copy from IO if the memory is always accessible.
		 *  - Otherwise, copy from cached buffer.
		 */
		if (pm_runtime_active(sdev->dev) ||
		    dfse->access_type == SOF_DEBUGFS_ACCESS_ALWAYS) {
			memcpy_fromio(buf, dfse->io_mem + pos, size);
		} else {
			dev_info(sdev->dev,
				 "Copying cached debugfs data\n");
			memcpy(buf, dfse->cache_buf + pos, size);
		}
#else
		/* if the DSP is in D3 */
		if (!pm_runtime_active(sdev->dev) &&
		    dfse->access_type == SOF_DEBUGFS_ACCESS_D0_ONLY) {
			dev_err(sdev->dev,
				"error: debugfs entry cannot be read in DSP D3\n");
			kfree(buf);
			return -EINVAL;
		}

		memcpy_fromio(buf, dfse->io_mem + pos, size);
#endif
	} else {
		memcpy(buf, ((u8 *)(dfse->buf) + pos), size);
	}

	/* copy to userspace */
	size_ret = copy_to_user(buffer, buf + skip, count);

	kfree(buf);

	/* update count & position if copy succeeded */
	if (size_ret)
		return -EFAULT;

	*ppos = pos + count;

	return count;
}

static const struct file_operations sof_dfs_fops = {
	.open = simple_open,
	.read = sof_dfsentry_read,
	.llseek = default_llseek,
	.write = sof_dfsentry_write,
};

/* create FS entry for debug files that can expose DSP memories, registers */
int snd_sof_debugfs_io_item(struct snd_sof_dev *sdev,
			    void __iomem *base, size_t size,
			    const char *name,
			    enum sof_debugfs_access_type access_type)
{
	struct snd_sof_dfsentry *dfse;

	if (!sdev)
		return -EINVAL;

	dfse = devm_kzalloc(sdev->dev, sizeof(*dfse), GFP_KERNEL);
	if (!dfse)
		return -ENOMEM;

	dfse->type = SOF_DFSENTRY_TYPE_IOMEM;
	dfse->io_mem = base;
	dfse->size = size;
	dfse->sdev = sdev;
	dfse->access_type = access_type;

#if IS_ENABLED(CONFIG_SND_SOC_SOF_DEBUG_ENABLE_DEBUGFS_CACHE)
	/*
	 * allocate cache buffer that will be used to save the mem window
	 * contents prior to suspend
	 */
	if (access_type == SOF_DEBUGFS_ACCESS_D0_ONLY) {
		dfse->cache_buf = devm_kzalloc(sdev->dev, size, GFP_KERNEL);
		if (!dfse->cache_buf)
			return -ENOMEM;
	}
#endif

	debugfs_create_file(name, 0444, sdev->debugfs_root, dfse,
			    &sof_dfs_fops);

	/* add to dfsentry list */
	list_add(&dfse->list, &sdev->dfsentry_list);

	return 0;
}
EXPORT_SYMBOL_GPL(snd_sof_debugfs_io_item);

/* create FS entry for debug files to expose kernel memory */
int snd_sof_debugfs_buf_item(struct snd_sof_dev *sdev,
			     void *base, size_t size,
			     const char *name, mode_t mode)
{
	struct snd_sof_dfsentry *dfse;

	if (!sdev)
		return -EINVAL;

	dfse = devm_kzalloc(sdev->dev, sizeof(*dfse), GFP_KERNEL);
	if (!dfse)
		return -ENOMEM;

	dfse->type = SOF_DFSENTRY_TYPE_BUF;
	dfse->buf = base;
	dfse->size = size;
	dfse->sdev = sdev;

#if IS_ENABLED(CONFIG_SND_SOC_SOF_DEBUG_IPC_FLOOD_TEST)
	/*
	 * cache_buf is unused for SOF_DFSENTRY_TYPE_BUF debugfs entries.
	 * So, use it to save the results of the last IPC flood test.
	 */
	dfse->cache_buf = devm_kzalloc(sdev->dev, IPC_FLOOD_TEST_RESULT_LEN,
				       GFP_KERNEL);
	if (!dfse->cache_buf)
		return -ENOMEM;
#endif

	debugfs_create_file(name, mode, sdev->debugfs_root, dfse,
			    &sof_dfs_fops);
	/* add to dfsentry list */
	list_add(&dfse->list, &sdev->dfsentry_list);

	return 0;
}
EXPORT_SYMBOL_GPL(snd_sof_debugfs_buf_item);
630
Karol Trzcinski5b10b622020-11-24 20:00:17 +0200631static int memory_info_update(struct snd_sof_dev *sdev, char *buf, size_t buff_size)
632{
633 struct sof_ipc_cmd_hdr msg = {
634 .size = sizeof(struct sof_ipc_cmd_hdr),
635 .cmd = SOF_IPC_GLB_DEBUG | SOF_IPC_DEBUG_MEM_USAGE,
636 };
637 struct sof_ipc_dbg_mem_usage *reply;
638 int len;
639 int ret;
640 int i;
641
642 reply = kmalloc(SOF_IPC_MSG_MAX_SIZE, GFP_KERNEL);
643 if (!reply)
644 return -ENOMEM;
645
646 ret = pm_runtime_get_sync(sdev->dev);
647 if (ret < 0 && ret != -EACCES) {
648 pm_runtime_put_noidle(sdev->dev);
649 dev_err(sdev->dev, "error: enabling device failed: %d\n", ret);
650 goto error;
651 }
652
653 ret = sof_ipc_tx_message(sdev->ipc, msg.cmd, &msg, msg.size, reply, SOF_IPC_MSG_MAX_SIZE);
654 pm_runtime_mark_last_busy(sdev->dev);
655 pm_runtime_put_autosuspend(sdev->dev);
656 if (ret < 0 || reply->rhdr.error < 0) {
657 ret = min(ret, reply->rhdr.error);
658 dev_err(sdev->dev, "error: reading memory info failed, %d\n", ret);
659 goto error;
660 }
661
662 if (struct_size(reply, elems, reply->num_elems) != reply->rhdr.hdr.size) {
663 dev_err(sdev->dev, "error: invalid memory info ipc struct size, %d\n",
664 reply->rhdr.hdr.size);
665 ret = -EINVAL;
666 goto error;
667 }
668
669 for (i = 0, len = 0; i < reply->num_elems; i++) {
670 ret = snprintf(buf + len, buff_size - len, "zone %d.%d used %#8x free %#8x\n",
671 reply->elems[i].zone, reply->elems[i].id,
672 reply->elems[i].used, reply->elems[i].free);
673 if (ret < 0)
674 goto error;
675 len += ret;
676 }
677
678 ret = len;
679error:
680 kfree(reply);
681 return ret;
682}

static ssize_t memory_info_read(struct file *file, char __user *to, size_t count, loff_t *ppos)
{
	struct snd_sof_dfsentry *dfse = file->private_data;
	struct snd_sof_dev *sdev = dfse->sdev;
	int data_length;

	/* read memory info from FW only once for each file read */
	if (!*ppos) {
		dfse->buf_data_size = 0;
		data_length = memory_info_update(sdev, dfse->buf, dfse->size);
		if (data_length < 0)
			return data_length;
		dfse->buf_data_size = data_length;
	}

	return simple_read_from_buffer(to, count, ppos, dfse->buf, dfse->buf_data_size);
}

static int memory_info_open(struct inode *inode, struct file *file)
{
	struct snd_sof_dfsentry *dfse = inode->i_private;
	struct snd_sof_dev *sdev = dfse->sdev;

	file->private_data = dfse;

	/* allocate buffer memory only in first open run, to save memory when unused */
	if (!dfse->buf) {
		dfse->buf = devm_kmalloc(sdev->dev, PAGE_SIZE, GFP_KERNEL);
		if (!dfse->buf)
			return -ENOMEM;
		dfse->size = PAGE_SIZE;
	}

	return 0;
}

static const struct file_operations memory_info_fops = {
	.open = memory_info_open,
	.read = memory_info_read,
	.llseek = default_llseek,
};
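
/*
 * Illustrative usage, assuming debugfs is mounted at /sys/kernel/debug:
 *
 *   cat /sys/kernel/debug/sof/memory_info
 *
 * Each read queries the firmware via SOF_IPC_DEBUG_MEM_USAGE and prints one
 * "zone <zone>.<id> used <n> free <n>" line per reported memory zone.
 */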

int snd_sof_dbg_memory_info_init(struct snd_sof_dev *sdev)
{
	struct snd_sof_dfsentry *dfse;

	dfse = devm_kzalloc(sdev->dev, sizeof(*dfse), GFP_KERNEL);
	if (!dfse)
		return -ENOMEM;

	/* don't allocate buffer before first usage, to save memory when unused */
	dfse->type = SOF_DFSENTRY_TYPE_BUF;
	dfse->sdev = sdev;

	debugfs_create_file("memory_info", 0444, sdev->debugfs_root, dfse, &memory_info_fops);

	/* add to dfsentry list */
	list_add(&dfse->list, &sdev->dfsentry_list);
	return 0;
}
EXPORT_SYMBOL_GPL(snd_sof_dbg_memory_info_init);

int snd_sof_dbg_init(struct snd_sof_dev *sdev)
{
	const struct snd_sof_dsp_ops *ops = sof_ops(sdev);
	const struct snd_sof_debugfs_map *map;
	int i;
	int err;

	/* use "sof" as top level debugFS dir */
	sdev->debugfs_root = debugfs_create_dir("sof", NULL);

	/* init dfsentry list */
	INIT_LIST_HEAD(&sdev->dfsentry_list);

	/* create debugFS files for platform specific MMIO/DSP memories */
	for (i = 0; i < ops->debug_map_count; i++) {
		map = &ops->debug_map[i];

		err = snd_sof_debugfs_io_item(sdev, sdev->bar[map->bar] +
					      map->offset, map->size,
					      map->name, map->access_type);
		/* errors are only due to memory allocation, not debugfs */
		if (err < 0)
			return err;
	}

#if IS_ENABLED(CONFIG_SND_SOC_SOF_DEBUG_PROBES)
	err = snd_sof_debugfs_probe_item(sdev, "probe_points",
					 0644, &probe_points_fops);
	if (err < 0)
		return err;
	err = snd_sof_debugfs_probe_item(sdev, "probe_points_remove",
					 0200, &probe_points_remove_fops);
	if (err < 0)
		return err;
#endif

#if IS_ENABLED(CONFIG_SND_SOC_SOF_DEBUG_IPC_FLOOD_TEST)
	/* create read-write ipc_flood_count debugfs entry */
	err = snd_sof_debugfs_buf_item(sdev, NULL, 0,
				       "ipc_flood_count", 0666);

	/* errors are only due to memory allocation, not debugfs */
	if (err < 0)
		return err;

	/* create read-write ipc_flood_duration_ms debugfs entry */
	err = snd_sof_debugfs_buf_item(sdev, NULL, 0,
				       "ipc_flood_duration_ms", 0666);

	/* errors are only due to memory allocation, not debugfs */
	if (err < 0)
		return err;
#endif

	return 0;
}
EXPORT_SYMBOL_GPL(snd_sof_dbg_init);

void snd_sof_free_debug(struct snd_sof_dev *sdev)
{
	debugfs_remove_recursive(sdev->debugfs_root);
}
EXPORT_SYMBOL_GPL(snd_sof_free_debug);

void snd_sof_handle_fw_exception(struct snd_sof_dev *sdev)
{
	if (IS_ENABLED(CONFIG_SND_SOC_SOF_DEBUG_RETAIN_DSP_CONTEXT) ||
	    (sof_core_debug & SOF_DBG_RETAIN_CTX)) {
		/* should we prevent DSP entering D3 ? */
		dev_info(sdev->dev, "info: preventing DSP entering D3 state to preserve context\n");
		pm_runtime_get_noresume(sdev->dev);
	}

	/* dump vital information to the logs */
	snd_sof_dsp_dbg_dump(sdev, SOF_DBG_DUMP_REGS | SOF_DBG_DUMP_MBOX);
	snd_sof_ipc_dump(sdev);
	snd_sof_trace_notify_for_error(sdev);
}
EXPORT_SYMBOL(snd_sof_handle_fw_exception);