// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
//
// This file is provided under a dual BSD/GPLv2 license. When using or
// redistributing this file, you may do so under either license.
//
// Copyright(c) 2018 Intel Corporation. All rights reserved.
//
// Author: Liam Girdwood <liam.r.girdwood@linux.intel.com>
//

#include <linux/debugfs.h>
#include <linux/sched/signal.h>
#include "sof-priv.h"
#include "ops.h"

static size_t sof_trace_avail(struct snd_sof_dev *sdev,
                              loff_t pos, size_t buffer_size)
{
        loff_t host_offset = READ_ONCE(sdev->host_offset);

        /*
         * If the host offset is less than the local read position, the write
         * pointer of the host DMA buffer has wrapped. Return the trace data
         * from the current position up to the end of the buffer first.
         */
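        /*
         * Worked example (illustrative values only): with a buffer_size of
         * 0x2000 bytes and pos at 0x1800, a host_offset of 0x400 means the
         * firmware writer has wrapped, so 0x2000 - 0x1800 = 0x800 tail
         * bytes are reported first; the reader then wraps back to offset 0
         * on its next call.
         */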
        if (host_offset < pos)
                return buffer_size - pos;

        /* Trace data is already available, no need to wait */
        if (host_offset > pos)
                return host_offset - pos;

        return 0;
}

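/*
 * Block until the firmware advances host_offset past the local read
 * position. The sleep is interruptible and is also woken by
 * snd_sof_trace_notify_for_error() on a DSP-side error, so the function
 * can still return 0 if no new trace data has arrived.
 */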
static size_t sof_wait_trace_avail(struct snd_sof_dev *sdev,
                                   loff_t pos, size_t buffer_size)
{
        wait_queue_entry_t wait;
        size_t ret = sof_trace_avail(sdev, pos, buffer_size);

        /* data immediately available */
        if (ret)
                return ret;

        /* wait for available trace data from FW */
        init_waitqueue_entry(&wait, current);
        set_current_state(TASK_INTERRUPTIBLE);
        add_wait_queue(&sdev->trace_sleep, &wait);

        if (!signal_pending(current)) {
                /* set timeout to max value, no error code */
                schedule_timeout(MAX_SCHEDULE_TIMEOUT);
        }
        remove_wait_queue(&sdev->trace_sleep, &wait);

        return sof_trace_avail(sdev, pos, buffer_size);
}

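/*
 * debugfs read handler for the trace buffer: the file position is folded
 * into the circular DMA buffer, the call blocks until the firmware makes
 * new data available (or an error/signal ends the wait), and at most one
 * contiguous chunk is copied to user space per call.
 */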
static ssize_t sof_dfsentry_trace_read(struct file *file, char __user *buffer,
                                       size_t count, loff_t *ppos)
{
        struct snd_sof_dfsentry *dfse = file->private_data;
        struct snd_sof_dev *sdev = dfse->sdev;
        unsigned long rem;
        loff_t lpos = *ppos;
        size_t avail, buffer_size = dfse->size;
        u64 lpos_64;

        /* clear any stale error so DSP-side failures during this read are noticed */
        sdev->dtrace_error = false;

        /* check pos and count */
        if (lpos < 0)
                return -EINVAL;
        if (!count)
                return 0;

        /*
         * Fold the file position into the circular buffer: do_div() leaves
         * the quotient in lpos_64 and returns the remainder, which becomes
         * the local read offset. Then clamp count so it cannot run past the
         * end of the buffer.
         */
        lpos_64 = lpos;
        lpos = do_div(lpos_64, buffer_size);

        if (count > buffer_size - lpos) /* min() not used to avoid sparse warnings */
                count = buffer_size - lpos;

        /* get available count based on current host offset */
        avail = sof_wait_trace_avail(sdev, lpos, buffer_size);
        if (sdev->dtrace_error) {
                dev_err(sdev->dev, "error: trace IO error\n");
                return -EIO;
        }

        /* make sure count is <= avail */
        count = avail > count ? count : avail;

        /* copy available trace data to user space */
        rem = copy_to_user(buffer, ((u8 *)(dfse->buf) + lpos), count);
        if (rem)
                return -EFAULT;

        /* move the debugfs read position */
        *ppos += count;

        return count;
}

static const struct file_operations sof_dfs_trace_fops = {
        .open = simple_open,
        .read = sof_dfsentry_trace_read,
        .llseek = default_llseek,
};

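/*
 * Expose the trace buffer as a read-only debugfs file named "trace" under
 * the driver's debugfs root. With the default debugfs mount point this is
 * typically /sys/kernel/debug/sof/trace (the exact path depends on how
 * debugfs_root was created), and it can be read with plain cat or a
 * decoder such as the SOF logger tool.
 */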
static int trace_debugfs_create(struct snd_sof_dev *sdev)
{
        struct snd_sof_dfsentry *dfse;

        if (!sdev)
                return -EINVAL;

        dfse = devm_kzalloc(sdev->dev, sizeof(*dfse), GFP_KERNEL);
        if (!dfse)
                return -ENOMEM;

        dfse->type = SOF_DFSENTRY_TYPE_BUF;
        dfse->buf = sdev->dmatb.area;
        dfse->size = sdev->dmatb.bytes;
        dfse->sdev = sdev;

        dfse->dfsentry = debugfs_create_file("trace", 0444, sdev->debugfs_root,
                                             dfse, &sof_dfs_trace_fops);
        if (!dfse->dfsentry) {
                /* can't rely on debugfs, only log error and keep going */
                dev_err(sdev->dev,
                        "error: cannot create debugfs entry for trace\n");
        }

        return 0;
}

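/*
 * Configure and start DMA trace: set up the host-side DMA stream, pass the
 * trace buffer description (physical address, size, page count, stream tag)
 * to the firmware over IPC, then trigger the DMA. On failure the DMA
 * resources are released again before returning.
 */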
int snd_sof_init_trace_ipc(struct snd_sof_dev *sdev)
{
        struct sof_ipc_dma_trace_params params;
        struct sof_ipc_reply ipc_reply;
        int ret;

        if (sdev->dtrace_is_enabled || !sdev->dma_trace_pages)
                return -EINVAL;

        /* set IPC parameters */
        params.hdr.size = sizeof(params);
        params.hdr.cmd = SOF_IPC_GLB_TRACE_MSG | SOF_IPC_TRACE_DMA_PARAMS;
        params.buffer.phy_addr = sdev->dmatp.addr;
        params.buffer.size = sdev->dmatb.bytes;
        params.buffer.pages = sdev->dma_trace_pages;
        params.stream_tag = 0;

        sdev->host_offset = 0;

        ret = snd_sof_dma_trace_init(sdev, &params.stream_tag);
        if (ret < 0) {
                dev_err(sdev->dev,
                        "error: snd_sof_dma_trace_init failed: %d\n", ret);
                return ret;
        }
        dev_dbg(sdev->dev, "stream_tag: %d\n", params.stream_tag);

        /* send IPC to the DSP */
        ret = sof_ipc_tx_message(sdev->ipc,
                                 params.hdr.cmd, &params, sizeof(params),
                                 &ipc_reply, sizeof(ipc_reply));
        if (ret < 0) {
                dev_err(sdev->dev,
                        "error: can't set DMA trace params: %d\n", ret);
                goto trace_release;
        }

        ret = snd_sof_dma_trace_trigger(sdev, SNDRV_PCM_TRIGGER_START);
        if (ret < 0) {
                dev_err(sdev->dev,
                        "error: snd_sof_dma_trace_trigger: start: %d\n", ret);
                goto trace_release;
        }

        sdev->dtrace_is_enabled = true;

        return 0;

trace_release:
        snd_sof_dma_trace_release(sdev);
        return ret;
}

int snd_sof_init_trace(struct snd_sof_dev *sdev)
{
        int ret;

        /* clear the flag before starting initialization */
        sdev->dtrace_is_enabled = false;

        /* allocate trace page table buffer */
        ret = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, sdev->dev,
                                  PAGE_SIZE, &sdev->dmatp);
        if (ret < 0) {
                dev_err(sdev->dev,
                        "error: can't alloc page table for trace %d\n", ret);
                return ret;
        }

        /* allocate trace data buffer */
        ret = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV_SG, sdev->dev,
                                  DMA_BUF_SIZE_FOR_TRACE, &sdev->dmatb);
        if (ret < 0) {
                dev_err(sdev->dev,
                        "error: can't alloc buffer for trace %d\n", ret);
                goto page_err;
        }

        /* create compressed page table for audio firmware */
        ret = snd_sof_create_page_table(sdev, &sdev->dmatb, sdev->dmatp.area,
                                        sdev->dmatb.bytes);
        if (ret < 0)
                goto table_err;

        sdev->dma_trace_pages = ret;
        dev_dbg(sdev->dev, "dma_trace_pages: %d\n", sdev->dma_trace_pages);

        if (sdev->first_boot) {
                ret = trace_debugfs_create(sdev);
                if (ret < 0)
                        goto table_err;
        }

        init_waitqueue_head(&sdev->trace_sleep);

        ret = snd_sof_init_trace_ipc(sdev);
        if (ret < 0)
                goto table_err;

        return 0;
table_err:
        sdev->dma_trace_pages = 0;
        snd_dma_free_pages(&sdev->dmatb);
page_err:
        snd_dma_free_pages(&sdev->dmatp);
        return ret;
}
EXPORT_SYMBOL(snd_sof_init_trace);

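/*
 * Called when the firmware reports a new host write offset via the trace
 * position IPC. Caches the offset and wakes any reader blocked in
 * sof_wait_trace_avail(); DSP-side buffer overflows are logged here as well.
 */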
int snd_sof_trace_update_pos(struct snd_sof_dev *sdev,
                             struct sof_ipc_dma_trace_posn *posn)
{
        if (sdev->dtrace_is_enabled && sdev->host_offset != posn->host_offset) {
                sdev->host_offset = posn->host_offset;
                wake_up(&sdev->trace_sleep);
        }

        if (posn->overflow != 0)
                dev_err(sdev->dev,
                        "error: DSP trace buffer overflow %u bytes. Total messages %d\n",
                        posn->overflow, posn->messages);

        return 0;
}

/* an error has occurred within the DSP that prevents further trace */
void snd_sof_trace_notify_for_error(struct snd_sof_dev *sdev)
{
        if (sdev->dtrace_is_enabled) {
                dev_err(sdev->dev, "error: waking up any trace sleepers\n");
                sdev->dtrace_error = true;
                wake_up(&sdev->trace_sleep);
        }
}
EXPORT_SYMBOL(snd_sof_trace_notify_for_error);

void snd_sof_release_trace(struct snd_sof_dev *sdev)
{
        int ret;

        if (!sdev->dtrace_is_enabled)
                return;

        ret = snd_sof_dma_trace_trigger(sdev, SNDRV_PCM_TRIGGER_STOP);
        if (ret < 0)
                dev_err(sdev->dev,
                        "error: snd_sof_dma_trace_trigger: stop: %d\n", ret);

        ret = snd_sof_dma_trace_release(sdev);
        if (ret < 0)
                dev_err(sdev->dev,
                        "error: snd_sof_dma_trace_release failed: %d\n", ret);

        sdev->dtrace_is_enabled = false;
}
EXPORT_SYMBOL(snd_sof_release_trace);

void snd_sof_free_trace(struct snd_sof_dev *sdev)
{
        snd_sof_release_trace(sdev);

        snd_dma_free_pages(&sdev->dmatb);
        snd_dma_free_pages(&sdev->dmatp);
}
EXPORT_SYMBOL(snd_sof_free_trace);