// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
//
// This file is provided under a dual BSD/GPLv2 license. When using or
// redistributing this file, you may do so under either license.
//
// Copyright(c) 2018 Intel Corporation. All rights reserved.
//
// Author: Liam Girdwood <liam.r.girdwood@linux.intel.com>
//

#include <linux/debugfs.h>
#include <linux/sched/signal.h>
#include "sof-priv.h"
#include "ops.h"

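/*
 * Wait until trace data is available in the host DMA buffer. Returns the
 * number of bytes that can be copied starting at @pos, taking a wrap of
 * the firmware write pointer at the end of the buffer into account.
 */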
static size_t sof_wait_trace_avail(struct snd_sof_dev *sdev,
                                   loff_t pos, size_t buffer_size)
{
        wait_queue_entry_t wait;
        loff_t host_offset = READ_ONCE(sdev->host_offset);

        /*
         * If the host offset is less than the local read position, the write
         * pointer of the host DMA buffer has wrapped. Return the trace data
         * at the end of the host DMA buffer first.
         */
        if (host_offset < pos)
                return buffer_size - pos;

        /* if trace data is already available, there is no need to wait */
        if (host_offset > pos)
                return host_offset - pos;

        /* wait for available trace data from the firmware */
        init_waitqueue_entry(&wait, current);
        set_current_state(TASK_INTERRUPTIBLE);
        add_wait_queue(&sdev->trace_sleep, &wait);

        if (!signal_pending(current)) {
                /*
                 * sleep until woken by a position update or an error;
                 * MAX_SCHEDULE_TIMEOUT means there is no timeout
                 */
                schedule_timeout(MAX_SCHEDULE_TIMEOUT);
        }
        remove_wait_queue(&sdev->trace_sleep, &wait);

        /* return the number of bytes available for copying */
        host_offset = READ_ONCE(sdev->host_offset);
        if (host_offset < pos)
                return buffer_size - pos;

        return host_offset - pos;
}

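/*
 * debugfs read() handler for the trace entry: wait for new trace data,
 * then copy as much of it as possible from the DMA buffer to userspace,
 * wrapping the local read position around the circular buffer as needed.
 */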
static ssize_t sof_dfsentry_trace_read(struct file *file, char __user *buffer,
                                       size_t count, loff_t *ppos)
{
        struct snd_sof_dfsentry *dfse = file->private_data;
        struct snd_sof_dev *sdev = dfse->sdev;
        unsigned long rem;
        loff_t lpos = *ppos;
        size_t avail, buffer_size = dfse->size;
        u64 lpos_64;

        /* clear any previous error so failures on the DSP side are noticed */
        sdev->dtrace_error = false;

        /* check pos and count */
        if (lpos < 0)
                return -EINVAL;
        if (!count)
                return 0;

        /* check for buffer wrap and count overflow */
        lpos_64 = lpos;
        lpos = do_div(lpos_64, buffer_size);

        if (count > buffer_size - lpos) /* min() not used to avoid sparse warnings */
                count = buffer_size - lpos;

        /* get available count based on current host offset */
        avail = sof_wait_trace_avail(sdev, lpos, buffer_size);
        if (sdev->dtrace_error) {
                dev_err(sdev->dev, "error: trace IO error\n");
                return -EIO;
        }

        /* make sure count is <= avail */
        count = avail > count ? count : avail;

        /* copy available trace data to the user buffer */
        rem = copy_to_user(buffer, ((u8 *)(dfse->buf) + lpos), count);
        if (rem)
                return -EFAULT;

        /* advance the read position and return the number of bytes copied */
        *ppos += count;

        return count;
}

static const struct file_operations sof_dfs_trace_fops = {
        .open = simple_open,
        .read = sof_dfsentry_trace_read,
        .llseek = default_llseek,
};

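/*
 * Expose the trace DMA buffer as a read-only debugfs file. With debugfs
 * mounted in its usual location this typically shows up as
 * /sys/kernel/debug/sof/trace, though the exact path depends on where
 * sdev->debugfs_root was created.
 */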
static int trace_debugfs_create(struct snd_sof_dev *sdev)
{
        struct snd_sof_dfsentry *dfse;

        if (!sdev)
                return -EINVAL;

        dfse = devm_kzalloc(sdev->dev, sizeof(*dfse), GFP_KERNEL);
        if (!dfse)
                return -ENOMEM;

        dfse->type = SOF_DFSENTRY_TYPE_BUF;
        dfse->buf = sdev->dmatb.area;
        dfse->size = sdev->dmatb.bytes;
        dfse->sdev = sdev;

        dfse->dfsentry = debugfs_create_file("trace", 0444, sdev->debugfs_root,
                                             dfse, &sof_dfs_trace_fops);
        if (!dfse->dfsentry) {
                /* can't rely on debugfs, only log error and keep going */
                dev_err(sdev->dev,
                        "error: cannot create debugfs entry for trace\n");
        }

        return 0;
}

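/*
 * Enable DMA tracing on the DSP: claim a DMA trace stream, pass the trace
 * buffer parameters to the firmware over IPC and start the trace DMA.
 * The DMA stream is released again if any step fails.
 */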
int snd_sof_init_trace_ipc(struct snd_sof_dev *sdev)
{
        struct sof_ipc_dma_trace_params params;
        struct sof_ipc_reply ipc_reply;
        int ret;

        if (sdev->dtrace_is_enabled || !sdev->dma_trace_pages)
                return -EINVAL;

        /* set IPC parameters */
        params.hdr.size = sizeof(params);
        params.hdr.cmd = SOF_IPC_GLB_TRACE_MSG | SOF_IPC_TRACE_DMA_PARAMS;
        params.buffer.phy_addr = sdev->dmatp.addr;
        params.buffer.size = sdev->dmatb.bytes;
        params.buffer.pages = sdev->dma_trace_pages;
        params.stream_tag = 0;

        sdev->host_offset = 0;

        ret = snd_sof_dma_trace_init(sdev, &params.stream_tag);
        if (ret < 0) {
                dev_err(sdev->dev,
                        "error: snd_sof_dma_trace_init failed: %d\n", ret);
                return ret;
        }
        dev_dbg(sdev->dev, "stream_tag: %d\n", params.stream_tag);

        /* send IPC to the DSP */
        ret = sof_ipc_tx_message(sdev->ipc,
                                 params.hdr.cmd, &params, sizeof(params),
                                 &ipc_reply, sizeof(ipc_reply));
        if (ret < 0) {
                dev_err(sdev->dev,
                        "error: can't set DMA params for trace: %d\n", ret);
                goto trace_release;
        }

        ret = snd_sof_dma_trace_trigger(sdev, SNDRV_PCM_TRIGGER_START);
        if (ret < 0) {
                dev_err(sdev->dev,
                        "error: snd_sof_dma_trace_trigger: start: %d\n", ret);
                goto trace_release;
        }

        sdev->dtrace_is_enabled = true;

        return 0;

trace_release:
        snd_sof_dma_trace_release(sdev);
        return ret;
}

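/*
 * Allocate the trace page table and data buffers, build the compressed
 * page table passed to the firmware, create the debugfs entry on first
 * boot and finally enable tracing via snd_sof_init_trace_ipc().
 */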
int snd_sof_init_trace(struct snd_sof_dev *sdev)
{
        int ret;

        /* mark tracing as disabled before starting initialization */
        sdev->dtrace_is_enabled = false;

        /* allocate trace page table buffer */
        ret = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, sdev->dev,
                                  PAGE_SIZE, &sdev->dmatp);
        if (ret < 0) {
                dev_err(sdev->dev,
                        "error: can't alloc page table for trace %d\n", ret);
                return ret;
        }

        /* allocate trace data buffer */
        ret = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV_SG, sdev->dev,
                                  DMA_BUF_SIZE_FOR_TRACE, &sdev->dmatb);
        if (ret < 0) {
                dev_err(sdev->dev,
                        "error: can't alloc buffer for trace %d\n", ret);
                goto page_err;
        }

        /* create compressed page table for audio firmware */
        ret = snd_sof_create_page_table(sdev, &sdev->dmatb, sdev->dmatp.area,
                                        sdev->dmatb.bytes);
        if (ret < 0)
                goto table_err;

        sdev->dma_trace_pages = ret;
        dev_dbg(sdev->dev, "dma_trace_pages: %d\n", sdev->dma_trace_pages);

        if (sdev->first_boot) {
                ret = trace_debugfs_create(sdev);
                if (ret < 0)
                        goto table_err;
        }

        init_waitqueue_head(&sdev->trace_sleep);

        ret = snd_sof_init_trace_ipc(sdev);
        if (ret < 0)
                goto table_err;

        return 0;
table_err:
        sdev->dma_trace_pages = 0;
        snd_dma_free_pages(&sdev->dmatb);
page_err:
        snd_dma_free_pages(&sdev->dmatp);
        return ret;
}
EXPORT_SYMBOL(snd_sof_init_trace);

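/*
 * Handle a DMA trace position update reported by the firmware: record the
 * new host offset and wake any reader sleeping in sof_wait_trace_avail().
 */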
int snd_sof_trace_update_pos(struct snd_sof_dev *sdev,
                             struct sof_ipc_dma_trace_posn *posn)
{
        if (sdev->dtrace_is_enabled && sdev->host_offset != posn->host_offset) {
                sdev->host_offset = posn->host_offset;
                wake_up(&sdev->trace_sleep);
        }

        if (posn->overflow != 0)
                dev_err(sdev->dev,
                        "error: DSP trace buffer overflow %u bytes. Total messages %d\n",
                        posn->overflow, posn->messages);

        return 0;
}

/* an error has occurred within the DSP that prevents further tracing */
void snd_sof_trace_notify_for_error(struct snd_sof_dev *sdev)
{
        if (sdev->dtrace_is_enabled) {
                dev_err(sdev->dev, "error: waking up any trace sleepers\n");
                sdev->dtrace_error = true;
                wake_up(&sdev->trace_sleep);
        }
}
EXPORT_SYMBOL(snd_sof_trace_notify_for_error);

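/*
 * Stop the trace DMA and release the DMA trace stream. Does nothing if
 * tracing is not currently enabled.
 */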
void snd_sof_release_trace(struct snd_sof_dev *sdev)
{
        int ret;

        if (!sdev->dtrace_is_enabled)
                return;

        ret = snd_sof_dma_trace_trigger(sdev, SNDRV_PCM_TRIGGER_STOP);
        if (ret < 0)
                dev_err(sdev->dev,
                        "error: snd_sof_dma_trace_trigger: stop: %d\n", ret);

        ret = snd_sof_dma_trace_release(sdev);
        if (ret < 0)
                dev_err(sdev->dev,
                        "error: snd_sof_dma_trace_release failed: %d\n", ret);

        sdev->dtrace_is_enabled = false;
}
EXPORT_SYMBOL(snd_sof_release_trace);

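/*
 * Stop tracing and free the trace data and page table buffers allocated
 * in snd_sof_init_trace().
 */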
void snd_sof_free_trace(struct snd_sof_dev *sdev)
{
        snd_sof_release_trace(sdev);

        snd_dma_free_pages(&sdev->dmatb);
        snd_dma_free_pages(&sdev->dmatp);
}
EXPORT_SYMBOL(snd_sof_free_trace);