/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
/*
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * Copyright(c) 2018 Intel Corporation. All rights reserved.
 *
 * Author: Liam Girdwood <liam.r.girdwood@linux.intel.com>
 */

#ifndef __SOUND_SOC_SOF_IO_H
#define __SOUND_SOC_SOF_IO_H

#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <sound/pcm.h>
#include "sof-priv.h"

#define sof_ops(sdev) \
	((sdev)->pdata->desc->ops)

/* Mandatory operations are verified during probing */

/* init */
static inline int snd_sof_probe(struct snd_sof_dev *sdev)
{
	return sof_ops(sdev)->probe(sdev);
}

static inline int snd_sof_remove(struct snd_sof_dev *sdev)
{
	if (sof_ops(sdev)->remove)
		return sof_ops(sdev)->remove(sdev);

	return 0;
}

static inline int snd_sof_shutdown(struct snd_sof_dev *sdev)
{
	if (sof_ops(sdev)->shutdown)
		return sof_ops(sdev)->shutdown(sdev);

	return 0;
}

/* control */

/*
 * snd_sof_dsp_run returns the core mask of the cores that are available
 * after successful fw boot
 */
static inline int snd_sof_dsp_run(struct snd_sof_dev *sdev)
{
	return sof_ops(sdev)->run(sdev);
}

static inline int snd_sof_dsp_stall(struct snd_sof_dev *sdev, unsigned int core_mask)
{
	if (sof_ops(sdev)->stall)
		return sof_ops(sdev)->stall(sdev, core_mask);

	return 0;
}

static inline int snd_sof_dsp_reset(struct snd_sof_dev *sdev)
{
	if (sof_ops(sdev)->reset)
		return sof_ops(sdev)->reset(sdev);

	return 0;
}

/* dsp core get/put */
static inline int snd_sof_dsp_core_get(struct snd_sof_dev *sdev, int core)
{
	if (core > sdev->num_cores - 1) {
		dev_err(sdev->dev, "invalid core id: %d for num_cores: %d\n", core,
			sdev->num_cores);
		return -EINVAL;
	}

	if (sof_ops(sdev)->core_get) {
		int ret;

		/* if current ref_count is > 0, increment it and return */
		if (sdev->dsp_core_ref_count[core] > 0) {
			sdev->dsp_core_ref_count[core]++;
			return 0;
		}

		/* power up the core */
		ret = sof_ops(sdev)->core_get(sdev, core);
		if (ret < 0)
			return ret;

		/* increment ref_count */
		sdev->dsp_core_ref_count[core]++;

		/* and update enabled_cores_mask */
		sdev->enabled_cores_mask |= BIT(core);

		dev_dbg(sdev->dev, "Core %d powered up\n", core);
	}

	return 0;
}

static inline int snd_sof_dsp_core_put(struct snd_sof_dev *sdev, int core)
{
	if (core > sdev->num_cores - 1) {
		dev_err(sdev->dev, "invalid core id: %d for num_cores: %d\n", core,
			sdev->num_cores);
		return -EINVAL;
	}

	if (sof_ops(sdev)->core_put) {
		int ret;

		/* decrement ref_count and return if it is > 0 */
		if (--(sdev->dsp_core_ref_count[core]) > 0)
			return 0;

		/* power down the core */
		ret = sof_ops(sdev)->core_put(sdev, core);
		if (ret < 0)
			return ret;

		/* and update enabled_cores_mask */
		sdev->enabled_cores_mask &= ~BIT(core);

		dev_dbg(sdev->dev, "Core %d powered down\n", core);
	}

	return 0;
}
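
/*
 * Illustrative usage sketch (not taken from any particular caller; "core"
 * here is a hypothetical core index): callers that need a secondary DSP
 * core powered, e.g. while a pipeline scheduled on it is in use, pair the
 * two helpers so the reference count stays balanced:
 *
 *	ret = snd_sof_dsp_core_get(sdev, core);
 *	if (ret < 0)
 *		return ret;
 *
 *	... use the core ...
 *
 *	snd_sof_dsp_core_put(sdev, core);
 */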

/* pre/post fw load */
static inline int snd_sof_dsp_pre_fw_run(struct snd_sof_dev *sdev)
{
	if (sof_ops(sdev)->pre_fw_run)
		return sof_ops(sdev)->pre_fw_run(sdev);

	return 0;
}

static inline int snd_sof_dsp_post_fw_run(struct snd_sof_dev *sdev)
{
	if (sof_ops(sdev)->post_fw_run)
		return sof_ops(sdev)->post_fw_run(sdev);

	return 0;
}

/* parse platform specific extended manifest */
static inline int snd_sof_dsp_parse_platform_ext_manifest(struct snd_sof_dev *sdev,
							   const struct sof_ext_man_elem_header *hdr)
{
	if (sof_ops(sdev)->parse_platform_ext_manifest)
		return sof_ops(sdev)->parse_platform_ext_manifest(sdev, hdr);

	return 0;
}

/* misc */

/**
 * snd_sof_dsp_get_bar_index - Maps a section type to a BAR index
 *
 * @sdev: sof device
 * @type: section type as described by snd_sof_fw_blk_type
 *
 * Returns the BAR index corresponding to @type (a positive integer), or
 * -EINVAL if the platform provides no mapping. Platforms that do not
 * implement the get_bar_index op fall back to the default mmio_bar.
 */
static inline int snd_sof_dsp_get_bar_index(struct snd_sof_dev *sdev, u32 type)
{
	if (sof_ops(sdev)->get_bar_index)
		return sof_ops(sdev)->get_bar_index(sdev, type);

	return sdev->mmio_bar;
}
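
/*
 * Illustrative sketch only (SOF_FW_BLK_TYPE_SRAM is one of the block types
 * in enum snd_sof_fw_blk_type): resolving which BAR a firmware block lives
 * in before accessing it:
 *
 *	int bar = snd_sof_dsp_get_bar_index(sdev, SOF_FW_BLK_TYPE_SRAM);
 *
 *	if (bar < 0)
 *		return bar;
 */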

static inline int snd_sof_dsp_get_mailbox_offset(struct snd_sof_dev *sdev)
{
	if (sof_ops(sdev)->get_mailbox_offset)
		return sof_ops(sdev)->get_mailbox_offset(sdev);

	dev_err(sdev->dev, "error: %s not defined\n", __func__);
	return -ENOTSUPP;
}

static inline int snd_sof_dsp_get_window_offset(struct snd_sof_dev *sdev,
						u32 id)
{
	if (sof_ops(sdev)->get_window_offset)
		return sof_ops(sdev)->get_window_offset(sdev, id);

	dev_err(sdev->dev, "error: %s not defined\n", __func__);
	return -ENOTSUPP;
}

/* power management */
static inline int snd_sof_dsp_resume(struct snd_sof_dev *sdev)
{
	if (sof_ops(sdev)->resume)
		return sof_ops(sdev)->resume(sdev);

	return 0;
}

static inline int snd_sof_dsp_suspend(struct snd_sof_dev *sdev,
				      u32 target_state)
{
	if (sof_ops(sdev)->suspend)
		return sof_ops(sdev)->suspend(sdev, target_state);

	return 0;
}

static inline int snd_sof_dsp_runtime_resume(struct snd_sof_dev *sdev)
{
	if (sof_ops(sdev)->runtime_resume)
		return sof_ops(sdev)->runtime_resume(sdev);

	return 0;
}

static inline int snd_sof_dsp_runtime_suspend(struct snd_sof_dev *sdev)
{
	if (sof_ops(sdev)->runtime_suspend)
		return sof_ops(sdev)->runtime_suspend(sdev);

	return 0;
}

static inline int snd_sof_dsp_runtime_idle(struct snd_sof_dev *sdev)
{
	if (sof_ops(sdev)->runtime_idle)
		return sof_ops(sdev)->runtime_idle(sdev);

	return 0;
}

static inline int snd_sof_dsp_hw_params_upon_resume(struct snd_sof_dev *sdev)
{
	if (sof_ops(sdev)->set_hw_params_upon_resume)
		return sof_ops(sdev)->set_hw_params_upon_resume(sdev);

	return 0;
}

static inline int snd_sof_dsp_set_clk(struct snd_sof_dev *sdev, u32 freq)
{
	if (sof_ops(sdev)->set_clk)
		return sof_ops(sdev)->set_clk(sdev, freq);

	return 0;
}

static inline int
snd_sof_dsp_set_power_state(struct snd_sof_dev *sdev,
			    const struct sof_dsp_power_state *target_state)
{
	int ret = 0;

	mutex_lock(&sdev->power_state_access);

	if (sof_ops(sdev)->set_power_state)
		ret = sof_ops(sdev)->set_power_state(sdev, target_state);

	mutex_unlock(&sdev->power_state_access);

	return ret;
}

/* debug */
void snd_sof_dsp_dbg_dump(struct snd_sof_dev *sdev, const char *msg, u32 flags);

static inline int snd_sof_debugfs_add_region_item(struct snd_sof_dev *sdev,
						  enum snd_sof_fw_blk_type blk_type, u32 offset, size_t size,
						  const char *name, enum sof_debugfs_access_type access_type)
{
	if (sof_ops(sdev) && sof_ops(sdev)->debugfs_add_region_item)
		return sof_ops(sdev)->debugfs_add_region_item(sdev, blk_type, offset,
							      size, name, access_type);

	return 0;
}

/* register IO */
static inline void snd_sof_dsp_write(struct snd_sof_dev *sdev, u32 bar,
				     u32 offset, u32 value)
{
	if (sof_ops(sdev)->write) {
		sof_ops(sdev)->write(sdev, sdev->bar[bar] + offset, value);
		return;
	}

	dev_err_ratelimited(sdev->dev, "error: %s not defined\n", __func__);
}

static inline void snd_sof_dsp_write64(struct snd_sof_dev *sdev, u32 bar,
				       u32 offset, u64 value)
{
	if (sof_ops(sdev)->write64) {
		sof_ops(sdev)->write64(sdev, sdev->bar[bar] + offset, value);
		return;
	}

	dev_err_ratelimited(sdev->dev, "error: %s not defined\n", __func__);
}

static inline u32 snd_sof_dsp_read(struct snd_sof_dev *sdev, u32 bar,
				   u32 offset)
{
	if (sof_ops(sdev)->read)
		return sof_ops(sdev)->read(sdev, sdev->bar[bar] + offset);

	dev_err(sdev->dev, "error: %s not defined\n", __func__);
	return -ENOTSUPP;
}

static inline u64 snd_sof_dsp_read64(struct snd_sof_dev *sdev, u32 bar,
				     u32 offset)
{
	if (sof_ops(sdev)->read64)
		return sof_ops(sdev)->read64(sdev, sdev->bar[bar] + offset);

	dev_err(sdev->dev, "error: %s not defined\n", __func__);
	return -ENOTSUPP;
}
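
/*
 * Illustrative sketch only: the BAR index and register offset below are
 * hypothetical. The wrappers above take a BAR index plus an offset relative
 * to that BAR, so a read-modify-write of a 32-bit register looks like:
 *
 *	u32 val;
 *
 *	val = snd_sof_dsp_read(sdev, SOME_BAR, SOME_CTRL_OFFSET);
 *	val |= BIT(0);
 *	snd_sof_dsp_write(sdev, SOME_BAR, SOME_CTRL_OFFSET, val);
 *
 * For plain bit updates, the snd_sof_dsp_update_bits*() helpers declared at
 * the end of this file are usually the better fit.
 */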

/* block IO */
static inline int snd_sof_dsp_block_read(struct snd_sof_dev *sdev,
					 enum snd_sof_fw_blk_type blk_type,
					 u32 offset, void *dest, size_t bytes)
{
	return sof_ops(sdev)->block_read(sdev, blk_type, offset, dest, bytes);
}

static inline int snd_sof_dsp_block_write(struct snd_sof_dev *sdev,
					  enum snd_sof_fw_blk_type blk_type,
					  u32 offset, void *src, size_t bytes)
{
	return sof_ops(sdev)->block_write(sdev, blk_type, offset, src, bytes);
}

/* mailbox IO */
static inline void snd_sof_dsp_mailbox_read(struct snd_sof_dev *sdev,
					    u32 offset, void *dest, size_t bytes)
{
	if (sof_ops(sdev)->mailbox_read)
		sof_ops(sdev)->mailbox_read(sdev, offset, dest, bytes);
}

static inline void snd_sof_dsp_mailbox_write(struct snd_sof_dev *sdev,
					     u32 offset, void *src, size_t bytes)
{
	if (sof_ops(sdev)->mailbox_write)
		sof_ops(sdev)->mailbox_write(sdev, offset, src, bytes);
}

/* ipc */
static inline int snd_sof_dsp_send_msg(struct snd_sof_dev *sdev,
				       struct snd_sof_ipc_msg *msg)
{
	return sof_ops(sdev)->send_msg(sdev, msg);
}

/* host DMA trace */
static inline int snd_sof_dma_trace_init(struct snd_sof_dev *sdev,
					 u32 *stream_tag)
{
	if (sof_ops(sdev)->trace_init)
		return sof_ops(sdev)->trace_init(sdev, stream_tag);

	return 0;
}

static inline int snd_sof_dma_trace_release(struct snd_sof_dev *sdev)
{
	if (sof_ops(sdev)->trace_release)
		return sof_ops(sdev)->trace_release(sdev);

	return 0;
}

static inline int snd_sof_dma_trace_trigger(struct snd_sof_dev *sdev, int cmd)
{
	if (sof_ops(sdev)->trace_trigger)
		return sof_ops(sdev)->trace_trigger(sdev, cmd);

	return 0;
}

/* host PCM ops */
static inline int
snd_sof_pcm_platform_open(struct snd_sof_dev *sdev,
			  struct snd_pcm_substream *substream)
{
	if (sof_ops(sdev) && sof_ops(sdev)->pcm_open)
		return sof_ops(sdev)->pcm_open(sdev, substream);

	return 0;
}

/* disconnect pcm substream from a host stream */
static inline int
snd_sof_pcm_platform_close(struct snd_sof_dev *sdev,
			   struct snd_pcm_substream *substream)
{
	if (sof_ops(sdev) && sof_ops(sdev)->pcm_close)
		return sof_ops(sdev)->pcm_close(sdev, substream);

	return 0;
}

/* host stream hw params */
static inline int
snd_sof_pcm_platform_hw_params(struct snd_sof_dev *sdev,
			       struct snd_pcm_substream *substream,
			       struct snd_pcm_hw_params *params,
			       struct sof_ipc_stream_params *ipc_params)
{
	if (sof_ops(sdev) && sof_ops(sdev)->pcm_hw_params)
		return sof_ops(sdev)->pcm_hw_params(sdev, substream,
						    params, ipc_params);

	return 0;
}

/* host stream hw free */
static inline int
snd_sof_pcm_platform_hw_free(struct snd_sof_dev *sdev,
			     struct snd_pcm_substream *substream)
{
	if (sof_ops(sdev) && sof_ops(sdev)->pcm_hw_free)
		return sof_ops(sdev)->pcm_hw_free(sdev, substream);

	return 0;
}

/* host stream trigger */
static inline int
snd_sof_pcm_platform_trigger(struct snd_sof_dev *sdev,
			     struct snd_pcm_substream *substream, int cmd)
{
	if (sof_ops(sdev) && sof_ops(sdev)->pcm_trigger)
		return sof_ops(sdev)->pcm_trigger(sdev, substream, cmd);

	return 0;
}

/* Firmware loading */
static inline int snd_sof_load_firmware(struct snd_sof_dev *sdev)
{
	dev_dbg(sdev->dev, "loading firmware\n");

	return sof_ops(sdev)->load_firmware(sdev);
}

/* host DSP message data */
static inline int snd_sof_ipc_msg_data(struct snd_sof_dev *sdev,
				       struct snd_pcm_substream *substream,
				       void *p, size_t sz)
{
	return sof_ops(sdev)->ipc_msg_data(sdev, substream, p, sz);
}

/* host configure DSP HW parameters */
static inline int
snd_sof_ipc_pcm_params(struct snd_sof_dev *sdev,
		       struct snd_pcm_substream *substream,
		       const struct sof_ipc_pcm_params_reply *reply)
{
	return sof_ops(sdev)->ipc_pcm_params(sdev, substream, reply);
}

/* host stream pointer */
static inline snd_pcm_uframes_t
snd_sof_pcm_platform_pointer(struct snd_sof_dev *sdev,
			     struct snd_pcm_substream *substream)
{
	if (sof_ops(sdev) && sof_ops(sdev)->pcm_pointer)
		return sof_ops(sdev)->pcm_pointer(sdev, substream);

	return 0;
}

/* pcm ack */
static inline int snd_sof_pcm_platform_ack(struct snd_sof_dev *sdev,
					   struct snd_pcm_substream *substream)
{
	if (sof_ops(sdev) && sof_ops(sdev)->pcm_ack)
		return sof_ops(sdev)->pcm_ack(sdev, substream);

	return 0;
}

#if IS_ENABLED(CONFIG_SND_SOC_SOF_DEBUG_PROBES)
static inline int
snd_sof_probe_compr_assign(struct snd_sof_dev *sdev,
			   struct snd_compr_stream *cstream, struct snd_soc_dai *dai)
{
	return sof_ops(sdev)->probe_assign(sdev, cstream, dai);
}

static inline int
snd_sof_probe_compr_free(struct snd_sof_dev *sdev,
			 struct snd_compr_stream *cstream, struct snd_soc_dai *dai)
{
	return sof_ops(sdev)->probe_free(sdev, cstream, dai);
}

static inline int
snd_sof_probe_compr_set_params(struct snd_sof_dev *sdev,
			       struct snd_compr_stream *cstream,
			       struct snd_compr_params *params, struct snd_soc_dai *dai)
{
	return sof_ops(sdev)->probe_set_params(sdev, cstream, params, dai);
}

static inline int
snd_sof_probe_compr_trigger(struct snd_sof_dev *sdev,
			    struct snd_compr_stream *cstream, int cmd,
			    struct snd_soc_dai *dai)
{
	return sof_ops(sdev)->probe_trigger(sdev, cstream, cmd, dai);
}

static inline int
snd_sof_probe_compr_pointer(struct snd_sof_dev *sdev,
			    struct snd_compr_stream *cstream,
			    struct snd_compr_tstamp *tstamp, struct snd_soc_dai *dai)
{
	if (sof_ops(sdev) && sof_ops(sdev)->probe_pointer)
		return sof_ops(sdev)->probe_pointer(sdev, cstream, tstamp, dai);

	return 0;
}
#endif

/* machine driver */
static inline int
snd_sof_machine_register(struct snd_sof_dev *sdev, void *pdata)
{
	if (sof_ops(sdev) && sof_ops(sdev)->machine_register)
		return sof_ops(sdev)->machine_register(sdev, pdata);

	return 0;
}

static inline void
snd_sof_machine_unregister(struct snd_sof_dev *sdev, void *pdata)
{
	if (sof_ops(sdev) && sof_ops(sdev)->machine_unregister)
		sof_ops(sdev)->machine_unregister(sdev, pdata);
}

static inline struct snd_soc_acpi_mach *
snd_sof_machine_select(struct snd_sof_dev *sdev)
{
	if (sof_ops(sdev) && sof_ops(sdev)->machine_select)
		return sof_ops(sdev)->machine_select(sdev);

	return NULL;
}

static inline void
snd_sof_set_mach_params(struct snd_soc_acpi_mach *mach,
			struct snd_sof_dev *sdev)
{
	if (sof_ops(sdev) && sof_ops(sdev)->set_mach_params)
		sof_ops(sdev)->set_mach_params(mach, sdev);
}

/**
 * snd_sof_dsp_read_poll_timeout - Periodically poll a register until a
 * condition is met or a timeout occurs
 * @sdev: sof device
 * @bar: BAR index of the register to poll
 * @offset: Register offset within @bar
 * @val: Variable to read the value into
 * @cond: Break condition (usually involving @val)
 * @sleep_us: Maximum time to sleep between reads in us (0
 *            tight-loops). Should be less than ~20ms since usleep_range
 *            is used (see Documentation/timers/timers-howto.rst).
 * @timeout_us: Timeout in us, 0 means never timeout
 *
 * Returns 0 on success and -ETIMEDOUT upon a timeout. In either
 * case, the last value read at @offset is stored in @val. Must not
 * be called from atomic context if sleep_us or timeout_us are used.
 *
 * This is modelled after the readx_poll_timeout macros in linux/iopoll.h.
 */
#define snd_sof_dsp_read_poll_timeout(sdev, bar, offset, val, cond, sleep_us, timeout_us) \
({ \
	u64 __timeout_us = (timeout_us); \
	unsigned long __sleep_us = (sleep_us); \
	ktime_t __timeout = ktime_add_us(ktime_get(), __timeout_us); \
	might_sleep_if((__sleep_us) != 0); \
	for (;;) { \
		(val) = snd_sof_dsp_read(sdev, bar, offset); \
		if (cond) { \
			dev_dbg(sdev->dev, \
				"FW Poll Status: reg[%#x]=%#x successful\n", \
				(offset), (val)); \
			break; \
		} \
		if (__timeout_us && \
		    ktime_compare(ktime_get(), __timeout) > 0) { \
			(val) = snd_sof_dsp_read(sdev, bar, offset); \
			dev_dbg(sdev->dev, \
				"FW Poll Status: reg[%#x]=%#x timedout\n", \
				(offset), (val)); \
			break; \
		} \
		if (__sleep_us) \
			usleep_range((__sleep_us >> 2) + 1, __sleep_us); \
	} \
	(cond) ? 0 : -ETIMEDOUT; \
})
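
/*
 * Illustrative sketch only: FW_STATUS_OFFSET and the polled bit are
 * hypothetical, but this is the intended calling pattern. Poll a status
 * register in @bar until bit 0 is set, sleeping roughly 500us between
 * reads and timing out after 10ms:
 *
 *	u32 status;
 *	int ret;
 *
 *	ret = snd_sof_dsp_read_poll_timeout(sdev, bar, FW_STATUS_OFFSET,
 *					    status, status & BIT(0),
 *					    500, 10000);
 *	if (ret < 0)
 *		dev_err(sdev->dev, "status poll timed out\n");
 */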

/* This is for register bits with attribute RWC */
bool snd_sof_pci_update_bits(struct snd_sof_dev *sdev, u32 offset,
			     u32 mask, u32 value);

bool snd_sof_dsp_update_bits_unlocked(struct snd_sof_dev *sdev, u32 bar,
				      u32 offset, u32 mask, u32 value);

bool snd_sof_dsp_update_bits64_unlocked(struct snd_sof_dev *sdev, u32 bar,
					u32 offset, u64 mask, u64 value);

bool snd_sof_dsp_update_bits(struct snd_sof_dev *sdev, u32 bar, u32 offset,
			     u32 mask, u32 value);

bool snd_sof_dsp_update_bits64(struct snd_sof_dev *sdev, u32 bar,
			       u32 offset, u64 mask, u64 value);

void snd_sof_dsp_update_bits_forced(struct snd_sof_dev *sdev, u32 bar,
				    u32 offset, u32 mask, u32 value);

int snd_sof_dsp_register_poll(struct snd_sof_dev *sdev, u32 bar, u32 offset,
			      u32 mask, u32 target, u32 timeout_ms,
			      u32 interval_us);

void snd_sof_dsp_panic(struct snd_sof_dev *sdev, u32 offset, bool non_recoverable);

#endif /* __SOUND_SOC_SOF_IO_H */