// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
//
// This file is provided under a dual BSD/GPLv2 license. When using or
// redistributing this file, you may do so under either license.
//
// Copyright(c) 2018 Intel Corporation. All rights reserved.
//
// Author: Liam Girdwood <liam.r.girdwood@linux.intel.com>
//

/*
 * Hardware interface for audio DSP on Broadwell
 */

#include <linux/module.h>
#include <sound/sof.h>
#include <sound/sof/xtensa.h>
#include "../ops.h"
#include "shim.h"

/* BARs */
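/*
 * BDW_DSP_BAR maps the ADSP/LPE MMIO space (IRAM, DRAM, SHIM and the IPC
 * mailbox); BDW_PCI_BAR maps the ADSP PCI configuration space used for
 * power control (PCI_VDRTCTL*, PCI_PMCS). See bdw_probe() below.
 */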
#define BDW_DSP_BAR 0
#define BDW_PCI_BAR 1

/*
 * Debug
 */

/* DSP memories for BDW */
#define IRAM_OFFSET	0xA0000
#define BDW_IRAM_SIZE	(10 * 32 * 1024)
#define DRAM_OFFSET	0x00000
#define BDW_DRAM_SIZE	(20 * 32 * 1024)
#define SHIM_OFFSET	0xFB000
#define SHIM_SIZE	0x100
#define MBOX_OFFSET	0x9E000
#define MBOX_SIZE	0x1000
#define MBOX_DUMP_SIZE	0x30
#define EXCEPT_OFFSET	0x800

/* DSP peripherals */
#define DMAC0_OFFSET	0xFE000
#define DMAC1_OFFSET	0xFF000
#define DMAC_SIZE	0x420
#define SSP0_OFFSET	0xFC000
#define SSP1_OFFSET	0xFD000
#define SSP_SIZE	0x100

#define BDW_STACK_DUMP_SIZE	32

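/*
 * The firmware reports the panic message offset (relative to MBOX_OFFSET)
 * in the low 16 bits of the IPCX register; see bdw_irq_thread().
 */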
#define BDW_PANIC_OFFSET(x)	((x) & 0xFFFF)

static const struct snd_sof_debugfs_map bdw_debugfs[] = {
	{"dmac0", BDW_DSP_BAR, DMAC0_OFFSET, DMAC_SIZE,
	 SOF_DEBUGFS_ACCESS_ALWAYS},
	{"dmac1", BDW_DSP_BAR, DMAC1_OFFSET, DMAC_SIZE,
	 SOF_DEBUGFS_ACCESS_ALWAYS},
	{"ssp0", BDW_DSP_BAR, SSP0_OFFSET, SSP_SIZE,
	 SOF_DEBUGFS_ACCESS_ALWAYS},
	{"ssp1", BDW_DSP_BAR, SSP1_OFFSET, SSP_SIZE,
	 SOF_DEBUGFS_ACCESS_ALWAYS},
	{"iram", BDW_DSP_BAR, IRAM_OFFSET, BDW_IRAM_SIZE,
	 SOF_DEBUGFS_ACCESS_D0_ONLY},
	{"dram", BDW_DSP_BAR, DRAM_OFFSET, BDW_DRAM_SIZE,
	 SOF_DEBUGFS_ACCESS_D0_ONLY},
	{"shim", BDW_DSP_BAR, SHIM_OFFSET, SHIM_SIZE,
	 SOF_DEBUGFS_ACCESS_ALWAYS},
};

static void bdw_host_done(struct snd_sof_dev *sdev);
static void bdw_dsp_done(struct snd_sof_dev *sdev);
static void bdw_get_reply(struct snd_sof_dev *sdev);

/*
 * DSP Control.
 */

static int bdw_run(struct snd_sof_dev *sdev)
{
	/* set opportunistic mode on engine 0,1 for all channels */
	snd_sof_dsp_update_bits(sdev, BDW_DSP_BAR, SHIM_HMDC,
				SHIM_HMDC_HDDA_E0_ALLCH |
				SHIM_HMDC_HDDA_E1_ALLCH, 0);

	/* set DSP to RUN */
	snd_sof_dsp_update_bits_unlocked(sdev, BDW_DSP_BAR, SHIM_CSR,
					 SHIM_CSR_STALL, 0x0);

	/* return init core mask */
	return 1;
}

static int bdw_reset(struct snd_sof_dev *sdev)
{
	/* put DSP into reset and stall */
	snd_sof_dsp_update_bits_unlocked(sdev, BDW_DSP_BAR, SHIM_CSR,
					 SHIM_CSR_RST | SHIM_CSR_STALL,
					 SHIM_CSR_RST | SHIM_CSR_STALL);

	/* keep in reset for 10ms */
	mdelay(10);

	/* take DSP out of reset and keep stalled for FW loading */
	snd_sof_dsp_update_bits_unlocked(sdev, BDW_DSP_BAR, SHIM_CSR,
					 SHIM_CSR_RST | SHIM_CSR_STALL,
					 SHIM_CSR_STALL);

	return 0;
}

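/*
 * Power the ADSP up to D0 and program the clock tree: disable clock
 * gating, move the device to D0, select the SSP/DSP clocks, then leave
 * the core stalled in reset, ready for firmware loading.
 */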
static int bdw_set_dsp_D0(struct snd_sof_dev *sdev)
{
	int tries = 10;
	u32 reg;

	/* Disable core clock gating (VDRTCTL2.DCLCGE = 0) */
	snd_sof_dsp_update_bits_unlocked(sdev, BDW_PCI_BAR, PCI_VDRTCTL2,
					 PCI_VDRTCL2_DCLCGE |
					 PCI_VDRTCL2_DTCGE, 0);

	/* Disable D3PG (VDRTCTL0.D3PGD = 1) */
	snd_sof_dsp_update_bits_unlocked(sdev, BDW_PCI_BAR, PCI_VDRTCTL0,
					 PCI_VDRTCL0_D3PGD, PCI_VDRTCL0_D3PGD);

	/* Set D0 state */
	snd_sof_dsp_update_bits_unlocked(sdev, BDW_PCI_BAR, PCI_PMCS,
					 PCI_PMCS_PS_MASK, 0);

	/* check that ADSP shim is enabled */
	while (tries--) {
		reg = readl(sdev->bar[BDW_PCI_BAR] + PCI_PMCS)
			& PCI_PMCS_PS_MASK;
		if (reg == 0)
			goto finish;

		msleep(20);
	}

	return -ENODEV;

finish:
	/*
	 * select SSP1 19.2MHz base clock, SSP clock 0,
	 * turn off Low Power Clock
	 */
	snd_sof_dsp_update_bits_unlocked(sdev, BDW_DSP_BAR, SHIM_CSR,
					 SHIM_CSR_S1IOCS | SHIM_CSR_SBCS1 |
					 SHIM_CSR_LPCS, 0x0);

	/* stall DSP core, set clk to 192/96MHz */
	snd_sof_dsp_update_bits_unlocked(sdev, BDW_DSP_BAR,
					 SHIM_CSR, SHIM_CSR_STALL |
					 SHIM_CSR_DCS_MASK,
					 SHIM_CSR_STALL |
					 SHIM_CSR_DCS(4));

	/* Set 24MHz MCLK, prevent local clock gating, enable SSP0 clock */
	snd_sof_dsp_update_bits_unlocked(sdev, BDW_DSP_BAR, SHIM_CLKCTL,
					 SHIM_CLKCTL_MASK |
					 SHIM_CLKCTL_DCPLCG |
					 SHIM_CLKCTL_SCOE0,
					 SHIM_CLKCTL_MASK |
					 SHIM_CLKCTL_DCPLCG |
					 SHIM_CLKCTL_SCOE0);

	/* Stall and reset core, set CSR */
	bdw_reset(sdev);

	/* Enable core clock gating (VDRTCTL2.DCLCGE = 1), delay 50 us */
	snd_sof_dsp_update_bits_unlocked(sdev, BDW_PCI_BAR, PCI_VDRTCTL2,
					 PCI_VDRTCL2_DCLCGE |
					 PCI_VDRTCL2_DTCGE,
					 PCI_VDRTCL2_DCLCGE |
					 PCI_VDRTCL2_DTCGE);

	usleep_range(50, 55);

	/* switch on audio PLL */
	snd_sof_dsp_update_bits_unlocked(sdev, BDW_PCI_BAR, PCI_VDRTCTL2,
					 PCI_VDRTCL2_APLLSE_MASK, 0);

	/*
	 * Set default power gating control: enable power gating for all
	 * blocks. A gated block cannot be accessed until it is explicitly
	 * enabled again.
	 */
	snd_sof_dsp_update_bits_unlocked(sdev, BDW_PCI_BAR, PCI_VDRTCTL0,
					 0xfffffffC, 0x0);

	/* disable DMA finish function for SSP0 & SSP1 */
	snd_sof_dsp_update_bits_unlocked(sdev, BDW_DSP_BAR, SHIM_CSR2,
					 SHIM_CSR2_SDFD_SSP1,
					 SHIM_CSR2_SDFD_SSP1);

	/* set on-demand mode on engine 0,1 for all channels */
	snd_sof_dsp_update_bits(sdev, BDW_DSP_BAR, SHIM_HMDC,
				SHIM_HMDC_HDDA_E0_ALLCH |
				SHIM_HMDC_HDDA_E1_ALLCH,
				SHIM_HMDC_HDDA_E0_ALLCH |
				SHIM_HMDC_HDDA_E1_ALLCH);

	/* Enable Interrupt from both sides */
	snd_sof_dsp_update_bits(sdev, BDW_DSP_BAR, SHIM_IMRX,
				(SHIM_IMRX_BUSY | SHIM_IMRX_DONE), 0x0);
	snd_sof_dsp_update_bits(sdev, BDW_DSP_BAR, SHIM_IMRD,
				(SHIM_IMRD_DONE | SHIM_IMRD_BUSY |
				 SHIM_IMRD_SSP0 | SHIM_IMRD_DMAC), 0x0);

	/* clear IPC registers */
	snd_sof_dsp_write(sdev, BDW_DSP_BAR, SHIM_IPCX, 0x0);
	snd_sof_dsp_write(sdev, BDW_DSP_BAR, SHIM_IPCD, 0x0);
	snd_sof_dsp_write(sdev, BDW_DSP_BAR, 0x80, 0x6);
	snd_sof_dsp_write(sdev, BDW_DSP_BAR, 0xe0, 0x300a);

	return 0;
}

static void bdw_get_registers(struct snd_sof_dev *sdev,
			      struct sof_ipc_dsp_oops_xtensa *xoops,
			      struct sof_ipc_panic_info *panic_info,
			      u32 *stack, size_t stack_words)
{
	u32 offset = sdev->dsp_oops_offset;

	/* first read registers */
	sof_mailbox_read(sdev, offset, xoops, sizeof(*xoops));

	/* note: variable AR register array is not read */

	/* then get panic info */
	offset += xoops->arch_hdr.totalsize;
	sof_mailbox_read(sdev, offset, panic_info, sizeof(*panic_info));

	/* then get the stack */
	offset += sizeof(*panic_info);
	sof_mailbox_read(sdev, offset, stack, stack_words * sizeof(u32));
}

static void bdw_dump(struct snd_sof_dev *sdev, u32 flags)
{
	struct sof_ipc_dsp_oops_xtensa xoops;
	struct sof_ipc_panic_info panic_info;
	u32 stack[BDW_STACK_DUMP_SIZE];
	u32 status, panic;

	/* now try generic SOF status messages */
	status = snd_sof_dsp_read(sdev, BDW_DSP_BAR, SHIM_IPCD);
	panic = snd_sof_dsp_read(sdev, BDW_DSP_BAR, SHIM_IPCX);
	bdw_get_registers(sdev, &xoops, &panic_info, stack,
			  BDW_STACK_DUMP_SIZE);
	snd_sof_get_status(sdev, status, panic, &xoops, &panic_info, stack,
			   BDW_STACK_DUMP_SIZE);
}

/*
 * IPC Doorbell IRQ handler and thread.
 */

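/*
 * The hard IRQ handler only checks whether the DSP raised a Done or Busy
 * interrupt; all IPC processing is deferred to the threaded handler.
 */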
static irqreturn_t bdw_irq_handler(int irq, void *context)
{
	struct snd_sof_dev *sdev = context;
	u32 isr;
	int ret = IRQ_NONE;

	/* Interrupt arrived, check src */
	isr = snd_sof_dsp_read(sdev, BDW_DSP_BAR, SHIM_ISRX);
	if (isr & (SHIM_ISRX_DONE | SHIM_ISRX_BUSY))
		ret = IRQ_WAKE_THREAD;

	return ret;
}

static irqreturn_t bdw_irq_thread(int irq, void *context)
{
	struct snd_sof_dev *sdev = context;
	u32 ipcx, ipcd, imrx;

	imrx = snd_sof_dsp_read64(sdev, BDW_DSP_BAR, SHIM_IMRX);
	ipcx = snd_sof_dsp_read(sdev, BDW_DSP_BAR, SHIM_IPCX);

	/* reply message from DSP */
	if (ipcx & SHIM_IPCX_DONE &&
	    !(imrx & SHIM_IMRX_DONE)) {
		/* Mask Done interrupt before return */
		snd_sof_dsp_update_bits_unlocked(sdev, BDW_DSP_BAR,
						 SHIM_IMRX, SHIM_IMRX_DONE,
						 SHIM_IMRX_DONE);

		spin_lock_irq(&sdev->ipc_lock);

		/*
		 * handle immediate reply from DSP core. If the msg is
		 * found, set the done bit in cmd_done, which is called at
		 * the end of message processing; otherwise set it here,
		 * because the done bit can't be set in cmd_done when it
		 * is triggered by the msg.
		 */
		bdw_get_reply(sdev);
		snd_sof_ipc_reply(sdev, ipcx);

		bdw_dsp_done(sdev);

		spin_unlock_irq(&sdev->ipc_lock);
	}

	ipcd = snd_sof_dsp_read(sdev, BDW_DSP_BAR, SHIM_IPCD);

	/* new message from DSP */
	if (ipcd & SHIM_IPCD_BUSY &&
	    !(imrx & SHIM_IMRX_BUSY)) {
		/* Mask Busy interrupt before return */
		snd_sof_dsp_update_bits_unlocked(sdev, BDW_DSP_BAR,
						 SHIM_IMRX, SHIM_IMRX_BUSY,
						 SHIM_IMRX_BUSY);

		/* Handle messages from DSP Core */
		if ((ipcd & SOF_IPC_PANIC_MAGIC_MASK) == SOF_IPC_PANIC_MAGIC) {
			snd_sof_dsp_panic(sdev, BDW_PANIC_OFFSET(ipcx) +
					  MBOX_OFFSET);
		} else {
			snd_sof_ipc_msgs_rx(sdev);
		}

		bdw_host_done(sdev);
	}

	return IRQ_HANDLED;
}

/*
 * IPC Firmware ready.
 */
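/*
 * Parse the memory window descriptors reported by the firmware, expose
 * each region through debugfs and record the inbox/outbox/stream mailbox
 * offsets.
 */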
static void bdw_get_windows(struct snd_sof_dev *sdev)
{
	struct sof_ipc_window_elem *elem;
	u32 outbox_offset = 0;
	u32 stream_offset = 0;
	u32 inbox_offset = 0;
	u32 outbox_size = 0;
	u32 stream_size = 0;
	u32 inbox_size = 0;
	int i;

	if (!sdev->info_window) {
		dev_err(sdev->dev, "error: no window info\n");
		return;
	}

	for (i = 0; i < sdev->info_window->num_windows; i++) {
		elem = &sdev->info_window->window[i];

		switch (elem->type) {
		case SOF_IPC_REGION_UPBOX:
			inbox_offset = elem->offset + MBOX_OFFSET;
			inbox_size = elem->size;
			snd_sof_debugfs_io_item(sdev,
						sdev->bar[BDW_DSP_BAR] +
						inbox_offset,
						elem->size, "inbox",
						SOF_DEBUGFS_ACCESS_D0_ONLY);
			break;
		case SOF_IPC_REGION_DOWNBOX:
			outbox_offset = elem->offset + MBOX_OFFSET;
			outbox_size = elem->size;
			snd_sof_debugfs_io_item(sdev,
						sdev->bar[BDW_DSP_BAR] +
						outbox_offset,
						elem->size, "outbox",
						SOF_DEBUGFS_ACCESS_D0_ONLY);
			break;
		case SOF_IPC_REGION_TRACE:
			snd_sof_debugfs_io_item(sdev,
						sdev->bar[BDW_DSP_BAR] +
						elem->offset +
						MBOX_OFFSET,
						elem->size, "etrace",
						SOF_DEBUGFS_ACCESS_D0_ONLY);
			break;
		case SOF_IPC_REGION_DEBUG:
			snd_sof_debugfs_io_item(sdev,
						sdev->bar[BDW_DSP_BAR] +
						elem->offset +
						MBOX_OFFSET,
						elem->size, "debug",
						SOF_DEBUGFS_ACCESS_D0_ONLY);
			break;
		case SOF_IPC_REGION_STREAM:
			stream_offset = elem->offset + MBOX_OFFSET;
			stream_size = elem->size;
			snd_sof_debugfs_io_item(sdev,
						sdev->bar[BDW_DSP_BAR] +
						stream_offset,
						elem->size, "stream",
						SOF_DEBUGFS_ACCESS_D0_ONLY);
			break;
		case SOF_IPC_REGION_REGS:
			snd_sof_debugfs_io_item(sdev,
						sdev->bar[BDW_DSP_BAR] +
						elem->offset +
						MBOX_OFFSET,
						elem->size, "regs",
						SOF_DEBUGFS_ACCESS_D0_ONLY);
			break;
		case SOF_IPC_REGION_EXCEPTION:
			sdev->dsp_oops_offset = elem->offset + MBOX_OFFSET;
			snd_sof_debugfs_io_item(sdev,
						sdev->bar[BDW_DSP_BAR] +
						elem->offset +
						MBOX_OFFSET,
						elem->size, "exception",
						SOF_DEBUGFS_ACCESS_D0_ONLY);
			break;
		default:
			dev_err(sdev->dev, "error: invalid window info\n");
			return;
		}
	}

	if (outbox_size == 0 || inbox_size == 0) {
		dev_err(sdev->dev, "error: invalid mailbox window\n");
		return;
	}

	snd_sof_dsp_mailbox_init(sdev, inbox_offset, inbox_size,
				 outbox_offset, outbox_size);
	sdev->stream_box.offset = stream_offset;
	sdev->stream_box.size = stream_size;

	dev_dbg(sdev->dev, " mailbox upstream 0x%x - size 0x%x\n",
		inbox_offset, inbox_size);
	dev_dbg(sdev->dev, " mailbox downstream 0x%x - size 0x%x\n",
		outbox_offset, outbox_size);
	dev_dbg(sdev->dev, " stream region 0x%x - size 0x%x\n",
		stream_offset, stream_size);
}

/* check for ABI compatibility and create memory windows on first boot */
static int bdw_fw_ready(struct snd_sof_dev *sdev, u32 msg_id)
{
	struct sof_ipc_fw_ready *fw_ready = &sdev->fw_ready;
	u32 offset;
	int ret;

	/* mailbox must be on 4k boundary */
	offset = MBOX_OFFSET;

	dev_dbg(sdev->dev, "ipc: DSP is ready 0x%8.8x offset %d\n",
		msg_id, offset);

	/* no need to re-check version/ABI for subsequent boots */
	if (!sdev->first_boot)
		return 0;

	/* copy data from the DSP FW ready offset */
	sof_block_read(sdev, sdev->mmio_bar, offset, fw_ready,
		       sizeof(*fw_ready));

	snd_sof_dsp_mailbox_init(sdev, fw_ready->dspbox_offset,
				 fw_ready->dspbox_size,
				 fw_ready->hostbox_offset,
				 fw_ready->hostbox_size);

	/* make sure ABI version is compatible */
	ret = snd_sof_ipc_valid(sdev);
	if (ret < 0)
		return ret;

	/* now check for extended data */
	snd_sof_fw_parse_ext_data(sdev, sdev->mmio_bar, MBOX_OFFSET +
				  sizeof(struct sof_ipc_fw_ready));

	bdw_get_windows(sdev);

	return 0;
}

/*
 * IPC Mailbox IO
 */

static int bdw_send_msg(struct snd_sof_dev *sdev, struct snd_sof_ipc_msg *msg)
{
	/* send the message */
	sof_mailbox_write(sdev, sdev->host_box.offset, msg->msg_data,
			  msg->msg_size);
	snd_sof_dsp_write(sdev, BDW_DSP_BAR, SHIM_IPCX, SHIM_IPCX_BUSY);

	return 0;
}

static void bdw_get_reply(struct snd_sof_dev *sdev)
{
	struct snd_sof_ipc_msg *msg = sdev->msg;
	struct sof_ipc_reply reply;
	int ret = 0;

	/*
	 * Sometimes an unexpected reply IPC arrives that does not belong
	 * to any of the IPCs sent by the driver. In this case the driver
	 * must ignore it.
	 */
	if (!msg) {
		dev_warn(sdev->dev, "unexpected ipc interrupt raised!\n");
		return;
	}

	/* get reply */
	sof_mailbox_read(sdev, sdev->host_box.offset, &reply, sizeof(reply));

	if (reply.error < 0) {
		memcpy(msg->reply_data, &reply, sizeof(reply));
		ret = reply.error;
	} else {
		/* reply correct size ? */
		if (reply.hdr.size != msg->reply_size) {
			dev_err(sdev->dev, "error: reply expected %zu got %u bytes\n",
				msg->reply_size, reply.hdr.size);
			ret = -EINVAL;
		}

		/* read the message */
		if (msg->reply_size > 0)
			sof_mailbox_read(sdev, sdev->host_box.offset,
					 msg->reply_data, msg->reply_size);
	}

	msg->reply_error = ret;
}

static void bdw_host_done(struct snd_sof_dev *sdev)
{
	/* clear BUSY bit and set DONE bit - accept new messages */
	snd_sof_dsp_update_bits_unlocked(sdev, BDW_DSP_BAR, SHIM_IPCD,
					 SHIM_IPCD_BUSY | SHIM_IPCD_DONE,
					 SHIM_IPCD_DONE);

	/* unmask busy interrupt */
	snd_sof_dsp_update_bits_unlocked(sdev, BDW_DSP_BAR, SHIM_IMRX,
					 SHIM_IMRX_BUSY, 0);
}

static void bdw_dsp_done(struct snd_sof_dev *sdev)
{
	/* clear DONE bit - tell DSP we have completed */
	snd_sof_dsp_update_bits_unlocked(sdev, BDW_DSP_BAR, SHIM_IPCX,
					 SHIM_IPCX_DONE, 0);

	/* unmask Done interrupt */
	snd_sof_dsp_update_bits_unlocked(sdev, BDW_DSP_BAR, SHIM_IMRX,
					 SHIM_IMRX_DONE, 0);
}

/*
 * Probe and remove.
 */
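/*
 * Map the LPE and PCI configuration MMIO resources, request the shared
 * IPC IRQ, power the DSP up to D0, restrict DMA to the low 31 bits of
 * host memory and set up the default mailbox.
 */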
static int bdw_probe(struct snd_sof_dev *sdev)
{
	struct snd_sof_pdata *pdata = sdev->pdata;
	const struct sof_dev_desc *desc = pdata->desc;
	struct platform_device *pdev =
		container_of(sdev->dev, struct platform_device, dev);
	struct resource *mmio;
	u32 base, size;
	int ret;

	/* LPE base */
	mmio = platform_get_resource(pdev, IORESOURCE_MEM,
				     desc->resindex_lpe_base);
	if (mmio) {
		base = mmio->start;
		size = resource_size(mmio);
	} else {
		dev_err(sdev->dev, "error: failed to get LPE base at idx %d\n",
			desc->resindex_lpe_base);
		return -EINVAL;
	}

	dev_dbg(sdev->dev, "LPE PHY base at 0x%x size 0x%x", base, size);
	sdev->bar[BDW_DSP_BAR] = devm_ioremap(sdev->dev, base, size);
	if (!sdev->bar[BDW_DSP_BAR]) {
		dev_err(sdev->dev,
			"error: failed to ioremap LPE base 0x%x size 0x%x\n",
			base, size);
		return -ENODEV;
	}
	dev_dbg(sdev->dev, "LPE VADDR %p\n", sdev->bar[BDW_DSP_BAR]);

	/* TODO: add offsets */
	sdev->mmio_bar = BDW_DSP_BAR;
	sdev->mailbox_bar = BDW_DSP_BAR;

	/* PCI base */
	mmio = platform_get_resource(pdev, IORESOURCE_MEM,
				     desc->resindex_pcicfg_base);
	if (mmio) {
		base = mmio->start;
		size = resource_size(mmio);
	} else {
		dev_err(sdev->dev, "error: failed to get PCI base at idx %d\n",
			desc->resindex_pcicfg_base);
		return -ENODEV;
	}

	dev_dbg(sdev->dev, "PCI base at 0x%x size 0x%x", base, size);
	sdev->bar[BDW_PCI_BAR] = devm_ioremap(sdev->dev, base, size);
	if (!sdev->bar[BDW_PCI_BAR]) {
		dev_err(sdev->dev,
			"error: failed to ioremap PCI base 0x%x size 0x%x\n",
			base, size);
		return -ENODEV;
	}
	dev_dbg(sdev->dev, "PCI VADDR %p\n", sdev->bar[BDW_PCI_BAR]);

	/* register our IRQ */
	sdev->ipc_irq = platform_get_irq(pdev, desc->irqindex_host_ipc);
	if (sdev->ipc_irq < 0) {
		dev_err(sdev->dev, "error: failed to get IRQ at index %d\n",
			desc->irqindex_host_ipc);
		return sdev->ipc_irq;
	}

	dev_dbg(sdev->dev, "using IRQ %d\n", sdev->ipc_irq);
	ret = devm_request_threaded_irq(sdev->dev, sdev->ipc_irq,
					bdw_irq_handler, bdw_irq_thread,
					IRQF_SHARED, "AudioDSP", sdev);
	if (ret < 0) {
		dev_err(sdev->dev, "error: failed to register IRQ %d\n",
			sdev->ipc_irq);
		return ret;
	}

	/* enable the DSP SHIM */
	ret = bdw_set_dsp_D0(sdev);
	if (ret < 0) {
		dev_err(sdev->dev, "error: failed to set DSP D0\n");
		return ret;
	}

	/* DSP DMA can only access low 31 bits of host memory */
	ret = dma_coerce_mask_and_coherent(sdev->dev, DMA_BIT_MASK(31));
	if (ret < 0) {
		dev_err(sdev->dev, "error: failed to set DMA mask %d\n", ret);
		return ret;
	}

	/* set default mailbox */
	snd_sof_dsp_mailbox_init(sdev, MBOX_OFFSET, MBOX_SIZE, 0, 0);

	return ret;
}

/* Broadwell DAIs */
static struct snd_soc_dai_driver bdw_dai[] = {
	{
		.name = "ssp0-port",
	},
	{
		.name = "ssp1-port",
	},
};

/* broadwell ops */
const struct snd_sof_dsp_ops sof_bdw_ops = {
	/* Device init */
	.probe = bdw_probe,

	/* DSP Core Control */
	.run = bdw_run,
	.reset = bdw_reset,

	/* Register IO */
	.write = sof_io_write,
	.read = sof_io_read,
	.write64 = sof_io_write64,
	.read64 = sof_io_read64,

	/* Block IO */
	.block_read = sof_block_read,
	.block_write = sof_block_write,

	/* ipc */
	.send_msg = bdw_send_msg,
	.fw_ready = bdw_fw_ready,

	.ipc_msg_data = intel_ipc_msg_data,
	.ipc_pcm_params = intel_ipc_pcm_params,

	/* debug */
	.debug_map = bdw_debugfs,
	.debug_map_count = ARRAY_SIZE(bdw_debugfs),
	.dbg_dump = bdw_dump,

	/* stream callbacks */
	.pcm_open = intel_pcm_open,
	.pcm_close = intel_pcm_close,

	/* Module loading */
	.load_module = snd_sof_parse_module_memcpy,

	/* Firmware loading */
	.load_firmware = snd_sof_load_firmware_memcpy,

	/* DAI drivers */
	.drv = bdw_dai,
	.num_drv = ARRAY_SIZE(bdw_dai)
};
EXPORT_SYMBOL(sof_bdw_ops);

const struct sof_intel_dsp_desc bdw_chip_info = {
	.cores_num = 1,
	.cores_mask = 1,
};
EXPORT_SYMBOL(bdw_chip_info);

MODULE_LICENSE("Dual BSD/GPL");