/*
 * Freescale MPC5200 PSC DMA
 * ALSA SoC Platform driver
 *
 * Copyright (C) 2008 Secret Lab Technologies Ltd.
 * Copyright (C) 2009 Jon Smirl, Digispeaker
 */

#include <linux/module.h>
#include <linux/of_device.h>

#include <sound/soc.h>

#include <sysdev/bestcomm/bestcomm.h>
#include <sysdev/bestcomm/gen_bd.h>
#include <asm/mpc52xx_psc.h>

#include "mpc5200_dma.h"

/*
 * Interrupt handlers
 */
static irqreturn_t psc_dma_status_irq(int irq, void *_psc_dma)
{
	struct psc_dma *psc_dma = _psc_dma;
	struct mpc52xx_psc __iomem *regs = psc_dma->psc_regs;
	u16 isr;

	isr = in_be16(&regs->mpc52xx_psc_isr);

	/* Playback underrun error */
	if (psc_dma->playback.active && (isr & MPC52xx_PSC_IMR_TXEMP))
		psc_dma->stats.underrun_count++;

	/* Capture overrun error */
	if (psc_dma->capture.active && (isr & MPC52xx_PSC_IMR_ORERR))
		psc_dma->stats.overrun_count++;

	out_8(&regs->command, MPC52xx_PSC_RST_ERR_STAT);

	return IRQ_HANDLED;
}

/**
 * psc_dma_bcom_enqueue_next_buffer - Enqueue another audio buffer
 * @s: pointer to stream private data structure
 *
 * Enqueues another audio period buffer into the bestcomm queue.
 *
 * Note: The routine must only be called when there is space available in
 * the queue.  Otherwise the enqueue will fail and the audio ring buffer
 * will get out of sync.
 */
static void psc_dma_bcom_enqueue_next_buffer(struct psc_dma_stream *s)
{
	struct bcom_bd *bd;

	/* Prepare and enqueue the next buffer descriptor */
	bd = bcom_prepare_next_buffer(s->bcom_task);
	bd->status = s->period_bytes;
	bd->data[0] = s->period_next_pt;
	bcom_submit_next_buffer(s->bcom_task, NULL);

	/* Update for next period */
	s->period_next_pt += s->period_bytes;
	if (s->period_next_pt >= s->period_end)
		s->period_next_pt = s->period_start;
}

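/**
 * psc_dma_bcom_enqueue_tx - Enqueue ready playback periods
 * @s: pointer to stream private data structure
 *
 * Queues period buffers only up to the application pointer, so data the
 * application has not yet written is never handed to the DMA engine.  The
 * driver keeps a local copy of appl_ptr; when the ALSA pointer wraps at the
 * boundary, the local copy is first advanced to the boundary and then
 * wrapped back so the two pointers remain comparable.
 */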
static void psc_dma_bcom_enqueue_tx(struct psc_dma_stream *s)
{
	if (s->appl_ptr > s->runtime->control->appl_ptr) {
		/*
		 * In this case s->runtime->control->appl_ptr has wrapped around.
		 * Play the data to the end of the boundary, then wrap our own
		 * appl_ptr back around.
		 */
		while (s->appl_ptr < s->runtime->boundary) {
			if (bcom_queue_full(s->bcom_task))
				return;

			s->appl_ptr += s->period_size;

			psc_dma_bcom_enqueue_next_buffer(s);
		}
		s->appl_ptr -= s->runtime->boundary;
	}

	while (s->appl_ptr < s->runtime->control->appl_ptr) {

		if (bcom_queue_full(s->bcom_task))
			return;

		s->appl_ptr += s->period_size;

		psc_dma_bcom_enqueue_next_buffer(s);
	}
}

/* Bestcomm DMA irq handler for playback (tx) */
static irqreturn_t psc_dma_bcom_irq_tx(int irq, void *_psc_dma_stream)
{
	struct psc_dma_stream *s = _psc_dma_stream;

	spin_lock(&s->psc_dma->lock);
	/* For each finished period, dequeue the completed period buffer
	 * and enqueue a new one in its place. */
	while (bcom_buffer_done(s->bcom_task)) {
		bcom_retrieve_buffer(s->bcom_task, NULL, NULL);

		s->period_current_pt += s->period_bytes;
		if (s->period_current_pt >= s->period_end)
			s->period_current_pt = s->period_start;
	}
	psc_dma_bcom_enqueue_tx(s);
	spin_unlock(&s->psc_dma->lock);

	/* If the stream is active, then also inform the PCM middle layer
	 * of the period finished event. */
	if (s->active)
		snd_pcm_period_elapsed(s->stream);

	return IRQ_HANDLED;
}

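/* Bestcomm DMA irq handler for capture (rx).  Unlike the tx handler, a new
 * period buffer is re-enqueued immediately for every completed one, so the
 * controller never runs out of room for incoming samples. */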
static irqreturn_t psc_dma_bcom_irq_rx(int irq, void *_psc_dma_stream)
{
	struct psc_dma_stream *s = _psc_dma_stream;

	spin_lock(&s->psc_dma->lock);
	/* For each finished period, dequeue the completed period buffer
	 * and enqueue a new one in its place. */
	while (bcom_buffer_done(s->bcom_task)) {
		bcom_retrieve_buffer(s->bcom_task, NULL, NULL);

		s->period_current_pt += s->period_bytes;
		if (s->period_current_pt >= s->period_end)
			s->period_current_pt = s->period_start;

		psc_dma_bcom_enqueue_next_buffer(s);
	}
	spin_unlock(&s->psc_dma->lock);

	/* If the stream is active, then also inform the PCM middle layer
	 * of the period finished event. */
	if (s->active)
		snd_pcm_period_elapsed(s->stream);

	return IRQ_HANDLED;
}

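/* The DMA pages are allocated once in psc_dma_new() and released in
 * psc_dma_free(); hw_params/hw_free only attach and detach that
 * preallocated buffer from the runtime. */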
static int psc_dma_hw_free(struct snd_pcm_substream *substream)
{
	snd_pcm_set_runtime_buffer(substream, NULL);
	return 0;
}

/**
 * psc_dma_trigger - start and stop the DMA transfer.
 *
 * This function is called by ALSA to start, stop, pause, and resume the DMA
 * transfer of data.
 */
static int psc_dma_trigger(struct snd_pcm_substream *substream, int cmd)
{
	struct snd_soc_pcm_runtime *rtd = substream->private_data;
	struct psc_dma *psc_dma = rtd->dai->cpu_dai->private_data;
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct psc_dma_stream *s;
	struct mpc52xx_psc __iomem *regs = psc_dma->psc_regs;
	u16 imr;
	unsigned long flags;
	int i;

	if (substream->pstr->stream == SNDRV_PCM_STREAM_CAPTURE)
		s = &psc_dma->capture;
	else
		s = &psc_dma->playback;

	dev_dbg(psc_dma->dev, "psc_dma_trigger(substream=%p, cmd=%i)"
		" stream_id=%i\n",
		substream, cmd, substream->pstr->stream);

	switch (cmd) {
	case SNDRV_PCM_TRIGGER_START:
		s->period_bytes = frames_to_bytes(runtime,
				runtime->period_size);
		s->period_start = virt_to_phys(runtime->dma_area);
		s->period_end = s->period_start +
				(s->period_bytes * runtime->periods);
		s->period_next_pt = s->period_start;
		s->period_current_pt = s->period_start;
		s->period_size = runtime->period_size;
		s->active = 1;

		/* track appl_ptr so that we have a better chance of detecting
		 * end of stream and not overrunning it.
		 */
		s->runtime = runtime;
		s->appl_ptr = s->runtime->control->appl_ptr -
				(runtime->period_size * runtime->periods);

		/* Fill up the bestcomm bd queue and enable DMA.
		 * This will begin filling the PSC's fifo.
		 */
		spin_lock_irqsave(&psc_dma->lock, flags);

		if (substream->pstr->stream == SNDRV_PCM_STREAM_CAPTURE) {
			bcom_gen_bd_rx_reset(s->bcom_task);
			for (i = 0; i < runtime->periods; i++)
				if (!bcom_queue_full(s->bcom_task))
					psc_dma_bcom_enqueue_next_buffer(s);
		} else {
			bcom_gen_bd_tx_reset(s->bcom_task);
			psc_dma_bcom_enqueue_tx(s);
		}

		bcom_enable(s->bcom_task);
		spin_unlock_irqrestore(&psc_dma->lock, flags);

		out_8(&regs->command, MPC52xx_PSC_RST_ERR_STAT);

		break;

	case SNDRV_PCM_TRIGGER_STOP:
		s->active = 0;

		spin_lock_irqsave(&psc_dma->lock, flags);
		bcom_disable(s->bcom_task);
		if (substream->pstr->stream == SNDRV_PCM_STREAM_CAPTURE)
			bcom_gen_bd_rx_reset(s->bcom_task);
		else
			bcom_gen_bd_tx_reset(s->bcom_task);
		spin_unlock_irqrestore(&psc_dma->lock, flags);

		break;

	default:
		dev_dbg(psc_dma->dev, "invalid command\n");
		return -EINVAL;
	}

	/* Update interrupt enable settings */
	imr = 0;
	if (psc_dma->playback.active)
		imr |= MPC52xx_PSC_IMR_TXEMP;
	if (psc_dma->capture.active)
		imr |= MPC52xx_PSC_IMR_ORERR;
	out_be16(&regs->isr_imr.imr, psc_dma->imr | imr);

	return 0;
}

/* ---------------------------------------------------------------------
 * The PSC DMA 'ASoC platform' driver
 *
 * Can be referenced by an 'ASoC machine' driver
 * This driver only deals with the audio bus; it doesn't have any
 * interaction with the attached codec
 */

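/* Illustrative sketch only (not part of this driver): an ASoC machine driver
 * of this era would reference the exported platform roughly as below.  The
 * card, dai_link and codec DAI names are placeholders:
 *
 *	static struct snd_soc_dai_link my_dai_link = {
 *		.name		= "PSC audio",
 *		.stream_name	= "PSC analog",
 *		.cpu_dai	= &my_cpu_dai,
 *		.codec_dai	= &my_codec_dai,
 *	};
 *
 *	static struct snd_soc_card my_card = {
 *		.name		= "MyBoard",
 *		.platform	= &mpc5200_audio_dma_platform,
 *		.dai_link	= &my_dai_link,
 *		.num_links	= 1,
 *	};
 */
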
static const struct snd_pcm_hardware psc_dma_hardware = {
	.info = SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_MMAP_VALID |
		SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_BLOCK_TRANSFER |
		SNDRV_PCM_INFO_BATCH,
	.formats = SNDRV_PCM_FMTBIT_S8 | SNDRV_PCM_FMTBIT_S16_BE |
		SNDRV_PCM_FMTBIT_S24_BE | SNDRV_PCM_FMTBIT_S32_BE,
	.rate_min = 8000,
	.rate_max = 48000,
	.channels_min = 1,
	.channels_max = 2,
	.period_bytes_max = 1024 * 1024,
	.period_bytes_min = 32,
	.periods_min = 2,
	.periods_max = 256,
	.buffer_bytes_max = 2 * 1024 * 1024,
	.fifo_size = 512,
};

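/* SNDRV_PCM_INFO_BATCH is advertised because psc_dma_pointer() only advances
 * at period granularity: period_current_pt is updated in the bestcomm irq
 * handlers, not per sample. */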
static int psc_dma_open(struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct snd_soc_pcm_runtime *rtd = substream->private_data;
	struct psc_dma *psc_dma = rtd->dai->cpu_dai->private_data;
	struct psc_dma_stream *s;
	int rc;

	dev_dbg(psc_dma->dev, "psc_dma_open(substream=%p)\n", substream);

	if (substream->pstr->stream == SNDRV_PCM_STREAM_CAPTURE)
		s = &psc_dma->capture;
	else
		s = &psc_dma->playback;

	snd_soc_set_runtime_hwparams(substream, &psc_dma_hardware);

	rc = snd_pcm_hw_constraint_integer(runtime,
		SNDRV_PCM_HW_PARAM_PERIODS);
	if (rc < 0) {
		dev_err(substream->pcm->card->dev, "invalid buffer size\n");
		return rc;
	}

	s->stream = substream;
	return 0;
}

static int psc_dma_close(struct snd_pcm_substream *substream)
{
	struct snd_soc_pcm_runtime *rtd = substream->private_data;
	struct psc_dma *psc_dma = rtd->dai->cpu_dai->private_data;
	struct psc_dma_stream *s;

	dev_dbg(psc_dma->dev, "psc_dma_close(substream=%p)\n", substream);

	if (substream->pstr->stream == SNDRV_PCM_STREAM_CAPTURE)
		s = &psc_dma->capture;
	else
		s = &psc_dma->playback;

	if (!psc_dma->playback.active &&
	    !psc_dma->capture.active) {

		/* Disable all interrupts and reset the PSC */
		out_be16(&psc_dma->psc_regs->isr_imr.imr, psc_dma->imr);
		out_8(&psc_dma->psc_regs->command, 4 << 4); /* reset error */
	}
	s->stream = NULL;
	return 0;
}

static snd_pcm_uframes_t
psc_dma_pointer(struct snd_pcm_substream *substream)
{
	struct snd_soc_pcm_runtime *rtd = substream->private_data;
	struct psc_dma *psc_dma = rtd->dai->cpu_dai->private_data;
	struct psc_dma_stream *s;
	dma_addr_t count;

	if (substream->pstr->stream == SNDRV_PCM_STREAM_CAPTURE)
		s = &psc_dma->capture;
	else
		s = &psc_dma->playback;

	count = s->period_current_pt - s->period_start;

	return bytes_to_frames(substream->runtime, count);
}

static int
psc_dma_hw_params(struct snd_pcm_substream *substream,
			 struct snd_pcm_hw_params *params)
{
	snd_pcm_set_runtime_buffer(substream, &substream->dma_buffer);

	return 0;
}

static struct snd_pcm_ops psc_dma_ops = {
	.open = psc_dma_open,
	.close = psc_dma_close,
	.hw_free = psc_dma_hw_free,
	.ioctl = snd_pcm_lib_ioctl,
	.pointer = psc_dma_pointer,
	.trigger = psc_dma_trigger,
	.hw_params = psc_dma_hw_params,
};

static u64 psc_dma_dmamask = 0xffffffff;
static int psc_dma_new(struct snd_card *card, struct snd_soc_dai *dai,
			   struct snd_pcm *pcm)
{
	struct snd_soc_pcm_runtime *rtd = pcm->private_data;
	struct psc_dma *psc_dma = rtd->dai->cpu_dai->private_data;
	size_t size = psc_dma_hardware.buffer_bytes_max;
	int rc = 0;

	dev_dbg(rtd->socdev->dev, "psc_dma_new(card=%p, dai=%p, pcm=%p)\n",
		card, dai, pcm);

	if (!card->dev->dma_mask)
		card->dev->dma_mask = &psc_dma_dmamask;
	if (!card->dev->coherent_dma_mask)
		card->dev->coherent_dma_mask = 0xffffffff;

	if (pcm->streams[0].substream) {
		rc = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, pcm->card->dev,
				size, &pcm->streams[0].substream->dma_buffer);
		if (rc)
			goto playback_alloc_err;
	}

	if (pcm->streams[1].substream) {
		rc = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, pcm->card->dev,
				size, &pcm->streams[1].substream->dma_buffer);
		if (rc)
			goto capture_alloc_err;
	}

	if (rtd->socdev->card->codec->ac97)
		rtd->socdev->card->codec->ac97->private_data = psc_dma;

	return 0;

 capture_alloc_err:
	if (pcm->streams[0].substream)
		snd_dma_free_pages(&pcm->streams[0].substream->dma_buffer);

 playback_alloc_err:
	dev_err(card->dev, "Cannot allocate buffer(s)\n");

	return -ENOMEM;
}

static void psc_dma_free(struct snd_pcm *pcm)
{
	struct snd_soc_pcm_runtime *rtd = pcm->private_data;
	struct snd_pcm_substream *substream;
	int stream;

	dev_dbg(rtd->socdev->dev, "psc_dma_free(pcm=%p)\n", pcm);

	for (stream = 0; stream < 2; stream++) {
		substream = pcm->streams[stream].substream;
		if (substream) {
			snd_dma_free_pages(&substream->dma_buffer);
			substream->dma_buffer.area = NULL;
			substream->dma_buffer.addr = 0;
		}
	}
}

struct snd_soc_platform mpc5200_audio_dma_platform = {
	.name = "mpc5200-psc-audio",
	.pcm_ops = &psc_dma_ops,
	.pcm_new = &psc_dma_new,
	.pcm_free = &psc_dma_free,
};
EXPORT_SYMBOL_GPL(mpc5200_audio_dma_platform);

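/*
 * Set up and tear down the DMA engine for one PSC.  These two exported
 * functions are meant to be called from the probe/remove paths of the CPU
 * DAI driver (e.g. the PSC I2S or AC97 driver) that owns the PSC device node.
 */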
int mpc5200_audio_dma_create(struct of_device *op)
{
	phys_addr_t fifo;
	struct psc_dma *psc_dma;
	struct resource res;
	int size, irq, rc;
	const __be32 *prop;
	void __iomem *regs;

	/* Fetch the registers and IRQ of the PSC */
	irq = irq_of_parse_and_map(op->node, 0);
	if (of_address_to_resource(op->node, 0, &res)) {
		dev_err(&op->dev, "Missing reg property\n");
		return -ENODEV;
	}
	regs = ioremap(res.start, 1 + res.end - res.start);
	if (!regs) {
		dev_err(&op->dev, "Could not map registers\n");
		return -ENODEV;
	}

	/* Allocate and initialize the driver private data */
	psc_dma = kzalloc(sizeof *psc_dma, GFP_KERNEL);
	if (!psc_dma) {
		iounmap(regs);
		return -ENOMEM;
	}

	/* Get the PSC ID */
	prop = of_get_property(op->node, "cell-index", &size);
	if (!prop || size < sizeof *prop) {
		/* Don't leak the mapping or private data on error */
		iounmap(regs);
		kfree(psc_dma);
		return -ENODEV;
	}

	spin_lock_init(&psc_dma->lock);
	mutex_init(&psc_dma->mutex);
	psc_dma->id = be32_to_cpu(*prop);
	psc_dma->irq = irq;
	psc_dma->psc_regs = regs;
	psc_dma->fifo_regs = regs + sizeof *psc_dma->psc_regs;
	psc_dma->dev = &op->dev;
	psc_dma->playback.psc_dma = psc_dma;
	psc_dma->capture.psc_dma = psc_dma;
	snprintf(psc_dma->name, sizeof psc_dma->name, "PSC%u", psc_dma->id);

	/* Find the address of the fifo data registers and setup the
	 * DMA tasks */
	fifo = res.start + offsetof(struct mpc52xx_psc, buffer.buffer_32);
	psc_dma->capture.bcom_task =
		bcom_psc_gen_bd_rx_init(psc_dma->id, 10, fifo, 512);
	psc_dma->playback.bcom_task =
		bcom_psc_gen_bd_tx_init(psc_dma->id, 10, fifo);
	if (!psc_dma->capture.bcom_task ||
	    !psc_dma->playback.bcom_task) {
		dev_err(&op->dev, "Could not allocate bestcomm tasks\n");
		iounmap(regs);
		kfree(psc_dma);
		return -ENODEV;
	}

	/* Disable all interrupts and reset the PSC */
	out_be16(&psc_dma->psc_regs->isr_imr.imr, psc_dma->imr);
	/* reset receiver */
	out_8(&psc_dma->psc_regs->command, MPC52xx_PSC_RST_RX);
	/* reset transmitter */
	out_8(&psc_dma->psc_regs->command, MPC52xx_PSC_RST_TX);
	/* reset error */
	out_8(&psc_dma->psc_regs->command, MPC52xx_PSC_RST_ERR_STAT);
	/* reset mode */
	out_8(&psc_dma->psc_regs->command, MPC52xx_PSC_SEL_MODE_REG_1);

	/* Set up mode register;
	 * First write: RxRdy (FIFO Alarm) generates rx FIFO irq
	 * Second write: register Normal mode for non loopback
	 */
	out_8(&psc_dma->psc_regs->mode, 0);
	out_8(&psc_dma->psc_regs->mode, 0);

	/* Set the TX and RX fifo alarm thresholds */
	out_be16(&psc_dma->fifo_regs->rfalarm, 0x100);
	out_8(&psc_dma->fifo_regs->rfcntl, 0x4);
	out_be16(&psc_dma->fifo_regs->tfalarm, 0x100);
	out_8(&psc_dma->fifo_regs->tfcntl, 0x7);

	/* Look up the IRQ numbers */
	psc_dma->playback.irq =
		bcom_get_task_irq(psc_dma->playback.bcom_task);
	psc_dma->capture.irq =
		bcom_get_task_irq(psc_dma->capture.bcom_task);

	rc = request_irq(psc_dma->irq, &psc_dma_status_irq, IRQF_SHARED,
			 "psc-dma-status", psc_dma);
	rc |= request_irq(psc_dma->capture.irq,
			  &psc_dma_bcom_irq_rx, IRQF_SHARED,
			  "psc-dma-capture", &psc_dma->capture);
	rc |= request_irq(psc_dma->playback.irq,
			  &psc_dma_bcom_irq_tx, IRQF_SHARED,
			  "psc-dma-playback", &psc_dma->playback);
	if (rc) {
		free_irq(psc_dma->irq, psc_dma);
		free_irq(psc_dma->capture.irq,
			 &psc_dma->capture);
		free_irq(psc_dma->playback.irq,
			 &psc_dma->playback);
		return -ENODEV;
	}

	/* Save what we've done so it can be found again later */
	dev_set_drvdata(&op->dev, psc_dma);

	/* Tell the ASoC OF helpers about it */
	return snd_soc_register_platform(&mpc5200_audio_dma_platform);
}
EXPORT_SYMBOL_GPL(mpc5200_audio_dma_create);

int mpc5200_audio_dma_destroy(struct of_device *op)
{
	struct psc_dma *psc_dma = dev_get_drvdata(&op->dev);

	dev_dbg(&op->dev, "mpc5200_audio_dma_destroy()\n");

	snd_soc_unregister_platform(&mpc5200_audio_dma_platform);

	bcom_gen_bd_rx_release(psc_dma->capture.bcom_task);
	bcom_gen_bd_tx_release(psc_dma->playback.bcom_task);

	/* Release irqs */
	free_irq(psc_dma->irq, psc_dma);
	free_irq(psc_dma->capture.irq, &psc_dma->capture);
	free_irq(psc_dma->playback.irq, &psc_dma->playback);

	iounmap(psc_dma->psc_regs);
	kfree(psc_dma);
	dev_set_drvdata(&op->dev, NULL);

	return 0;
}
EXPORT_SYMBOL_GPL(mpc5200_audio_dma_destroy);

MODULE_AUTHOR("Grant Likely <grant.likely@secretlab.ca>");
MODULE_DESCRIPTION("Freescale MPC5200 PSC in DMA mode ASoC Driver");
MODULE_LICENSE("GPL");