/*
 * Synopsys DesignWare Multimedia Card Interface driver
 *  (Based on NXP driver for lpc 31xx)
 *
 * Copyright (C) 2009 NXP Semiconductors
 * Copyright (C) 2009, 2010 Imagination Technologies Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/blkdev.h>
#include <linux/clk.h>
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/sdio.h>
#include <linux/mmc/dw_mmc.h>
#include <linux/bitops.h>
#include <linux/regulator/consumer.h>
#include <linux/workqueue.h>
#include <linux/of.h>
#include <linux/of_gpio.h>
#include <linux/mmc/slot-gpio.h>

#include "dw_mmc.h"

/* Common flag combinations */
#define DW_MCI_DATA_ERROR_FLAGS	(SDMMC_INT_DRTO | SDMMC_INT_DCRC | \
				 SDMMC_INT_HTO | SDMMC_INT_SBE | \
				 SDMMC_INT_EBE)
#define DW_MCI_CMD_ERROR_FLAGS	(SDMMC_INT_RTO | SDMMC_INT_RCRC | \
				 SDMMC_INT_RESP_ERR)
#define DW_MCI_ERROR_FLAGS	(DW_MCI_DATA_ERROR_FLAGS | \
				 DW_MCI_CMD_ERROR_FLAGS | SDMMC_INT_HLE)
#define DW_MCI_SEND_STATUS	1
#define DW_MCI_RECV_STATUS	2
#define DW_MCI_DMA_THRESHOLD	16

#define DW_MCI_FREQ_MAX	200000000	/* unit: HZ */
#define DW_MCI_FREQ_MIN	400000		/* unit: HZ */

#ifdef CONFIG_MMC_DW_IDMAC
#define IDMAC_INT_CLR		(SDMMC_IDMAC_INT_AI | SDMMC_IDMAC_INT_NI | \
				 SDMMC_IDMAC_INT_CES | SDMMC_IDMAC_INT_DU | \
				 SDMMC_IDMAC_INT_FBE | SDMMC_IDMAC_INT_RI | \
				 SDMMC_IDMAC_INT_TI)

struct idmac_desc {
	u32		des0;	/* Control Descriptor */
#define IDMAC_DES0_DIC	BIT(1)
#define IDMAC_DES0_LD	BIT(2)
#define IDMAC_DES0_FD	BIT(3)
#define IDMAC_DES0_CH	BIT(4)
#define IDMAC_DES0_ER	BIT(5)
#define IDMAC_DES0_CES	BIT(30)
#define IDMAC_DES0_OWN	BIT(31)

	u32		des1;	/* Buffer sizes */
#define IDMAC_SET_BUFFER1_SIZE(d, s) \
	((d)->des1 = ((d)->des1 & 0x03ffe000) | ((s) & 0x1fff))

	u32		des2;	/* buffer 1 physical address */

	u32		des3;	/* buffer 2 physical address */
};
#endif /* CONFIG_MMC_DW_IDMAC */

static const u8 tuning_blk_pattern_4bit[] = {
	0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc,
	0xc3, 0x3c, 0xcc, 0xff, 0xfe, 0xff, 0xfe, 0xef,
	0xff, 0xdf, 0xff, 0xdd, 0xff, 0xfb, 0xff, 0xfb,
	0xbf, 0xff, 0x7f, 0xff, 0x77, 0xf7, 0xbd, 0xef,
	0xff, 0xf0, 0xff, 0xf0, 0x0f, 0xfc, 0xcc, 0x3c,
	0xcc, 0x33, 0xcc, 0xcf, 0xff, 0xef, 0xff, 0xee,
	0xff, 0xfd, 0xff, 0xfd, 0xdf, 0xff, 0xbf, 0xff,
	0xbb, 0xff, 0xf7, 0xff, 0xf7, 0x7f, 0x7b, 0xde,
};

static const u8 tuning_blk_pattern_8bit[] = {
	0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00,
	0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 0xcc,
	0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 0xff,
	0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 0xff,
	0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 0xdd,
	0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 0xbb,
	0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 0xff,
	0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 0xff,
	0xff, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00,
	0x00, 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc,
	0xcc, 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff,
	0xff, 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee,
	0xff, 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd,
	0xdd, 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff,
	0xbb, 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff,
	0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee,
};

static inline bool dw_mci_fifo_reset(struct dw_mci *host);
static inline bool dw_mci_ctrl_all_reset(struct dw_mci *host);

#if defined(CONFIG_DEBUG_FS)
static int dw_mci_req_show(struct seq_file *s, void *v)
{
	struct dw_mci_slot *slot = s->private;
	struct mmc_request *mrq;
	struct mmc_command *cmd;
	struct mmc_command *stop;
	struct mmc_data *data;

	/* Make sure we get a consistent snapshot */
	spin_lock_bh(&slot->host->lock);
	mrq = slot->mrq;

	if (mrq) {
		cmd = mrq->cmd;
		data = mrq->data;
		stop = mrq->stop;

		if (cmd)
			seq_printf(s,
				   "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
				   cmd->opcode, cmd->arg, cmd->flags,
				   cmd->resp[0], cmd->resp[1], cmd->resp[2],
				   cmd->resp[3], cmd->error);
		if (data)
			seq_printf(s, "DATA %u / %u * %u flg %x err %d\n",
				   data->bytes_xfered, data->blocks,
				   data->blksz, data->flags, data->error);
		if (stop)
			seq_printf(s,
				   "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
				   stop->opcode, stop->arg, stop->flags,
				   stop->resp[0], stop->resp[1], stop->resp[2],
				   stop->resp[3], stop->error);
	}

	spin_unlock_bh(&slot->host->lock);

	return 0;
}

static int dw_mci_req_open(struct inode *inode, struct file *file)
{
	return single_open(file, dw_mci_req_show, inode->i_private);
}

static const struct file_operations dw_mci_req_fops = {
	.owner		= THIS_MODULE,
	.open		= dw_mci_req_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int dw_mci_regs_show(struct seq_file *s, void *v)
{
	seq_printf(s, "STATUS:\t0x%08x\n", SDMMC_STATUS);
	seq_printf(s, "RINTSTS:\t0x%08x\n", SDMMC_RINTSTS);
	seq_printf(s, "CMD:\t0x%08x\n", SDMMC_CMD);
	seq_printf(s, "CTRL:\t0x%08x\n", SDMMC_CTRL);
	seq_printf(s, "INTMASK:\t0x%08x\n", SDMMC_INTMASK);
	seq_printf(s, "CLKENA:\t0x%08x\n", SDMMC_CLKENA);

	return 0;
}

static int dw_mci_regs_open(struct inode *inode, struct file *file)
{
	return single_open(file, dw_mci_regs_show, inode->i_private);
}

static const struct file_operations dw_mci_regs_fops = {
	.owner		= THIS_MODULE,
	.open		= dw_mci_regs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static void dw_mci_init_debugfs(struct dw_mci_slot *slot)
{
	struct mmc_host	*mmc = slot->mmc;
	struct dw_mci *host = slot->host;
	struct dentry *root;
	struct dentry *node;

	root = mmc->debugfs_root;
	if (!root)
		return;

	node = debugfs_create_file("regs", S_IRUSR, root, host,
				   &dw_mci_regs_fops);
	if (!node)
		goto err;

	node = debugfs_create_file("req", S_IRUSR, root, slot,
				   &dw_mci_req_fops);
	if (!node)
		goto err;

	node = debugfs_create_u32("state", S_IRUSR, root, (u32 *)&host->state);
	if (!node)
		goto err;

	node = debugfs_create_x32("pending_events", S_IRUSR, root,
				  (u32 *)&host->pending_events);
	if (!node)
		goto err;

	node = debugfs_create_x32("completed_events", S_IRUSR, root,
				  (u32 *)&host->completed_events);
	if (!node)
		goto err;

	return;

err:
	dev_err(&mmc->class_dev, "failed to initialize debugfs for slot\n");
}
#endif /* defined(CONFIG_DEBUG_FS) */

static void dw_mci_set_timeout(struct dw_mci *host)
{
	/* timeout (maximum) */
	mci_writel(host, TMOUT, 0xffffffff);
}

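/*
 * Build the CMD register value for an mmc_command: response type, CRC
 * check, data transfer direction and stop/abort handling, plus any
 * variant-specific bits added by drv_data->prepare_command().
 */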
static u32 dw_mci_prepare_command(struct mmc_host *mmc, struct mmc_command *cmd)
{
	struct mmc_data	*data;
	struct dw_mci_slot *slot = mmc_priv(mmc);
	const struct dw_mci_drv_data *drv_data = slot->host->drv_data;
	u32 cmdr;
	cmd->error = -EINPROGRESS;

	cmdr = cmd->opcode;

	if (cmd->opcode == MMC_STOP_TRANSMISSION ||
	    cmd->opcode == MMC_GO_IDLE_STATE ||
	    cmd->opcode == MMC_GO_INACTIVE_STATE ||
	    (cmd->opcode == SD_IO_RW_DIRECT &&
	     ((cmd->arg >> 9) & 0x1FFFF) == SDIO_CCCR_ABORT))
		cmdr |= SDMMC_CMD_STOP;
	else
		if (cmd->opcode != MMC_SEND_STATUS && cmd->data)
			cmdr |= SDMMC_CMD_PRV_DAT_WAIT;

	if (cmd->flags & MMC_RSP_PRESENT) {
		/* We expect a response, so set this bit */
		cmdr |= SDMMC_CMD_RESP_EXP;
		if (cmd->flags & MMC_RSP_136)
			cmdr |= SDMMC_CMD_RESP_LONG;
	}

	if (cmd->flags & MMC_RSP_CRC)
		cmdr |= SDMMC_CMD_RESP_CRC;

	data = cmd->data;
	if (data) {
		cmdr |= SDMMC_CMD_DAT_EXP;
		if (data->flags & MMC_DATA_STREAM)
			cmdr |= SDMMC_CMD_STRM_MODE;
		if (data->flags & MMC_DATA_WRITE)
			cmdr |= SDMMC_CMD_DAT_WR;
	}

	if (drv_data && drv_data->prepare_command)
		drv_data->prepare_command(slot->host, &cmdr);

	return cmdr;
}

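/*
 * Prepare the stop/abort command used to terminate a data transfer:
 * CMD12 (STOP_TRANSMISSION) for block reads/writes, or an SDIO CCCR
 * abort via CMD52 for SD_IO_RW_EXTENDED transfers.
 */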
static u32 dw_mci_prep_stop_abort(struct dw_mci *host, struct mmc_command *cmd)
{
	struct mmc_command *stop;
	u32 cmdr;

	if (!cmd->data)
		return 0;

	stop = &host->stop_abort;
	cmdr = cmd->opcode;
	memset(stop, 0, sizeof(struct mmc_command));

	if (cmdr == MMC_READ_SINGLE_BLOCK ||
	    cmdr == MMC_READ_MULTIPLE_BLOCK ||
	    cmdr == MMC_WRITE_BLOCK ||
	    cmdr == MMC_WRITE_MULTIPLE_BLOCK) {
		stop->opcode = MMC_STOP_TRANSMISSION;
		stop->arg = 0;
		stop->flags = MMC_RSP_R1B | MMC_CMD_AC;
	} else if (cmdr == SD_IO_RW_EXTENDED) {
		stop->opcode = SD_IO_RW_DIRECT;
		stop->arg |= (1 << 31) | (0 << 28) | (SDIO_CCCR_ABORT << 9) |
			     ((cmd->arg >> 28) & 0x7);
		stop->flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_AC;
	} else {
		return 0;
	}

	cmdr = stop->opcode | SDMMC_CMD_STOP |
		SDMMC_CMD_RESP_CRC | SDMMC_CMD_RESP_EXP;

	return cmdr;
}

static void dw_mci_start_command(struct dw_mci *host,
				 struct mmc_command *cmd, u32 cmd_flags)
{
	host->cmd = cmd;
	dev_vdbg(host->dev,
		 "start command: ARGR=0x%08x CMDR=0x%08x\n",
		 cmd->arg, cmd_flags);

	mci_writel(host, CMDARG, cmd->arg);
	wmb();

	mci_writel(host, CMD, cmd_flags | SDMMC_CMD_START);
}

static inline void send_stop_abort(struct dw_mci *host, struct mmc_data *data)
{
	struct mmc_command *stop = data->stop ? data->stop : &host->stop_abort;
	dw_mci_start_command(host, stop, host->stop_cmdr);
}

/* DMA interface functions */
static void dw_mci_stop_dma(struct dw_mci *host)
{
	if (host->using_dma) {
		host->dma_ops->stop(host);
		host->dma_ops->cleanup(host);
	}

	/* Data transfer was stopped by the interrupt handler */
	set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
}

static int dw_mci_get_dma_dir(struct mmc_data *data)
{
	if (data->flags & MMC_DATA_WRITE)
		return DMA_TO_DEVICE;
	else
		return DMA_FROM_DEVICE;
}

#ifdef CONFIG_MMC_DW_IDMAC
static void dw_mci_dma_cleanup(struct dw_mci *host)
{
	struct mmc_data *data = host->data;

	if (data)
		if (!data->host_cookie)
			dma_unmap_sg(host->dev,
				     data->sg,
				     data->sg_len,
				     dw_mci_get_dma_dir(data));
}

static void dw_mci_idmac_reset(struct dw_mci *host)
{
	u32 bmod = mci_readl(host, BMOD);
	/* Software reset of DMA */
	bmod |= SDMMC_IDMAC_SWRESET;
	mci_writel(host, BMOD, bmod);
}

static void dw_mci_idmac_stop_dma(struct dw_mci *host)
{
	u32 temp;

	/* Disable and reset the IDMAC interface */
	temp = mci_readl(host, CTRL);
	temp &= ~SDMMC_CTRL_USE_IDMAC;
	temp |= SDMMC_CTRL_DMA_RESET;
	mci_writel(host, CTRL, temp);

	/* Stop the IDMAC running */
	temp = mci_readl(host, BMOD);
	temp &= ~(SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB);
	temp |= SDMMC_IDMAC_SWRESET;
	mci_writel(host, BMOD, temp);
}

static void dw_mci_idmac_complete_dma(struct dw_mci *host)
{
	struct mmc_data *data = host->data;

	dev_vdbg(host->dev, "DMA complete\n");

	host->dma_ops->cleanup(host);

	/*
	 * If the card was removed, data will be NULL. No point in trying to
	 * send the stop command or waiting for NBUSY in this case.
	 */
	if (data) {
		set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
		tasklet_schedule(&host->tasklet);
	}
}

static void dw_mci_translate_sglist(struct dw_mci *host, struct mmc_data *data,
				    unsigned int sg_len)
{
	int i;
	struct idmac_desc *desc = host->sg_cpu;

	for (i = 0; i < sg_len; i++, desc++) {
		unsigned int length = sg_dma_len(&data->sg[i]);
		u32 mem_addr = sg_dma_address(&data->sg[i]);

		/* Set the OWN bit and disable interrupts for this descriptor */
		desc->des0 = IDMAC_DES0_OWN | IDMAC_DES0_DIC | IDMAC_DES0_CH;

		/* Buffer length */
		IDMAC_SET_BUFFER1_SIZE(desc, length);

		/* Physical address to DMA to/from */
		desc->des2 = mem_addr;
	}

	/* Set first descriptor */
	desc = host->sg_cpu;
	desc->des0 |= IDMAC_DES0_FD;

	/* Set last descriptor */
	desc = host->sg_cpu + (i - 1) * sizeof(struct idmac_desc);
	desc->des0 &= ~(IDMAC_DES0_CH | IDMAC_DES0_DIC);
	desc->des0 |= IDMAC_DES0_LD;

	wmb();
}

static void dw_mci_idmac_start_dma(struct dw_mci *host, unsigned int sg_len)
{
	u32 temp;

	dw_mci_translate_sglist(host, host->data, sg_len);

	/* Select IDMAC interface */
	temp = mci_readl(host, CTRL);
	temp |= SDMMC_CTRL_USE_IDMAC;
	mci_writel(host, CTRL, temp);

	wmb();

	/* Enable the IDMAC */
	temp = mci_readl(host, BMOD);
	temp |= SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB;
	mci_writel(host, BMOD, temp);

	/* Start it running */
	mci_writel(host, PLDMND, 1);
}

static int dw_mci_idmac_init(struct dw_mci *host)
{
	struct idmac_desc *p;
	int i;

	/* Number of descriptors in the ring buffer */
	host->ring_size = PAGE_SIZE / sizeof(struct idmac_desc);

	/* Forward link the descriptor list */
	for (i = 0, p = host->sg_cpu; i < host->ring_size - 1; i++, p++)
		p->des3 = host->sg_dma + (sizeof(struct idmac_desc) * (i + 1));

	/* Set the last descriptor as the end-of-ring descriptor */
	p->des3 = host->sg_dma;
	p->des0 = IDMAC_DES0_ER;

	dw_mci_idmac_reset(host);

	/* Mask out interrupts - get Tx & Rx complete only */
	mci_writel(host, IDSTS, IDMAC_INT_CLR);
	mci_writel(host, IDINTEN, SDMMC_IDMAC_INT_NI | SDMMC_IDMAC_INT_RI |
		   SDMMC_IDMAC_INT_TI);

	/* Set the descriptor base address */
	mci_writel(host, DBADDR, host->sg_dma);
	return 0;
}

static const struct dw_mci_dma_ops dw_mci_idmac_ops = {
	.init = dw_mci_idmac_init,
	.start = dw_mci_idmac_start_dma,
	.stop = dw_mci_idmac_stop_dma,
	.complete = dw_mci_idmac_complete_dma,
	.cleanup = dw_mci_dma_cleanup,
};
#endif /* CONFIG_MMC_DW_IDMAC */

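/*
 * Map the request's scatterlist for DMA. Returns the number of mapped
 * segments, or a negative error if the transfer is too short or not
 * word-aligned and must fall back to PIO.
 */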
static int dw_mci_pre_dma_transfer(struct dw_mci *host,
				   struct mmc_data *data,
				   bool next)
{
	struct scatterlist *sg;
	unsigned int i, sg_len;

	if (!next && data->host_cookie)
		return data->host_cookie;

	/*
	 * We don't do DMA on "complex" transfers, i.e. with
	 * non-word-aligned buffers or lengths. Also, we don't bother
	 * with all the DMA setup overhead for short transfers.
	 */
	if (data->blocks * data->blksz < DW_MCI_DMA_THRESHOLD)
		return -EINVAL;

	if (data->blksz & 3)
		return -EINVAL;

	for_each_sg(data->sg, sg, data->sg_len, i) {
		if (sg->offset & 3 || sg->length & 3)
			return -EINVAL;
	}

	sg_len = dma_map_sg(host->dev,
			    data->sg,
			    data->sg_len,
			    dw_mci_get_dma_dir(data));
	if (sg_len == 0)
		return -EINVAL;

	if (next)
		data->host_cookie = sg_len;

	return sg_len;
}

static void dw_mci_pre_req(struct mmc_host *mmc,
			   struct mmc_request *mrq,
			   bool is_first_req)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct mmc_data *data = mrq->data;

	if (!slot->host->use_dma || !data)
		return;

	if (data->host_cookie) {
		data->host_cookie = 0;
		return;
	}

	if (dw_mci_pre_dma_transfer(slot->host, mrq->data, 1) < 0)
		data->host_cookie = 0;
}

static void dw_mci_post_req(struct mmc_host *mmc,
			    struct mmc_request *mrq,
			    int err)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct mmc_data *data = mrq->data;

	if (!slot->host->use_dma || !data)
		return;

	if (data->host_cookie)
		dma_unmap_sg(slot->host->dev,
			     data->sg,
			     data->sg_len,
			     dw_mci_get_dma_dir(data));
	data->host_cookie = 0;
}

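/*
 * Pick the IDMAC burst size (MSIZE) and RX/TX FIFO watermarks that fit
 * the current block size and FIFO depth, and program FIFOTH accordingly.
 */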
static void dw_mci_adjust_fifoth(struct dw_mci *host, struct mmc_data *data)
{
#ifdef CONFIG_MMC_DW_IDMAC
	unsigned int blksz = data->blksz;
	const u32 mszs[] = {1, 4, 8, 16, 32, 64, 128, 256};
	u32 fifo_width = 1 << host->data_shift;
	u32 blksz_depth = blksz / fifo_width, fifoth_val;
	u32 msize = 0, rx_wmark = 1, tx_wmark, tx_wmark_invers;
	int idx = (sizeof(mszs) / sizeof(mszs[0])) - 1;

	tx_wmark = (host->fifo_depth) / 2;
	tx_wmark_invers = host->fifo_depth - tx_wmark;

	/*
	 * MSIZE is '1',
	 * if blksz is not a multiple of the FIFO width
	 */
	if (blksz % fifo_width) {
		msize = 0;
		rx_wmark = 1;
		goto done;
	}

	do {
		if (!((blksz_depth % mszs[idx]) ||
		      (tx_wmark_invers % mszs[idx]))) {
			msize = idx;
			rx_wmark = mszs[idx] - 1;
			break;
		}
	} while (--idx > 0);
	/*
	 * If idx is '0', it won't be tried
	 * Thus, initial values are used
	 */
done:
	fifoth_val = SDMMC_SET_FIFOTH(msize, rx_wmark, tx_wmark);
	mci_writel(host, FIFOTH, fifoth_val);
#endif
}

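/*
 * Program the card read threshold (CDTHRCTL) for HS200/SDR104 reads when
 * a whole block fits in the FIFO; otherwise disable the threshold.
 */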
static void dw_mci_ctrl_rd_thld(struct dw_mci *host, struct mmc_data *data)
{
	unsigned int blksz = data->blksz;
	u32 blksz_depth, fifo_depth;
	u16 thld_size;

	WARN_ON(!(data->flags & MMC_DATA_READ));

	if (host->timing != MMC_TIMING_MMC_HS200 &&
	    host->timing != MMC_TIMING_UHS_SDR104)
		goto disable;

	blksz_depth = blksz / (1 << host->data_shift);
	fifo_depth = host->fifo_depth;

	if (blksz_depth > fifo_depth)
		goto disable;

	/*
	 * If (blksz_depth) >= (fifo_depth >> 1), should be 'thld_size <= blksz'
	 * If (blksz_depth) < (fifo_depth >> 1), should be thld_size = blksz
	 * Currently just choose blksz.
	 */
	thld_size = blksz;
	mci_writel(host, CDTHRCTL, SDMMC_SET_RD_THLD(thld_size, 1));
	return;

disable:
	mci_writel(host, CDTHRCTL, SDMMC_SET_RD_THLD(0, 0));
}

static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data)
{
	int sg_len;
	u32 temp;

	host->using_dma = 0;

	/* If we don't have a channel, we can't do DMA */
	if (!host->use_dma)
		return -ENODEV;

	sg_len = dw_mci_pre_dma_transfer(host, data, 0);
	if (sg_len < 0) {
		host->dma_ops->stop(host);
		return sg_len;
	}

	host->using_dma = 1;

	dev_vdbg(host->dev,
		 "sd sg_cpu: %#lx sg_dma: %#lx sg_len: %d\n",
		 (unsigned long)host->sg_cpu, (unsigned long)host->sg_dma,
		 sg_len);

	/*
	 * Decide the MSIZE and RX/TX Watermark.
	 * If current block size is same with previous size,
	 * no need to update fifoth.
	 */
	if (host->prev_blksz != data->blksz)
		dw_mci_adjust_fifoth(host, data);

	/* Enable the DMA interface */
	temp = mci_readl(host, CTRL);
	temp |= SDMMC_CTRL_DMA_ENABLE;
	mci_writel(host, CTRL, temp);

	/* Disable RX/TX IRQs, let DMA handle it */
	temp = mci_readl(host, INTMASK);
	temp &= ~(SDMMC_INT_RXDR | SDMMC_INT_TXDR);
	mci_writel(host, INTMASK, temp);

	host->dma_ops->start(host, sg_len);

	return 0;
}

static void dw_mci_submit_data(struct dw_mci *host, struct mmc_data *data)
{
	u32 temp;

	data->error = -EINPROGRESS;

	WARN_ON(host->data);
	host->sg = NULL;
	host->data = data;

	if (data->flags & MMC_DATA_READ) {
		host->dir_status = DW_MCI_RECV_STATUS;
		dw_mci_ctrl_rd_thld(host, data);
	} else {
		host->dir_status = DW_MCI_SEND_STATUS;
	}

	if (dw_mci_submit_data_dma(host, data)) {
		int flags = SG_MITER_ATOMIC;
		if (host->data->flags & MMC_DATA_READ)
			flags |= SG_MITER_TO_SG;
		else
			flags |= SG_MITER_FROM_SG;

		sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
		host->sg = data->sg;
		host->part_buf_start = 0;
		host->part_buf_count = 0;

		mci_writel(host, RINTSTS, SDMMC_INT_TXDR | SDMMC_INT_RXDR);
		temp = mci_readl(host, INTMASK);
		temp |= SDMMC_INT_TXDR | SDMMC_INT_RXDR;
		mci_writel(host, INTMASK, temp);

		temp = mci_readl(host, CTRL);
		temp &= ~SDMMC_CTRL_DMA_ENABLE;
		mci_writel(host, CTRL, temp);

		/*
		 * Use the initial fifoth_val for PIO mode.
		 * If next issued data may be transferred by DMA mode,
		 * prev_blksz should be invalidated.
		 */
		mci_writel(host, FIFOTH, host->fifoth_val);
		host->prev_blksz = 0;
	} else {
		/*
		 * Keep the current block size.
		 * It will be used to decide whether to update
		 * fifoth register next time.
		 */
		host->prev_blksz = data->blksz;
	}
}

static void mci_send_cmd(struct dw_mci_slot *slot, u32 cmd, u32 arg)
{
	struct dw_mci *host = slot->host;
	unsigned long timeout = jiffies + msecs_to_jiffies(500);
	unsigned int cmd_status = 0;

	mci_writel(host, CMDARG, arg);
	wmb();
	mci_writel(host, CMD, SDMMC_CMD_START | cmd);

	while (time_before(jiffies, timeout)) {
		cmd_status = mci_readl(host, CMD);
		if (!(cmd_status & SDMMC_CMD_START))
			return;
	}
	dev_err(&slot->mmc->class_dev,
		"Timeout sending command (cmd %#x arg %#x status %#x)\n",
		cmd, arg, cmd_status);
}

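/*
 * Program the card clock (CLKDIV/CLKENA) and bus width for this slot and
 * inform the CIU; the clock is only reprogrammed when it changes, unless
 * force_clkinit is set.
 */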
static void dw_mci_setup_bus(struct dw_mci_slot *slot, bool force_clkinit)
{
	struct dw_mci *host = slot->host;
	unsigned int clock = slot->clock;
	u32 div;
	u32 clk_en_a;

	if (!clock) {
		mci_writel(host, CLKENA, 0);
		mci_send_cmd(slot,
			     SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
	} else if (clock != host->current_speed || force_clkinit) {
		div = host->bus_hz / clock;
		if (host->bus_hz % clock && host->bus_hz > clock)
			/*
			 * move the + 1 after the divide to prevent
			 * over-clocking the card.
			 */
			div += 1;

		div = (host->bus_hz != clock) ? DIV_ROUND_UP(div, 2) : 0;

		if ((clock << div) != slot->__clk_old || force_clkinit)
			dev_info(&slot->mmc->class_dev,
				 "Bus speed (slot %d) = %dHz (slot req %dHz, actual %dHz div = %d)\n",
				 slot->id, host->bus_hz, clock,
				 div ? ((host->bus_hz / div) >> 1) :
				 host->bus_hz, div);

		/* disable clock */
		mci_writel(host, CLKENA, 0);
		mci_writel(host, CLKSRC, 0);

		/* inform CIU */
		mci_send_cmd(slot,
			     SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);

		/* set clock to desired speed */
		mci_writel(host, CLKDIV, div);

		/* inform CIU */
		mci_send_cmd(slot,
			     SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);

		/* enable clock; only low power if no SDIO */
		clk_en_a = SDMMC_CLKEN_ENABLE << slot->id;
		if (!(mci_readl(host, INTMASK) & SDMMC_INT_SDIO(slot->id)))
			clk_en_a |= SDMMC_CLKEN_LOW_PWR << slot->id;
		mci_writel(host, CLKENA, clk_en_a);

		/* inform CIU */
		mci_send_cmd(slot,
			     SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);

		/* keep the clock with reflecting clock divider */
		slot->__clk_old = clock << div;
	}

	host->current_speed = clock;

	/* Set the current slot bus width */
	mci_writel(host, CTYPE, (slot->ctype << slot->id));
}

static void __dw_mci_start_request(struct dw_mci *host,
				   struct dw_mci_slot *slot,
				   struct mmc_command *cmd)
{
	struct mmc_request *mrq;
	struct mmc_data	*data;
	u32 cmdflags;

	mrq = slot->mrq;
	if (host->pdata->select_slot)
		host->pdata->select_slot(slot->id);

	host->cur_slot = slot;
	host->mrq = mrq;

	host->pending_events = 0;
	host->completed_events = 0;
	host->cmd_status = 0;
	host->data_status = 0;
	host->dir_status = 0;

	data = cmd->data;
	if (data) {
		dw_mci_set_timeout(host);
		mci_writel(host, BYTCNT, data->blksz*data->blocks);
		mci_writel(host, BLKSIZ, data->blksz);
	}

	cmdflags = dw_mci_prepare_command(slot->mmc, cmd);

	/* this is the first command, send the initialization clock */
	if (test_and_clear_bit(DW_MMC_CARD_NEED_INIT, &slot->flags))
		cmdflags |= SDMMC_CMD_INIT;

	if (data) {
		dw_mci_submit_data(host, data);
		wmb();
	}

	dw_mci_start_command(host, cmd, cmdflags);

	if (mrq->stop)
		host->stop_cmdr = dw_mci_prepare_command(slot->mmc, mrq->stop);
	else
		host->stop_cmdr = dw_mci_prep_stop_abort(host, cmd);
}

static void dw_mci_start_request(struct dw_mci *host,
				 struct dw_mci_slot *slot)
{
	struct mmc_request *mrq = slot->mrq;
	struct mmc_command *cmd;

	cmd = mrq->sbc ? mrq->sbc : mrq->cmd;
	__dw_mci_start_request(host, slot, cmd);
}

/* must be called with host->lock held */
static void dw_mci_queue_request(struct dw_mci *host, struct dw_mci_slot *slot,
				 struct mmc_request *mrq)
{
	dev_vdbg(&slot->mmc->class_dev, "queue request: state=%d\n",
		 host->state);

	slot->mrq = mrq;

	if (host->state == STATE_IDLE) {
		host->state = STATE_SENDING_CMD;
		dw_mci_start_request(host, slot);
	} else {
		list_add_tail(&slot->queue_node, &host->queue);
	}
}

static void dw_mci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;

	WARN_ON(slot->mrq);

	/*
	 * The check for card presence and queueing of the request must be
	 * atomic, otherwise the card could be removed in between and the
	 * request wouldn't fail until another card was inserted.
	 */
	spin_lock_bh(&host->lock);

	if (!test_bit(DW_MMC_CARD_PRESENT, &slot->flags)) {
		spin_unlock_bh(&host->lock);
		mrq->cmd->error = -ENOMEDIUM;
		mmc_request_done(mmc, mrq);
		return;
	}

	dw_mci_queue_request(host, slot, mrq);

	spin_unlock_bh(&host->lock);
}

static void dw_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	const struct dw_mci_drv_data *drv_data = slot->host->drv_data;
	u32 regs;

	switch (ios->bus_width) {
	case MMC_BUS_WIDTH_4:
		slot->ctype = SDMMC_CTYPE_4BIT;
		break;
	case MMC_BUS_WIDTH_8:
		slot->ctype = SDMMC_CTYPE_8BIT;
		break;
	default:
		/* set default 1 bit mode */
		slot->ctype = SDMMC_CTYPE_1BIT;
	}

	regs = mci_readl(slot->host, UHS_REG);

	/* DDR mode set */
	if (ios->timing == MMC_TIMING_UHS_DDR50)
		regs |= ((0x1 << slot->id) << 16);
	else
		regs &= ~((0x1 << slot->id) << 16);

	mci_writel(slot->host, UHS_REG, regs);
	slot->host->timing = ios->timing;

	/*
	 * Use mirror of ios->clock to prevent race with mmc
	 * core ios update when finding the minimum.
	 */
	slot->clock = ios->clock;

	if (drv_data && drv_data->set_ios)
		drv_data->set_ios(slot->host, ios);

	/* Slot specific timing and width adjustment */
	dw_mci_setup_bus(slot, false);

	switch (ios->power_mode) {
	case MMC_POWER_UP:
		set_bit(DW_MMC_CARD_NEED_INIT, &slot->flags);
		/* Power up slot */
		if (slot->host->pdata->setpower)
			slot->host->pdata->setpower(slot->id, mmc->ocr_avail);
		regs = mci_readl(slot->host, PWREN);
		regs |= (1 << slot->id);
		mci_writel(slot->host, PWREN, regs);
		break;
	case MMC_POWER_OFF:
		/* Power down slot */
		if (slot->host->pdata->setpower)
			slot->host->pdata->setpower(slot->id, 0);
		regs = mci_readl(slot->host, PWREN);
		regs &= ~(1 << slot->id);
		mci_writel(slot->host, PWREN, regs);
		break;
	default:
		break;
	}
}

static int dw_mci_get_ro(struct mmc_host *mmc)
{
	int read_only;
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci_board *brd = slot->host->pdata;

	/* Use platform get_ro function, else try on board write protect */
	if (slot->quirks & DW_MCI_SLOT_QUIRK_NO_WRITE_PROTECT)
		read_only = 0;
	else if (brd->get_ro)
		read_only = brd->get_ro(slot->id);
	else if (gpio_is_valid(slot->wp_gpio))
		read_only = gpio_get_value(slot->wp_gpio);
	else
		read_only =
			mci_readl(slot->host, WRTPRT) & (1 << slot->id) ? 1 : 0;

	dev_dbg(&mmc->class_dev, "card is %s\n",
		read_only ? "read-only" : "read-write");

	return read_only;
}

static int dw_mci_get_cd(struct mmc_host *mmc)
{
	int present;
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci_board *brd = slot->host->pdata;
	int gpio_cd = !mmc_gpio_get_cd(mmc);

	/* Use platform get_cd function, else try onboard card detect */
	if (brd->quirks & DW_MCI_QUIRK_BROKEN_CARD_DETECTION)
		present = 1;
	else if (brd->get_cd)
		present = !brd->get_cd(slot->id);
	else if (!IS_ERR_VALUE(gpio_cd))
		present = !!gpio_cd;
	else
		present = (mci_readl(slot->host, CDETECT) & (1 << slot->id))
			== 0 ? 1 : 0;

	if (present) {
		set_bit(DW_MMC_CARD_PRESENT, &slot->flags);
		dev_dbg(&mmc->class_dev, "card is present\n");
	} else {
		clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);
		dev_dbg(&mmc->class_dev, "card is not present\n");
	}

	return present;
}

/*
 * Disable low power mode.
 *
 * Low power mode will stop the card clock when idle. According to the
 * description of the CLKENA register we should disable low power mode
 * for SDIO cards if we need SDIO interrupts to work.
 *
 * This function is fast if low power mode is already disabled.
 */
static void dw_mci_disable_low_power(struct dw_mci_slot *slot)
{
	struct dw_mci *host = slot->host;
	u32 clk_en_a;
	const u32 clken_low_pwr = SDMMC_CLKEN_LOW_PWR << slot->id;

	clk_en_a = mci_readl(host, CLKENA);

	if (clk_en_a & clken_low_pwr) {
		mci_writel(host, CLKENA, clk_en_a & ~clken_low_pwr);
		mci_send_cmd(slot, SDMMC_CMD_UPD_CLK |
			     SDMMC_CMD_PRV_DAT_WAIT, 0);
	}
}

static void dw_mci_enable_sdio_irq(struct mmc_host *mmc, int enb)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;
	u32 int_mask;

	/* Enable/disable Slot Specific SDIO interrupt */
	int_mask = mci_readl(host, INTMASK);
	if (enb) {
		/*
		 * Turn off low power mode if it was enabled.  This is a bit of
		 * a heavy operation and we disable / enable IRQs a lot, so
		 * we'll leave low power mode disabled and it will get
		 * re-enabled again in dw_mci_setup_bus().
		 */
		dw_mci_disable_low_power(slot);

		mci_writel(host, INTMASK,
			   (int_mask | SDMMC_INT_SDIO(slot->id)));
	} else {
		mci_writel(host, INTMASK,
			   (int_mask & ~SDMMC_INT_SDIO(slot->id)));
	}
}

static int dw_mci_execute_tuning(struct mmc_host *mmc, u32 opcode)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;
	const struct dw_mci_drv_data *drv_data = host->drv_data;
	struct dw_mci_tuning_data tuning_data;
	int err = -ENOSYS;

	if (opcode == MMC_SEND_TUNING_BLOCK_HS200) {
		if (mmc->ios.bus_width == MMC_BUS_WIDTH_8) {
			tuning_data.blk_pattern = tuning_blk_pattern_8bit;
			tuning_data.blksz = sizeof(tuning_blk_pattern_8bit);
		} else if (mmc->ios.bus_width == MMC_BUS_WIDTH_4) {
			tuning_data.blk_pattern = tuning_blk_pattern_4bit;
			tuning_data.blksz = sizeof(tuning_blk_pattern_4bit);
		} else {
			return -EINVAL;
		}
	} else if (opcode == MMC_SEND_TUNING_BLOCK) {
		tuning_data.blk_pattern = tuning_blk_pattern_4bit;
		tuning_data.blksz = sizeof(tuning_blk_pattern_4bit);
	} else {
		dev_err(host->dev,
			"Undefined command(%d) for tuning\n", opcode);
		return -EINVAL;
	}

	if (drv_data && drv_data->execute_tuning)
		err = drv_data->execute_tuning(slot, opcode, &tuning_data);
	return err;
}

static const struct mmc_host_ops dw_mci_ops = {
	.request		= dw_mci_request,
	.pre_req		= dw_mci_pre_req,
	.post_req		= dw_mci_post_req,
	.set_ios		= dw_mci_set_ios,
	.get_ro			= dw_mci_get_ro,
	.get_cd			= dw_mci_get_cd,
	.enable_sdio_irq	= dw_mci_enable_sdio_irq,
	.execute_tuning		= dw_mci_execute_tuning,
};

static void dw_mci_request_end(struct dw_mci *host, struct mmc_request *mrq)
	__releases(&host->lock)
	__acquires(&host->lock)
{
	struct dw_mci_slot *slot;
	struct mmc_host	*prev_mmc = host->cur_slot->mmc;

	WARN_ON(host->cmd || host->data);

	host->cur_slot->mrq = NULL;
	host->mrq = NULL;
	if (!list_empty(&host->queue)) {
		slot = list_entry(host->queue.next,
				  struct dw_mci_slot, queue_node);
		list_del(&slot->queue_node);
		dev_vdbg(host->dev, "list not empty: %s is next\n",
			 mmc_hostname(slot->mmc));
		host->state = STATE_SENDING_CMD;
		dw_mci_start_request(host, slot);
	} else {
		dev_vdbg(host->dev, "list empty\n");
		host->state = STATE_IDLE;
	}

	spin_unlock(&host->lock);
	mmc_request_done(prev_mmc, mrq);
	spin_lock(&host->lock);
}

static int dw_mci_command_complete(struct dw_mci *host, struct mmc_command *cmd)
{
	u32 status = host->cmd_status;

	host->cmd_status = 0;

	/* Read the response from the card (up to 16 bytes) */
	if (cmd->flags & MMC_RSP_PRESENT) {
		if (cmd->flags & MMC_RSP_136) {
			cmd->resp[3] = mci_readl(host, RESP0);
			cmd->resp[2] = mci_readl(host, RESP1);
			cmd->resp[1] = mci_readl(host, RESP2);
			cmd->resp[0] = mci_readl(host, RESP3);
		} else {
			cmd->resp[0] = mci_readl(host, RESP0);
			cmd->resp[1] = 0;
			cmd->resp[2] = 0;
			cmd->resp[3] = 0;
		}
	}

	if (status & SDMMC_INT_RTO)
		cmd->error = -ETIMEDOUT;
	else if ((cmd->flags & MMC_RSP_CRC) && (status & SDMMC_INT_RCRC))
		cmd->error = -EILSEQ;
	else if (status & SDMMC_INT_RESP_ERR)
		cmd->error = -EIO;
	else
		cmd->error = 0;

	if (cmd->error) {
		/* newer ip versions need a delay between retries */
		if (host->quirks & DW_MCI_QUIRK_RETRY_DELAY)
			mdelay(20);
	}

	return cmd->error;
}

static int dw_mci_data_complete(struct dw_mci *host, struct mmc_data *data)
{
	u32 status = host->data_status;

	if (status & DW_MCI_DATA_ERROR_FLAGS) {
		if (status & SDMMC_INT_DRTO) {
			data->error = -ETIMEDOUT;
		} else if (status & SDMMC_INT_DCRC) {
			data->error = -EILSEQ;
		} else if (status & SDMMC_INT_EBE) {
			if (host->dir_status ==
				DW_MCI_SEND_STATUS) {
				/*
				 * No data CRC status was returned.
				 * The number of bytes transferred
				 * will be exaggerated in PIO mode.
				 */
				data->bytes_xfered = 0;
				data->error = -ETIMEDOUT;
			} else if (host->dir_status ==
					DW_MCI_RECV_STATUS) {
				data->error = -EIO;
			}
		} else {
			/* SDMMC_INT_SBE is included */
			data->error = -EIO;
		}

		dev_err(host->dev, "data error, status 0x%08x\n", status);

		/*
		 * After an error, there may be data lingering
		 * in the FIFO
		 */
		dw_mci_fifo_reset(host);
	} else {
		data->bytes_xfered = data->blocks * data->blksz;
		data->error = 0;
	}

	return data->error;
}

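/*
 * Request state machine, run in tasklet context: steps through the
 * command, data, busy and stop states as the interrupt handler posts
 * events to host->pending_events.
 */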
static void dw_mci_tasklet_func(unsigned long priv)
{
	struct dw_mci *host = (struct dw_mci *)priv;
	struct mmc_data	*data;
	struct mmc_command *cmd;
	struct mmc_request *mrq;
	enum dw_mci_state state;
	enum dw_mci_state prev_state;
	unsigned int err;

	spin_lock(&host->lock);

	state = host->state;
	data = host->data;
	mrq = host->mrq;

	do {
		prev_state = state;

		switch (state) {
		case STATE_IDLE:
			break;

		case STATE_SENDING_CMD:
			if (!test_and_clear_bit(EVENT_CMD_COMPLETE,
						&host->pending_events))
				break;

			cmd = host->cmd;
			host->cmd = NULL;
			set_bit(EVENT_CMD_COMPLETE, &host->completed_events);
			err = dw_mci_command_complete(host, cmd);
			if (cmd == mrq->sbc && !err) {
				prev_state = state = STATE_SENDING_CMD;
				__dw_mci_start_request(host, host->cur_slot,
						       mrq->cmd);
				goto unlock;
			}

			if (cmd->data && err) {
				dw_mci_stop_dma(host);
				send_stop_abort(host, data);
				state = STATE_SENDING_STOP;
				break;
			}

			if (!cmd->data || err) {
				dw_mci_request_end(host, mrq);
				goto unlock;
			}

			prev_state = state = STATE_SENDING_DATA;
			/* fall through */

		case STATE_SENDING_DATA:
			if (test_and_clear_bit(EVENT_DATA_ERROR,
					       &host->pending_events)) {
				dw_mci_stop_dma(host);
				send_stop_abort(host, data);
				state = STATE_DATA_ERROR;
				break;
			}

			if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
						&host->pending_events))
				break;

			set_bit(EVENT_XFER_COMPLETE, &host->completed_events);
			prev_state = state = STATE_DATA_BUSY;
			/* fall through */

		case STATE_DATA_BUSY:
			if (!test_and_clear_bit(EVENT_DATA_COMPLETE,
						&host->pending_events))
				break;

			host->data = NULL;
			set_bit(EVENT_DATA_COMPLETE, &host->completed_events);
			err = dw_mci_data_complete(host, data);

			if (!err) {
				if (!data->stop || mrq->sbc) {
					if (mrq->sbc)
						data->stop->error = 0;
					dw_mci_request_end(host, mrq);
					goto unlock;
				}

				/* stop command for open-ended transfer */
				if (data->stop)
					send_stop_abort(host, data);
			}

			/*
			 * If err is non-zero, the stop/abort command has
			 * already been issued.
			 */
			prev_state = state = STATE_SENDING_STOP;

			/* fall through */

		case STATE_SENDING_STOP:
			if (!test_and_clear_bit(EVENT_CMD_COMPLETE,
						&host->pending_events))
				break;

			/* CMD error in data command */
			if (mrq->cmd->error && mrq->data)
				dw_mci_fifo_reset(host);

			host->cmd = NULL;
			host->data = NULL;

			if (mrq->stop)
				dw_mci_command_complete(host, mrq->stop);
			else
				host->cmd_status = 0;

			dw_mci_request_end(host, mrq);
			goto unlock;

		case STATE_DATA_ERROR:
			if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
						&host->pending_events))
				break;

			state = STATE_DATA_BUSY;
			break;
		}
	} while (state != prev_state);

	host->state = state;
unlock:
	spin_unlock(&host->lock);

}

/* push final bytes to part_buf, only use during push */
static void dw_mci_set_part_bytes(struct dw_mci *host, void *buf, int cnt)
{
	memcpy((void *)&host->part_buf, buf, cnt);
	host->part_buf_count = cnt;
}

/* append bytes to part_buf, only use during push */
static int dw_mci_push_part_bytes(struct dw_mci *host, void *buf, int cnt)
{
	cnt = min(cnt, (1 << host->data_shift) - host->part_buf_count);
	memcpy((void *)&host->part_buf + host->part_buf_count, buf, cnt);
	host->part_buf_count += cnt;
	return cnt;
}

/* pull first bytes from part_buf, only use during pull */
static int dw_mci_pull_part_bytes(struct dw_mci *host, void *buf, int cnt)
{
	cnt = min(cnt, (int)host->part_buf_count);
	if (cnt) {
		memcpy(buf, (void *)&host->part_buf + host->part_buf_start,
		       cnt);
		host->part_buf_count -= cnt;
		host->part_buf_start += cnt;
	}
	return cnt;
}

/* pull final bytes from the part_buf, assuming it's just been filled */
static void dw_mci_pull_final_bytes(struct dw_mci *host, void *buf, int cnt)
{
	memcpy(buf, &host->part_buf, cnt);
	host->part_buf_start = cnt;
	host->part_buf_count = (1 << host->data_shift) - cnt;
}

static void dw_mci_push_data16(struct dw_mci *host, void *buf, int cnt)
{
	struct mmc_data *data = host->data;
	int init_cnt = cnt;

	/* try and push anything in the part_buf */
	if (unlikely(host->part_buf_count)) {
		int len = dw_mci_push_part_bytes(host, buf, cnt);
		buf += len;
		cnt -= len;
		if (host->part_buf_count == 2) {
			mci_writew(host, DATA(host->data_offset),
				   host->part_buf16);
			host->part_buf_count = 0;
		}
	}
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	if (unlikely((unsigned long)buf & 0x1)) {
		while (cnt >= 2) {
			u16 aligned_buf[64];
			int len = min(cnt & -2, (int)sizeof(aligned_buf));
			int items = len >> 1;
			int i;
			/* memcpy from input buffer into aligned buffer */
			memcpy(aligned_buf, buf, len);
			buf += len;
			cnt -= len;
			/* push data from aligned buffer into fifo */
			for (i = 0; i < items; ++i)
				mci_writew(host, DATA(host->data_offset),
					   aligned_buf[i]);
		}
	} else
#endif
	{
		u16 *pdata = buf;
		for (; cnt >= 2; cnt -= 2)
			mci_writew(host, DATA(host->data_offset), *pdata++);
		buf = pdata;
	}
	/* put anything remaining in the part_buf */
	if (cnt) {
		dw_mci_set_part_bytes(host, buf, cnt);
		/* Push data if we have reached the expected data length */
		if ((data->bytes_xfered + init_cnt) ==
		    (data->blksz * data->blocks))
			mci_writew(host, DATA(host->data_offset),
				   host->part_buf16);
	}
}

static void dw_mci_pull_data16(struct dw_mci *host, void *buf, int cnt)
{
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	if (unlikely((unsigned long)buf & 0x1)) {
		while (cnt >= 2) {
			/* pull data from fifo into aligned buffer */
			u16 aligned_buf[64];
			int len = min(cnt & -2, (int)sizeof(aligned_buf));
			int items = len >> 1;
			int i;
			for (i = 0; i < items; ++i)
				aligned_buf[i] = mci_readw(host,
						DATA(host->data_offset));
			/* memcpy from aligned buffer into output buffer */
			memcpy(buf, aligned_buf, len);
			buf += len;
			cnt -= len;
		}
	} else
#endif
	{
		u16 *pdata = buf;
		for (; cnt >= 2; cnt -= 2)
			*pdata++ = mci_readw(host, DATA(host->data_offset));
		buf = pdata;
	}
	if (cnt) {
		host->part_buf16 = mci_readw(host, DATA(host->data_offset));
		dw_mci_pull_final_bytes(host, buf, cnt);
	}
}

static void dw_mci_push_data32(struct dw_mci *host, void *buf, int cnt)
{
	struct mmc_data *data = host->data;
	int init_cnt = cnt;

	/* try and push anything in the part_buf */
	if (unlikely(host->part_buf_count)) {
		int len = dw_mci_push_part_bytes(host, buf, cnt);
		buf += len;
		cnt -= len;
		if (host->part_buf_count == 4) {
			mci_writel(host, DATA(host->data_offset),
				   host->part_buf32);
			host->part_buf_count = 0;
		}
	}
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	if (unlikely((unsigned long)buf & 0x3)) {
		while (cnt >= 4) {
			u32 aligned_buf[32];
			int len = min(cnt & -4, (int)sizeof(aligned_buf));
			int items = len >> 2;
			int i;
			/* memcpy from input buffer into aligned buffer */
			memcpy(aligned_buf, buf, len);
			buf += len;
			cnt -= len;
			/* push data from aligned buffer into fifo */
			for (i = 0; i < items; ++i)
				mci_writel(host, DATA(host->data_offset),
					   aligned_buf[i]);
		}
	} else
#endif
	{
		u32 *pdata = buf;
		for (; cnt >= 4; cnt -= 4)
			mci_writel(host, DATA(host->data_offset), *pdata++);
		buf = pdata;
	}
	/* put anything remaining in the part_buf */
	if (cnt) {
		dw_mci_set_part_bytes(host, buf, cnt);
		/* Push data if we have reached the expected data length */
		if ((data->bytes_xfered + init_cnt) ==
		    (data->blksz * data->blocks))
			mci_writel(host, DATA(host->data_offset),
				   host->part_buf32);
	}
}

static void dw_mci_pull_data32(struct dw_mci *host, void *buf, int cnt)
{
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	if (unlikely((unsigned long)buf & 0x3)) {
		while (cnt >= 4) {
			/* pull data from fifo into aligned buffer */
			u32 aligned_buf[32];
			int len = min(cnt & -4, (int)sizeof(aligned_buf));
			int items = len >> 2;
			int i;
			for (i = 0; i < items; ++i)
				aligned_buf[i] = mci_readl(host,
						DATA(host->data_offset));
			/* memcpy from aligned buffer into output buffer */
			memcpy(buf, aligned_buf, len);
			buf += len;
			cnt -= len;
		}
	} else
#endif
	{
		u32 *pdata = buf;
		for (; cnt >= 4; cnt -= 4)
			*pdata++ = mci_readl(host, DATA(host->data_offset));
		buf = pdata;
	}
	if (cnt) {
		host->part_buf32 = mci_readl(host, DATA(host->data_offset));
		dw_mci_pull_final_bytes(host, buf, cnt);
	}
}

static void dw_mci_push_data64(struct dw_mci *host, void *buf, int cnt)
{
	struct mmc_data *data = host->data;
	int init_cnt = cnt;

	/* try and push anything in the part_buf */
	if (unlikely(host->part_buf_count)) {
		int len = dw_mci_push_part_bytes(host, buf, cnt);
		buf += len;
		cnt -= len;

		if (host->part_buf_count == 8) {
			mci_writeq(host, DATA(host->data_offset),
				   host->part_buf);
			host->part_buf_count = 0;
		}
	}
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	if (unlikely((unsigned long)buf & 0x7)) {
		while (cnt >= 8) {
			u64 aligned_buf[16];
			int len = min(cnt & -8, (int)sizeof(aligned_buf));
			int items = len >> 3;
			int i;
			/* memcpy from input buffer into aligned buffer */
			memcpy(aligned_buf, buf, len);
			buf += len;
			cnt -= len;
			/* push data from aligned buffer into fifo */
			for (i = 0; i < items; ++i)
				mci_writeq(host, DATA(host->data_offset),
					   aligned_buf[i]);
		}
1636 } else
1637#endif
1638 {
1639 u64 *pdata = buf;
1640 for (; cnt >= 8; cnt -= 8)
Jaehoon Chung4e0a5ad2011-10-17 19:36:23 +09001641 mci_writeq(host, DATA(host->data_offset), *pdata++);
James Hogan34b664a2011-06-24 13:57:56 +01001642 buf = pdata;
1643 }
1644 /* put anything remaining in the part_buf */
1645 if (cnt) {
1646 dw_mci_set_part_bytes(host, buf, cnt);
Markos Chandrascfbeb59c2013-03-12 10:53:13 +00001647 /* Push data if we have reached the expected data length */
1648 if ((data->bytes_xfered + init_cnt) ==
1649 (data->blksz * data->blocks))
Jaehoon Chung4e0a5ad2011-10-17 19:36:23 +09001650 mci_writeq(host, DATA(host->data_offset),
Markos Chandrascfbeb59c2013-03-12 10:53:13 +00001651 host->part_buf);
Will Newtonf95f3852011-01-02 01:11:59 -05001652 }
1653}
1654
1655static void dw_mci_pull_data64(struct dw_mci *host, void *buf, int cnt)
1656{
James Hogan34b664a2011-06-24 13:57:56 +01001657#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
1658 if (unlikely((unsigned long)buf & 0x7)) {
1659 while (cnt >= 8) {
1660 /* pull data from fifo into aligned buffer */
1661 u64 aligned_buf[16];
1662 int len = min(cnt & -8, (int)sizeof(aligned_buf));
1663 int items = len >> 3;
1664 int i;
1665 for (i = 0; i < items; ++i)
Jaehoon Chung4e0a5ad2011-10-17 19:36:23 +09001666 aligned_buf[i] = mci_readq(host,
1667 DATA(host->data_offset));
James Hogan34b664a2011-06-24 13:57:56 +01001668 /* memcpy from aligned buffer into output buffer */
1669 memcpy(buf, aligned_buf, len);
1670 buf += len;
1671 cnt -= len;
1672 }
1673 } else
1674#endif
1675 {
1676 u64 *pdata = buf;
1677 for (; cnt >= 8; cnt -= 8)
Jaehoon Chung4e0a5ad2011-10-17 19:36:23 +09001678 *pdata++ = mci_readq(host, DATA(host->data_offset));
James Hogan34b664a2011-06-24 13:57:56 +01001679 buf = pdata;
Will Newtonf95f3852011-01-02 01:11:59 -05001680 }
James Hogan34b664a2011-06-24 13:57:56 +01001681 if (cnt) {
Jaehoon Chung4e0a5ad2011-10-17 19:36:23 +09001682 host->part_buf = mci_readq(host, DATA(host->data_offset));
James Hogan34b664a2011-06-24 13:57:56 +01001683 dw_mci_pull_final_bytes(host, buf, cnt);
1684 }
1685}
1686
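/*
 * Pull @cnt bytes from the data FIFO into @buf: first drain any bytes
 * left over in host->part_buf from a previous partial FIFO word, then
 * hand the rest to the bus-width specific pull routine (16/32/64 bit).
 */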
1687static void dw_mci_pull_data(struct dw_mci *host, void *buf, int cnt)
1688{
1689 int len;
1690
1691 /* get remaining partial bytes */
1692 len = dw_mci_pull_part_bytes(host, buf, cnt);
1693 if (unlikely(len == cnt))
1694 return;
1695 buf += len;
1696 cnt -= len;
1697
1698 /* get the rest of the data */
1699 host->pull_data(host, buf, cnt);
Will Newtonf95f3852011-01-02 01:11:59 -05001700}
1701
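/*
 * PIO read path: walk the request's scatterlist with the sg mapping
 * iterator, pulling min(bytes left in the segment, bytes in the FIFO)
 * on each pass and accounting them in data->bytes_xfered.  Keep going
 * while RXDR stays asserted or, on data-over (@dto), while the FIFO
 * still holds data.  Once the scatterlist is exhausted, flag
 * EVENT_XFER_COMPLETE for the tasklet.
 */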
Kyoungil Kim87a74d32013-01-22 16:46:30 +09001702static void dw_mci_read_data_pio(struct dw_mci *host, bool dto)
Will Newtonf95f3852011-01-02 01:11:59 -05001703{
Seungwon Jeonf9c2a0d2012-02-09 14:32:43 +09001704 struct sg_mapping_iter *sg_miter = &host->sg_miter;
1705 void *buf;
1706 unsigned int offset;
Will Newtonf95f3852011-01-02 01:11:59 -05001707 struct mmc_data *data = host->data;
1708 int shift = host->data_shift;
1709 u32 status;
Markos Chandras3e4b0d82013-03-22 12:50:05 -04001710 unsigned int len;
Seungwon Jeonf9c2a0d2012-02-09 14:32:43 +09001711 unsigned int remain, fcnt;
Will Newtonf95f3852011-01-02 01:11:59 -05001712
1713 do {
Seungwon Jeonf9c2a0d2012-02-09 14:32:43 +09001714 if (!sg_miter_next(sg_miter))
1715 goto done;
Will Newtonf95f3852011-01-02 01:11:59 -05001716
Imre Deak4225fc82013-02-27 17:02:57 -08001717 host->sg = sg_miter->piter.sg;
Seungwon Jeonf9c2a0d2012-02-09 14:32:43 +09001718 buf = sg_miter->addr;
1719 remain = sg_miter->length;
1720 offset = 0;
1721
1722 do {
1723 fcnt = (SDMMC_GET_FCNT(mci_readl(host, STATUS))
1724 << shift) + host->part_buf_count;
1725 len = min(remain, fcnt);
1726 if (!len)
1727 break;
1728 dw_mci_pull_data(host, (void *)(buf + offset), len);
Markos Chandras3e4b0d82013-03-22 12:50:05 -04001729 data->bytes_xfered += len;
Will Newtonf95f3852011-01-02 01:11:59 -05001730 offset += len;
Seungwon Jeonf9c2a0d2012-02-09 14:32:43 +09001731 remain -= len;
1732 } while (remain);
Will Newtonf95f3852011-01-02 01:11:59 -05001733
Seungwon Jeone74f3a92012-08-01 09:30:46 +09001734 sg_miter->consumed = offset;
Will Newtonf95f3852011-01-02 01:11:59 -05001735 status = mci_readl(host, MINTSTS);
1736 mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
Kyoungil Kim87a74d32013-01-22 16:46:30 +09001737		/* if the RXDR is ready, read again */
1738 } while ((status & SDMMC_INT_RXDR) ||
1739 (dto && SDMMC_GET_FCNT(mci_readl(host, STATUS))));
Seungwon Jeonf9c2a0d2012-02-09 14:32:43 +09001740
1741 if (!remain) {
1742 if (!sg_miter_next(sg_miter))
1743 goto done;
1744 sg_miter->consumed = 0;
1745 }
1746 sg_miter_stop(sg_miter);
Will Newtonf95f3852011-01-02 01:11:59 -05001747 return;
1748
1749done:
Seungwon Jeonf9c2a0d2012-02-09 14:32:43 +09001750 sg_miter_stop(sg_miter);
1751 host->sg = NULL;
Will Newtonf95f3852011-01-02 01:11:59 -05001752 smp_wmb();
1753 set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
1754}
1755
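/*
 * PIO write path: mirror of dw_mci_read_data_pio() - push as many bytes
 * as the FIFO has room for from each scatterlist segment, repeat while
 * TXDR is asserted, and flag EVENT_XFER_COMPLETE once the scatterlist
 * is exhausted.
 */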
1756static void dw_mci_write_data_pio(struct dw_mci *host)
1757{
Seungwon Jeonf9c2a0d2012-02-09 14:32:43 +09001758 struct sg_mapping_iter *sg_miter = &host->sg_miter;
1759 void *buf;
1760 unsigned int offset;
Will Newtonf95f3852011-01-02 01:11:59 -05001761 struct mmc_data *data = host->data;
1762 int shift = host->data_shift;
1763 u32 status;
Markos Chandras3e4b0d82013-03-22 12:50:05 -04001764 unsigned int len;
Seungwon Jeonf9c2a0d2012-02-09 14:32:43 +09001765 unsigned int fifo_depth = host->fifo_depth;
1766 unsigned int remain, fcnt;
Will Newtonf95f3852011-01-02 01:11:59 -05001767
1768 do {
Seungwon Jeonf9c2a0d2012-02-09 14:32:43 +09001769 if (!sg_miter_next(sg_miter))
1770 goto done;
Will Newtonf95f3852011-01-02 01:11:59 -05001771
Imre Deak4225fc82013-02-27 17:02:57 -08001772 host->sg = sg_miter->piter.sg;
Seungwon Jeonf9c2a0d2012-02-09 14:32:43 +09001773 buf = sg_miter->addr;
1774 remain = sg_miter->length;
1775 offset = 0;
1776
1777 do {
1778 fcnt = ((fifo_depth -
1779 SDMMC_GET_FCNT(mci_readl(host, STATUS)))
1780 << shift) - host->part_buf_count;
1781 len = min(remain, fcnt);
1782 if (!len)
1783 break;
1784 host->push_data(host, (void *)(buf + offset), len);
Markos Chandras3e4b0d82013-03-22 12:50:05 -04001785 data->bytes_xfered += len;
Will Newtonf95f3852011-01-02 01:11:59 -05001786 offset += len;
Seungwon Jeonf9c2a0d2012-02-09 14:32:43 +09001787 remain -= len;
1788 } while (remain);
Will Newtonf95f3852011-01-02 01:11:59 -05001789
Seungwon Jeone74f3a92012-08-01 09:30:46 +09001790 sg_miter->consumed = offset;
Will Newtonf95f3852011-01-02 01:11:59 -05001791 status = mci_readl(host, MINTSTS);
1792 mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
Will Newtonf95f3852011-01-02 01:11:59 -05001793 } while (status & SDMMC_INT_TXDR); /* if TXDR write again */
Seungwon Jeonf9c2a0d2012-02-09 14:32:43 +09001794
1795 if (!remain) {
1796 if (!sg_miter_next(sg_miter))
1797 goto done;
1798 sg_miter->consumed = 0;
1799 }
1800 sg_miter_stop(sg_miter);
Will Newtonf95f3852011-01-02 01:11:59 -05001801 return;
1802
1803done:
Seungwon Jeonf9c2a0d2012-02-09 14:32:43 +09001804 sg_miter_stop(sg_miter);
1805 host->sg = NULL;
Will Newtonf95f3852011-01-02 01:11:59 -05001806 smp_wmb();
1807 set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
1808}
1809
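/*
 * Latch the first command status seen, flag EVENT_CMD_COMPLETE and kick
 * the tasklet that finishes request processing.
 */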
1810static void dw_mci_cmd_interrupt(struct dw_mci *host, u32 status)
1811{
1812 if (!host->cmd_status)
1813 host->cmd_status = status;
1814
1815 smp_wmb();
1816
1817 set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
1818 tasklet_schedule(&host->tasklet);
1819}
1820
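/*
 * Top-level interrupt handler.  Reads the masked interrupt status
 * (MINTSTS) and, in turn: latches command/data error status for the
 * tasklet, finishes PIO transfers on RXDR/TXDR and data-over, handles
 * command-done, defers card-detect to the card workqueue, signals
 * per-slot SDIO interrupts, and finally services IDMAC descriptor
 * interrupts when the internal DMA controller is in use.
 */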
1821static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
1822{
1823 struct dw_mci *host = dev_id;
Seungwon Jeon182c9082012-08-01 09:30:30 +09001824 u32 pending;
Shashidhar Hiremath1a5c8e12011-08-29 13:11:46 +05301825 int i;
Will Newtonf95f3852011-01-02 01:11:59 -05001826
Markos Chandras1fb5f682013-03-12 10:53:11 +00001827 pending = mci_readl(host, MINTSTS); /* read-only mask reg */
1828
Doug Anderson476d79f2013-07-09 13:04:40 -07001829 /*
1830 * DTO fix - version 2.10a and below, and only if internal DMA
1831 * is configured.
1832 */
1833 if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO) {
1834 if (!pending &&
1835 ((mci_readl(host, STATUS) >> 17) & 0x1fff))
1836 pending |= SDMMC_INT_DATA_OVER;
1837 }
1838
Markos Chandras1fb5f682013-03-12 10:53:11 +00001839 if (pending) {
Will Newtonf95f3852011-01-02 01:11:59 -05001840 if (pending & DW_MCI_CMD_ERROR_FLAGS) {
1841 mci_writel(host, RINTSTS, DW_MCI_CMD_ERROR_FLAGS);
Seungwon Jeon182c9082012-08-01 09:30:30 +09001842 host->cmd_status = pending;
Will Newtonf95f3852011-01-02 01:11:59 -05001843 smp_wmb();
1844 set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
Will Newtonf95f3852011-01-02 01:11:59 -05001845 }
1846
1847 if (pending & DW_MCI_DATA_ERROR_FLAGS) {
1848			/* if there is an error, report DATA_ERROR */
1849 mci_writel(host, RINTSTS, DW_MCI_DATA_ERROR_FLAGS);
Seungwon Jeon182c9082012-08-01 09:30:30 +09001850 host->data_status = pending;
Will Newtonf95f3852011-01-02 01:11:59 -05001851 smp_wmb();
1852 set_bit(EVENT_DATA_ERROR, &host->pending_events);
Seungwon Jeon9b2026a2012-08-01 09:30:40 +09001853 tasklet_schedule(&host->tasklet);
Will Newtonf95f3852011-01-02 01:11:59 -05001854 }
1855
1856 if (pending & SDMMC_INT_DATA_OVER) {
1857 mci_writel(host, RINTSTS, SDMMC_INT_DATA_OVER);
1858 if (!host->data_status)
Seungwon Jeon182c9082012-08-01 09:30:30 +09001859 host->data_status = pending;
Will Newtonf95f3852011-01-02 01:11:59 -05001860 smp_wmb();
1861 if (host->dir_status == DW_MCI_RECV_STATUS) {
1862 if (host->sg != NULL)
Kyoungil Kim87a74d32013-01-22 16:46:30 +09001863 dw_mci_read_data_pio(host, true);
Will Newtonf95f3852011-01-02 01:11:59 -05001864 }
1865 set_bit(EVENT_DATA_COMPLETE, &host->pending_events);
1866 tasklet_schedule(&host->tasklet);
1867 }
1868
1869 if (pending & SDMMC_INT_RXDR) {
1870 mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
James Hoganb40af3a2011-06-24 13:54:06 +01001871 if (host->dir_status == DW_MCI_RECV_STATUS && host->sg)
Kyoungil Kim87a74d32013-01-22 16:46:30 +09001872 dw_mci_read_data_pio(host, false);
Will Newtonf95f3852011-01-02 01:11:59 -05001873 }
1874
1875 if (pending & SDMMC_INT_TXDR) {
1876 mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
James Hoganb40af3a2011-06-24 13:54:06 +01001877 if (host->dir_status == DW_MCI_SEND_STATUS && host->sg)
Will Newtonf95f3852011-01-02 01:11:59 -05001878 dw_mci_write_data_pio(host);
1879 }
1880
1881 if (pending & SDMMC_INT_CMD_DONE) {
1882 mci_writel(host, RINTSTS, SDMMC_INT_CMD_DONE);
Seungwon Jeon182c9082012-08-01 09:30:30 +09001883 dw_mci_cmd_interrupt(host, pending);
Will Newtonf95f3852011-01-02 01:11:59 -05001884 }
1885
1886 if (pending & SDMMC_INT_CD) {
1887 mci_writel(host, RINTSTS, SDMMC_INT_CD);
Thomas Abraham95dcc2c2012-05-01 14:57:36 -07001888 queue_work(host->card_workqueue, &host->card_work);
Will Newtonf95f3852011-01-02 01:11:59 -05001889 }
1890
Shashidhar Hiremath1a5c8e12011-08-29 13:11:46 +05301891 /* Handle SDIO Interrupts */
1892 for (i = 0; i < host->num_slots; i++) {
1893 struct dw_mci_slot *slot = host->slot[i];
1894 if (pending & SDMMC_INT_SDIO(i)) {
1895 mci_writel(host, RINTSTS, SDMMC_INT_SDIO(i));
1896 mmc_signal_sdio_irq(slot->mmc);
1897 }
1898 }
1899
Markos Chandras1fb5f682013-03-12 10:53:11 +00001900 }
Will Newtonf95f3852011-01-02 01:11:59 -05001901
1902#ifdef CONFIG_MMC_DW_IDMAC
1903 /* Handle DMA interrupts */
1904 pending = mci_readl(host, IDSTS);
1905 if (pending & (SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI)) {
1906 mci_writel(host, IDSTS, SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI);
1907 mci_writel(host, IDSTS, SDMMC_IDMAC_INT_NI);
Will Newtonf95f3852011-01-02 01:11:59 -05001908 host->dma_ops->complete(host);
1909 }
1910#endif
1911
1912 return IRQ_HANDLED;
1913}
1914
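/*
 * Card-detect workqueue handler.  For each slot, compare the current
 * card presence against the last recorded state; on a change, fail any
 * queued or in-flight request with -ENOMEDIUM, reset the FIFO (and the
 * IDMAC, if used) when the card was removed, and finally ask the MMC
 * core to rescan the slot via mmc_detect_change().
 */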
James Hogan1791b13e2011-06-24 13:55:55 +01001915static void dw_mci_work_routine_card(struct work_struct *work)
Will Newtonf95f3852011-01-02 01:11:59 -05001916{
James Hogan1791b13e2011-06-24 13:55:55 +01001917 struct dw_mci *host = container_of(work, struct dw_mci, card_work);
Will Newtonf95f3852011-01-02 01:11:59 -05001918 int i;
1919
1920 for (i = 0; i < host->num_slots; i++) {
1921 struct dw_mci_slot *slot = host->slot[i];
1922 struct mmc_host *mmc = slot->mmc;
1923 struct mmc_request *mrq;
1924 int present;
Will Newtonf95f3852011-01-02 01:11:59 -05001925
1926 present = dw_mci_get_cd(mmc);
1927 while (present != slot->last_detect_state) {
Will Newtonf95f3852011-01-02 01:11:59 -05001928 dev_dbg(&slot->mmc->class_dev, "card %s\n",
1929 present ? "inserted" : "removed");
1930
James Hogan1791b13e2011-06-24 13:55:55 +01001931 spin_lock_bh(&host->lock);
1932
Will Newtonf95f3852011-01-02 01:11:59 -05001933 /* Card change detected */
1934 slot->last_detect_state = present;
1935
Will Newtonf95f3852011-01-02 01:11:59 -05001936 /* Clean up queue if present */
1937 mrq = slot->mrq;
1938 if (mrq) {
1939 if (mrq == host->mrq) {
1940 host->data = NULL;
1941 host->cmd = NULL;
1942
1943 switch (host->state) {
1944 case STATE_IDLE:
1945 break;
1946 case STATE_SENDING_CMD:
1947 mrq->cmd->error = -ENOMEDIUM;
1948 if (!mrq->data)
1949 break;
1950 /* fall through */
1951 case STATE_SENDING_DATA:
1952 mrq->data->error = -ENOMEDIUM;
1953 dw_mci_stop_dma(host);
1954 break;
1955 case STATE_DATA_BUSY:
1956 case STATE_DATA_ERROR:
1957 if (mrq->data->error == -EINPROGRESS)
1958 mrq->data->error = -ENOMEDIUM;
Will Newtonf95f3852011-01-02 01:11:59 -05001959 /* fall through */
1960 case STATE_SENDING_STOP:
Seungwon Jeon90c21432013-08-31 00:14:05 +09001961 if (mrq->stop)
1962 mrq->stop->error = -ENOMEDIUM;
Will Newtonf95f3852011-01-02 01:11:59 -05001963 break;
1964 }
1965
1966 dw_mci_request_end(host, mrq);
1967 } else {
1968 list_del(&slot->queue_node);
1969 mrq->cmd->error = -ENOMEDIUM;
1970 if (mrq->data)
1971 mrq->data->error = -ENOMEDIUM;
1972 if (mrq->stop)
1973 mrq->stop->error = -ENOMEDIUM;
1974
1975 spin_unlock(&host->lock);
1976 mmc_request_done(slot->mmc, mrq);
1977 spin_lock(&host->lock);
1978 }
1979 }
1980
1981 /* Power down slot */
1982 if (present == 0) {
Seungwon Jeon31bff452013-08-31 00:14:23 +09001983 /* Clear down the FIFO */
1984 dw_mci_fifo_reset(host);
Will Newtonf95f3852011-01-02 01:11:59 -05001985#ifdef CONFIG_MMC_DW_IDMAC
Seungwon Jeon5ce9d962013-08-31 00:14:33 +09001986 dw_mci_idmac_reset(host);
Will Newtonf95f3852011-01-02 01:11:59 -05001987#endif
1988
1989 }
1990
James Hogan1791b13e2011-06-24 13:55:55 +01001991 spin_unlock_bh(&host->lock);
1992
Will Newtonf95f3852011-01-02 01:11:59 -05001993 present = dw_mci_get_cd(mmc);
1994 }
1995
1996 mmc_detect_change(slot->mmc,
1997 msecs_to_jiffies(host->pdata->detect_delay_ms));
1998 }
1999}
2000
Thomas Abrahamc91eab42012-09-17 18:16:40 +00002001#ifdef CONFIG_OF
2002/* given a slot id, find out the device node representing that slot */
2003static struct device_node *dw_mci_of_find_slot_node(struct device *dev, u8 slot)
2004{
2005 struct device_node *np;
2006 const __be32 *addr;
2007 int len;
2008
2009 if (!dev || !dev->of_node)
2010 return NULL;
2011
2012 for_each_child_of_node(dev->of_node, np) {
2013 addr = of_get_property(np, "reg", &len);
2014 if (!addr || (len < sizeof(int)))
2015 continue;
2016 if (be32_to_cpup(addr) == slot)
2017 return np;
2018 }
2019 return NULL;
2020}
2021
Doug Andersona70aaa62013-01-11 17:03:50 +00002022static struct dw_mci_of_slot_quirks {
2023 char *quirk;
2024 int id;
2025} of_slot_quirks[] = {
2026 {
2027 .quirk = "disable-wp",
2028 .id = DW_MCI_SLOT_QUIRK_NO_WRITE_PROTECT,
2029 },
2030};
2031
2032static int dw_mci_of_get_slot_quirks(struct device *dev, u8 slot)
2033{
2034 struct device_node *np = dw_mci_of_find_slot_node(dev, slot);
2035 int quirks = 0;
2036 int idx;
2037
2038 /* get quirks */
2039 for (idx = 0; idx < ARRAY_SIZE(of_slot_quirks); idx++)
2040 if (of_get_property(np, of_slot_quirks[idx].quirk, NULL))
2041 quirks |= of_slot_quirks[idx].id;
2042
2043 return quirks;
2044}
2045
Thomas Abrahamc91eab42012-09-17 18:16:40 +00002046/* find out bus-width for a given slot */
2047static u32 dw_mci_of_get_bus_wd(struct device *dev, u8 slot)
2048{
2049 struct device_node *np = dw_mci_of_find_slot_node(dev, slot);
2050 u32 bus_wd = 1;
2051
2052 if (!np)
2053 return 1;
2054
2055 if (of_property_read_u32(np, "bus-width", &bus_wd))
2056 dev_err(dev, "bus-width property not found, assuming width"
2057 " as 1\n");
2058 return bus_wd;
2059}
Doug Anderson55a6ceb2013-01-11 17:03:53 +00002060
2061/* find the write protect gpio for a given slot; returns -EINVAL if none is specified or usable */
2062static int dw_mci_of_get_wp_gpio(struct device *dev, u8 slot)
2063{
2064 struct device_node *np = dw_mci_of_find_slot_node(dev, slot);
2065 int gpio;
2066
2067 if (!np)
2068 return -EINVAL;
2069
2070 gpio = of_get_named_gpio(np, "wp-gpios", 0);
2071
2072 /* Having a missing entry is valid; return silently */
2073 if (!gpio_is_valid(gpio))
2074 return -EINVAL;
2075
2076 if (devm_gpio_request(dev, gpio, "dw-mci-wp")) {
2077 dev_warn(dev, "gpio [%d] request failed\n", gpio);
2078 return -EINVAL;
2079 }
2080
2081 return gpio;
2082}
Zhangfei Gaobf626e52014-01-09 22:35:10 +08002083
2084/* find and request the cd gpio for a given slot; does nothing if none is specified */
2085static void dw_mci_of_get_cd_gpio(struct device *dev, u8 slot,
2086 struct mmc_host *mmc)
2087{
2088 struct device_node *np = dw_mci_of_find_slot_node(dev, slot);
2089 int gpio;
2090
2091 if (!np)
2092 return;
2093
2094 gpio = of_get_named_gpio(np, "cd-gpios", 0);
2095
2096 /* Having a missing entry is valid; return silently */
2097 if (!gpio_is_valid(gpio))
2098 return;
2099
2100 if (mmc_gpio_request_cd(mmc, gpio, 0))
2101 dev_warn(dev, "gpio [%d] request failed\n", gpio);
2102}
Thomas Abrahamc91eab42012-09-17 18:16:40 +00002103#else /* CONFIG_OF */
Doug Andersona70aaa62013-01-11 17:03:50 +00002104static int dw_mci_of_get_slot_quirks(struct device *dev, u8 slot)
2105{
2106 return 0;
2107}
Thomas Abrahamc91eab42012-09-17 18:16:40 +00002108static u32 dw_mci_of_get_bus_wd(struct device *dev, u8 slot)
2109{
2110 return 1;
2111}
2112static struct device_node *dw_mci_of_find_slot_node(struct device *dev, u8 slot)
2113{
2114 return NULL;
2115}
Doug Anderson55a6ceb2013-01-11 17:03:53 +00002116static int dw_mci_of_get_wp_gpio(struct device *dev, u8 slot)
2117{
2118 return -EINVAL;
2119}
Zhangfei Gaobf626e52014-01-09 22:35:10 +08002120static void dw_mci_of_get_cd_gpio(struct device *dev, u8 slot,
2121 struct mmc_host *mmc)
2122{
2123 return;
2124}
Thomas Abrahamc91eab42012-09-17 18:16:40 +00002125#endif /* CONFIG_OF */
2126
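/*
 * Allocate and register one mmc_host for slot @id: set the frequency
 * range (from the optional "clock-freq-min-max" property or the driver
 * defaults), the OCR mask, capabilities from platform data and the
 * implementation-specific drv_data, the bus width, block/segment
 * limits, and the optional write-protect and card-detect GPIOs, then
 * add the host to the MMC core.
 */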
Jaehoon Chung36c179a2012-08-23 20:31:48 +09002127static int dw_mci_init_slot(struct dw_mci *host, unsigned int id)
Will Newtonf95f3852011-01-02 01:11:59 -05002128{
2129 struct mmc_host *mmc;
2130 struct dw_mci_slot *slot;
Arnd Bergmanne95baf12012-11-08 14:26:11 +00002131 const struct dw_mci_drv_data *drv_data = host->drv_data;
Thomas Abraham800d78b2012-09-17 18:16:42 +00002132 int ctrl_id, ret;
Seungwon Jeon1f44a2a2013-08-31 00:13:31 +09002133 u32 freq[2];
Thomas Abrahamc91eab42012-09-17 18:16:40 +00002134 u8 bus_width;
Will Newtonf95f3852011-01-02 01:11:59 -05002135
Thomas Abraham4a909202012-09-17 18:16:35 +00002136 mmc = mmc_alloc_host(sizeof(struct dw_mci_slot), host->dev);
Will Newtonf95f3852011-01-02 01:11:59 -05002137 if (!mmc)
2138 return -ENOMEM;
2139
2140 slot = mmc_priv(mmc);
2141 slot->id = id;
2142 slot->mmc = mmc;
2143 slot->host = host;
Thomas Abrahamc91eab42012-09-17 18:16:40 +00002144 host->slot[id] = slot;
Will Newtonf95f3852011-01-02 01:11:59 -05002145
Doug Andersona70aaa62013-01-11 17:03:50 +00002146 slot->quirks = dw_mci_of_get_slot_quirks(host->dev, slot->id);
2147
Will Newtonf95f3852011-01-02 01:11:59 -05002148 mmc->ops = &dw_mci_ops;
Seungwon Jeon1f44a2a2013-08-31 00:13:31 +09002149 if (of_property_read_u32_array(host->dev->of_node,
2150 "clock-freq-min-max", freq, 2)) {
2151 mmc->f_min = DW_MCI_FREQ_MIN;
2152 mmc->f_max = DW_MCI_FREQ_MAX;
2153 } else {
2154 mmc->f_min = freq[0];
2155 mmc->f_max = freq[1];
2156 }
Will Newtonf95f3852011-01-02 01:11:59 -05002157
2158 if (host->pdata->get_ocr)
2159 mmc->ocr_avail = host->pdata->get_ocr(id);
2160 else
2161 mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
2162
2163 /*
2164	 * Start with slot power disabled; it will be enabled when a card
2165	 * is detected.
2166 */
2167 if (host->pdata->setpower)
2168 host->pdata->setpower(id, 0);
2169
Jaehoon Chungfc3d7722011-02-25 11:08:15 +09002170 if (host->pdata->caps)
2171 mmc->caps = host->pdata->caps;
Jaehoon Chungfc3d7722011-02-25 11:08:15 +09002172
Abhilash Kesavanab269122012-11-19 10:26:21 +05302173 if (host->pdata->pm_caps)
2174 mmc->pm_caps = host->pdata->pm_caps;
2175
Thomas Abraham800d78b2012-09-17 18:16:42 +00002176 if (host->dev->of_node) {
2177 ctrl_id = of_alias_get_id(host->dev->of_node, "mshc");
2178 if (ctrl_id < 0)
2179 ctrl_id = 0;
2180 } else {
2181 ctrl_id = to_platform_device(host->dev)->id;
2182 }
James Hogancb27a842012-10-16 09:43:08 +01002183 if (drv_data && drv_data->caps)
2184 mmc->caps |= drv_data->caps[ctrl_id];
Thomas Abraham800d78b2012-09-17 18:16:42 +00002185
Seungwon Jeon4f408cc2011-12-09 14:55:52 +09002186 if (host->pdata->caps2)
2187 mmc->caps2 = host->pdata->caps2;
Seungwon Jeon4f408cc2011-12-09 14:55:52 +09002188
Will Newtonf95f3852011-01-02 01:11:59 -05002189 if (host->pdata->get_bus_wd)
Thomas Abrahamc91eab42012-09-17 18:16:40 +00002190 bus_width = host->pdata->get_bus_wd(slot->id);
2191 else if (host->dev->of_node)
2192 bus_width = dw_mci_of_get_bus_wd(host->dev, slot->id);
2193 else
2194 bus_width = 1;
2195
2196 switch (bus_width) {
2197 case 8:
2198 mmc->caps |= MMC_CAP_8_BIT_DATA;
2199 case 4:
2200 mmc->caps |= MMC_CAP_4_BIT_DATA;
2201 }
Will Newtonf95f3852011-01-02 01:11:59 -05002202
Will Newtonf95f3852011-01-02 01:11:59 -05002203 if (host->pdata->blk_settings) {
2204 mmc->max_segs = host->pdata->blk_settings->max_segs;
2205 mmc->max_blk_size = host->pdata->blk_settings->max_blk_size;
2206 mmc->max_blk_count = host->pdata->blk_settings->max_blk_count;
2207 mmc->max_req_size = host->pdata->blk_settings->max_req_size;
2208 mmc->max_seg_size = host->pdata->blk_settings->max_seg_size;
2209 } else {
2210 /* Useful defaults if platform data is unset. */
Jaehoon Chunga39e5742012-02-04 17:00:27 -05002211#ifdef CONFIG_MMC_DW_IDMAC
2212 mmc->max_segs = host->ring_size;
2213 mmc->max_blk_size = 65536;
2214 mmc->max_blk_count = host->ring_size;
2215 mmc->max_seg_size = 0x1000;
2216 mmc->max_req_size = mmc->max_seg_size * mmc->max_blk_count;
2217#else
Will Newtonf95f3852011-01-02 01:11:59 -05002218 mmc->max_segs = 64;
2219 mmc->max_blk_size = 65536; /* BLKSIZ is 16 bits */
2220 mmc->max_blk_count = 512;
2221 mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
2222 mmc->max_seg_size = mmc->max_req_size;
Will Newtonf95f3852011-01-02 01:11:59 -05002223#endif /* CONFIG_MMC_DW_IDMAC */
Jaehoon Chunga39e5742012-02-04 17:00:27 -05002224 }
Will Newtonf95f3852011-01-02 01:11:59 -05002225
Doug Anderson55a6ceb2013-01-11 17:03:53 +00002226 slot->wp_gpio = dw_mci_of_get_wp_gpio(host->dev, slot->id);
Zhangfei Gaobf626e52014-01-09 22:35:10 +08002227 dw_mci_of_get_cd_gpio(host->dev, slot->id, mmc);
Doug Anderson55a6ceb2013-01-11 17:03:53 +00002228
Jaehoon Chung0cea5292013-02-15 23:45:45 +09002229 ret = mmc_add_host(mmc);
2230 if (ret)
2231 goto err_setup_bus;
Will Newtonf95f3852011-01-02 01:11:59 -05002232
2233#if defined(CONFIG_DEBUG_FS)
2234 dw_mci_init_debugfs(slot);
2235#endif
2236
2237 /* Card initially undetected */
2238 slot->last_detect_state = 0;
2239
Will Newtonf95f3852011-01-02 01:11:59 -05002240 return 0;
Thomas Abraham800d78b2012-09-17 18:16:42 +00002241
2242err_setup_bus:
2243 mmc_free_host(mmc);
2244 return -EINVAL;
Will Newtonf95f3852011-01-02 01:11:59 -05002245}
2246
2247static void dw_mci_cleanup_slot(struct dw_mci_slot *slot, unsigned int id)
2248{
2249 /* Shutdown detect IRQ */
2250 if (slot->host->pdata->exit)
2251 slot->host->pdata->exit(id);
2252
2253 /* Debugfs stuff is cleaned up by mmc core */
2254 mmc_remove_host(slot->mmc);
2255 slot->host->slot[id] = NULL;
2256 mmc_free_host(slot->mmc);
2257}
2258
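/*
 * Set up data transfer support: allocate the coherent memory used for
 * descriptor/scatter-gather translation, select the internal DMA
 * controller (IDMAC) when it is compiled in and its ops are complete,
 * and otherwise fall back to PIO.
 */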
2259static void dw_mci_init_dma(struct dw_mci *host)
2260{
2261 /* Alloc memory for sg translation */
Seungwon Jeon780f22a2012-11-28 19:26:03 +09002262 host->sg_cpu = dmam_alloc_coherent(host->dev, PAGE_SIZE,
Will Newtonf95f3852011-01-02 01:11:59 -05002263 &host->sg_dma, GFP_KERNEL);
2264 if (!host->sg_cpu) {
Thomas Abraham4a909202012-09-17 18:16:35 +00002265 dev_err(host->dev, "%s: could not alloc DMA memory\n",
Will Newtonf95f3852011-01-02 01:11:59 -05002266 __func__);
2267 goto no_dma;
2268 }
2269
2270 /* Determine which DMA interface to use */
2271#ifdef CONFIG_MMC_DW_IDMAC
2272 host->dma_ops = &dw_mci_idmac_ops;
Seungwon Jeon00956ea2012-09-28 19:13:11 +09002273 dev_info(host->dev, "Using internal DMA controller.\n");
Will Newtonf95f3852011-01-02 01:11:59 -05002274#endif
2275
2276 if (!host->dma_ops)
2277 goto no_dma;
2278
Jaehoon Chunge1631f92012-04-18 15:42:31 +09002279 if (host->dma_ops->init && host->dma_ops->start &&
2280 host->dma_ops->stop && host->dma_ops->cleanup) {
Will Newtonf95f3852011-01-02 01:11:59 -05002281 if (host->dma_ops->init(host)) {
Thomas Abraham4a909202012-09-17 18:16:35 +00002282 dev_err(host->dev, "%s: Unable to initialize "
Will Newtonf95f3852011-01-02 01:11:59 -05002283 "DMA Controller.\n", __func__);
2284 goto no_dma;
2285 }
2286 } else {
Thomas Abraham4a909202012-09-17 18:16:35 +00002287 dev_err(host->dev, "DMA initialization not found.\n");
Will Newtonf95f3852011-01-02 01:11:59 -05002288 goto no_dma;
2289 }
2290
2291 host->use_dma = 1;
2292 return;
2293
2294no_dma:
Thomas Abraham4a909202012-09-17 18:16:35 +00002295 dev_info(host->dev, "Using PIO mode.\n");
Will Newtonf95f3852011-01-02 01:11:59 -05002296 host->use_dma = 0;
2297 return;
2298}
2299
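/*
 * Set the requested reset bit(s) in the CTRL register and poll for up
 * to 500ms for the controller to clear them again; returns false on
 * timeout.
 */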
Seungwon Jeon31bff452013-08-31 00:14:23 +09002300static bool dw_mci_ctrl_reset(struct dw_mci *host, u32 reset)
Will Newtonf95f3852011-01-02 01:11:59 -05002301{
2302 unsigned long timeout = jiffies + msecs_to_jiffies(500);
Seungwon Jeon31bff452013-08-31 00:14:23 +09002303 u32 ctrl;
Will Newtonf95f3852011-01-02 01:11:59 -05002304
Seungwon Jeon31bff452013-08-31 00:14:23 +09002305 ctrl = mci_readl(host, CTRL);
2306 ctrl |= reset;
2307 mci_writel(host, CTRL, ctrl);
Will Newtonf95f3852011-01-02 01:11:59 -05002308
2309 /* wait till resets clear */
2310 do {
2311 ctrl = mci_readl(host, CTRL);
Seungwon Jeon31bff452013-08-31 00:14:23 +09002312 if (!(ctrl & reset))
Will Newtonf95f3852011-01-02 01:11:59 -05002313 return true;
2314 } while (time_before(jiffies, timeout));
2315
Seungwon Jeon31bff452013-08-31 00:14:23 +09002316 dev_err(host->dev,
2317 "Timeout resetting block (ctrl reset %#x)\n",
2318 ctrl & reset);
Will Newtonf95f3852011-01-02 01:11:59 -05002319
2320 return false;
2321}
2322
Seungwon Jeon31bff452013-08-31 00:14:23 +09002323static inline bool dw_mci_fifo_reset(struct dw_mci *host)
2324{
2325 /*
2326	 * Resetting generates a block interrupt, hence we set the
2327	 * scatter-gather pointer to NULL beforehand.
2328 */
2329 if (host->sg) {
2330 sg_miter_stop(&host->sg_miter);
2331 host->sg = NULL;
2332 }
2333
2334 return dw_mci_ctrl_reset(host, SDMMC_CTRL_FIFO_RESET);
2335}
2336
2337static inline bool dw_mci_ctrl_all_reset(struct dw_mci *host)
2338{
2339 return dw_mci_ctrl_reset(host,
2340 SDMMC_CTRL_FIFO_RESET |
2341 SDMMC_CTRL_RESET |
2342 SDMMC_CTRL_DMA_RESET);
2343}
2344
Thomas Abrahamc91eab42012-09-17 18:16:40 +00002345#ifdef CONFIG_OF
2346static struct dw_mci_of_quirks {
2347 char *quirk;
2348 int id;
2349} of_quirks[] = {
2350 {
Thomas Abrahamc91eab42012-09-17 18:16:40 +00002351 .quirk = "broken-cd",
2352 .id = DW_MCI_QUIRK_BROKEN_CARD_DETECTION,
2353 },
2354};
2355
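/*
 * Build a dw_mci_board from the device-tree node when no platform data
 * was supplied: number of slots, quirks, FIFO depth, card-detect delay,
 * bus clock rate, power-management and bus-speed capabilities, plus any
 * implementation-specific properties via drv_data->parse_dt().
 *
 * Purely illustrative fragment using the properties parsed below (the
 * node name and values are made up, not taken from the binding
 * document):
 *
 *	mmc-controller {
 *		num-slots = <1>;
 *		fifo-depth = <0x80>;
 *		card-detect-delay = <200>;
 *		clock-frequency = <400000000>;
 *		broken-cd;
 *		supports-highspeed;
 *	};
 */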
2356static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
2357{
2358 struct dw_mci_board *pdata;
2359 struct device *dev = host->dev;
2360 struct device_node *np = dev->of_node;
Arnd Bergmanne95baf12012-11-08 14:26:11 +00002361 const struct dw_mci_drv_data *drv_data = host->drv_data;
Thomas Abraham800d78b2012-09-17 18:16:42 +00002362 int idx, ret;
Doug Anderson3c6d89e2013-06-07 10:28:30 -07002363 u32 clock_frequency;
Thomas Abrahamc91eab42012-09-17 18:16:40 +00002364
2365 pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
2366 if (!pdata) {
2367 dev_err(dev, "could not allocate memory for pdata\n");
2368 return ERR_PTR(-ENOMEM);
2369 }
2370
2371 /* find out number of slots supported */
2372 if (of_property_read_u32(dev->of_node, "num-slots",
2373 &pdata->num_slots)) {
2374 dev_info(dev, "num-slots property not found, "
2375 "assuming 1 slot is available\n");
2376 pdata->num_slots = 1;
2377 }
2378
2379 /* get quirks */
2380 for (idx = 0; idx < ARRAY_SIZE(of_quirks); idx++)
2381 if (of_get_property(np, of_quirks[idx].quirk, NULL))
2382 pdata->quirks |= of_quirks[idx].id;
2383
2384 if (of_property_read_u32(np, "fifo-depth", &pdata->fifo_depth))
2385 dev_info(dev, "fifo-depth property not found, using "
2386 "value of FIFOTH register as default\n");
2387
2388 of_property_read_u32(np, "card-detect-delay", &pdata->detect_delay_ms);
2389
Doug Anderson3c6d89e2013-06-07 10:28:30 -07002390 if (!of_property_read_u32(np, "clock-frequency", &clock_frequency))
2391 pdata->bus_hz = clock_frequency;
2392
James Hogancb27a842012-10-16 09:43:08 +01002393 if (drv_data && drv_data->parse_dt) {
2394 ret = drv_data->parse_dt(host);
Thomas Abraham800d78b2012-09-17 18:16:42 +00002395 if (ret)
2396 return ERR_PTR(ret);
2397 }
2398
Abhilash Kesavanab269122012-11-19 10:26:21 +05302399 if (of_find_property(np, "keep-power-in-suspend", NULL))
2400 pdata->pm_caps |= MMC_PM_KEEP_POWER;
2401
2402 if (of_find_property(np, "enable-sdio-wakeup", NULL))
2403 pdata->pm_caps |= MMC_PM_WAKE_SDIO_IRQ;
2404
Seungwon Jeon10b49842013-08-31 00:13:22 +09002405 if (of_find_property(np, "supports-highspeed", NULL))
2406 pdata->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED;
2407
Seungwon Jeon5dd63f52013-08-31 00:13:09 +09002408 if (of_find_property(np, "caps2-mmc-hs200-1_8v", NULL))
2409 pdata->caps2 |= MMC_CAP2_HS200_1_8V_SDR;
2410
2411 if (of_find_property(np, "caps2-mmc-hs200-1_2v", NULL))
2412 pdata->caps2 |= MMC_CAP2_HS200_1_2V_SDR;
2413
Thomas Abrahamc91eab42012-09-17 18:16:40 +00002414 return pdata;
2415}
2416
2417#else /* CONFIG_OF */
2418static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
2419{
2420 return ERR_PTR(-EINVAL);
2421}
2422#endif /* CONFIG_OF */
2423
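/*
 * Common probe path; the bus glue is expected to have filled in
 * host->dev, host->regs and host->irq beforehand.  Parses DT platform
 * data if needed, enables the biu/ciu clocks and the optional vmmc
 * regulator, determines the host data width from HCON, resets the
 * controller, sets up DMA or PIO, programs the timeout and FIFO
 * threshold registers, creates the card-detect workqueue, requests the
 * IRQ and registers each slot.
 */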
Shashidhar Hiremath62ca8032012-01-13 16:04:57 +05302424int dw_mci_probe(struct dw_mci *host)
Will Newtonf95f3852011-01-02 01:11:59 -05002425{
Arnd Bergmanne95baf12012-11-08 14:26:11 +00002426 const struct dw_mci_drv_data *drv_data = host->drv_data;
Shashidhar Hiremath62ca8032012-01-13 16:04:57 +05302427 int width, i, ret = 0;
Will Newtonf95f3852011-01-02 01:11:59 -05002428 u32 fifo_size;
Thomas Abraham1c2215b2012-09-17 18:16:37 +00002429 int init_slots = 0;
Will Newtonf95f3852011-01-02 01:11:59 -05002430
Thomas Abrahamc91eab42012-09-17 18:16:40 +00002431 if (!host->pdata) {
2432 host->pdata = dw_mci_parse_dt(host);
2433 if (IS_ERR(host->pdata)) {
2434 dev_err(host->dev, "platform data not available\n");
2435 return -EINVAL;
2436 }
Will Newtonf95f3852011-01-02 01:11:59 -05002437 }
2438
Shashidhar Hiremath62ca8032012-01-13 16:04:57 +05302439 if (!host->pdata->select_slot && host->pdata->num_slots > 1) {
Thomas Abraham4a909202012-09-17 18:16:35 +00002440 dev_err(host->dev,
Will Newtonf95f3852011-01-02 01:11:59 -05002441 "Platform data must supply select_slot function\n");
Shashidhar Hiremath62ca8032012-01-13 16:04:57 +05302442 return -ENODEV;
Will Newtonf95f3852011-01-02 01:11:59 -05002443 }
2444
Seungwon Jeon780f22a2012-11-28 19:26:03 +09002445 host->biu_clk = devm_clk_get(host->dev, "biu");
Thomas Abrahamf90a0612012-09-17 18:16:38 +00002446 if (IS_ERR(host->biu_clk)) {
2447 dev_dbg(host->dev, "biu clock not available\n");
2448 } else {
2449 ret = clk_prepare_enable(host->biu_clk);
2450 if (ret) {
2451 dev_err(host->dev, "failed to enable biu clock\n");
Thomas Abrahamf90a0612012-09-17 18:16:38 +00002452 return ret;
2453 }
Will Newtonf95f3852011-01-02 01:11:59 -05002454 }
2455
Seungwon Jeon780f22a2012-11-28 19:26:03 +09002456 host->ciu_clk = devm_clk_get(host->dev, "ciu");
Thomas Abrahamf90a0612012-09-17 18:16:38 +00002457 if (IS_ERR(host->ciu_clk)) {
2458 dev_dbg(host->dev, "ciu clock not available\n");
Doug Anderson3c6d89e2013-06-07 10:28:30 -07002459 host->bus_hz = host->pdata->bus_hz;
Thomas Abrahamf90a0612012-09-17 18:16:38 +00002460 } else {
2461 ret = clk_prepare_enable(host->ciu_clk);
2462 if (ret) {
2463 dev_err(host->dev, "failed to enable ciu clock\n");
Thomas Abrahamf90a0612012-09-17 18:16:38 +00002464 goto err_clk_biu;
2465 }
Thomas Abrahamf90a0612012-09-17 18:16:38 +00002466
Doug Anderson3c6d89e2013-06-07 10:28:30 -07002467 if (host->pdata->bus_hz) {
2468 ret = clk_set_rate(host->ciu_clk, host->pdata->bus_hz);
2469 if (ret)
2470 dev_warn(host->dev,
2471				 "Unable to set bus rate to %u\n",
2472 host->pdata->bus_hz);
2473 }
Thomas Abrahamf90a0612012-09-17 18:16:38 +00002474 host->bus_hz = clk_get_rate(host->ciu_clk);
Doug Anderson3c6d89e2013-06-07 10:28:30 -07002475 }
Thomas Abrahamf90a0612012-09-17 18:16:38 +00002476
Yuvaraj Kumar C D002f0d52013-08-31 00:12:19 +09002477 if (drv_data && drv_data->init) {
2478 ret = drv_data->init(host);
2479 if (ret) {
2480 dev_err(host->dev,
2481 "implementation specific init failed\n");
2482 goto err_clk_ciu;
2483 }
2484 }
2485
James Hogancb27a842012-10-16 09:43:08 +01002486 if (drv_data && drv_data->setup_clock) {
2487 ret = drv_data->setup_clock(host);
Thomas Abraham800d78b2012-09-17 18:16:42 +00002488 if (ret) {
2489 dev_err(host->dev,
2490 "implementation specific clock setup failed\n");
2491 goto err_clk_ciu;
2492 }
2493 }
2494
Mark Browna55d6ff2013-07-29 21:55:27 +01002495 host->vmmc = devm_regulator_get_optional(host->dev, "vmmc");
Doug Anderson870556a2013-06-07 10:28:29 -07002496 if (IS_ERR(host->vmmc)) {
2497 ret = PTR_ERR(host->vmmc);
2498 if (ret == -EPROBE_DEFER)
2499 goto err_clk_ciu;
2500
2501 dev_info(host->dev, "no vmmc regulator found: %d\n", ret);
2502 host->vmmc = NULL;
2503 } else {
2504 ret = regulator_enable(host->vmmc);
2505 if (ret) {
2506 if (ret != -EPROBE_DEFER)
2507 dev_err(host->dev,
2508 "regulator_enable fail: %d\n", ret);
2509 goto err_clk_ciu;
2510 }
2511 }
2512
Thomas Abrahamf90a0612012-09-17 18:16:38 +00002513 if (!host->bus_hz) {
2514 dev_err(host->dev,
2515 "Platform data must supply bus speed\n");
2516 ret = -ENODEV;
Doug Anderson870556a2013-06-07 10:28:29 -07002517 goto err_regulator;
Thomas Abrahamf90a0612012-09-17 18:16:38 +00002518 }
2519
Shashidhar Hiremath62ca8032012-01-13 16:04:57 +05302520 host->quirks = host->pdata->quirks;
Will Newtonf95f3852011-01-02 01:11:59 -05002521
2522 spin_lock_init(&host->lock);
2523 INIT_LIST_HEAD(&host->queue);
2524
Will Newtonf95f3852011-01-02 01:11:59 -05002525 /*
2526 * Get the host data width - this assumes that HCON has been set with
2527 * the correct values.
2528 */
2529 i = (mci_readl(host, HCON) >> 7) & 0x7;
2530 if (!i) {
2531 host->push_data = dw_mci_push_data16;
2532 host->pull_data = dw_mci_pull_data16;
2533 width = 16;
2534 host->data_shift = 1;
2535 } else if (i == 2) {
2536 host->push_data = dw_mci_push_data64;
2537 host->pull_data = dw_mci_pull_data64;
2538 width = 64;
2539 host->data_shift = 3;
2540 } else {
2541 /* Check for a reserved value, and warn if it is */
2542 WARN((i != 1),
2543 "HCON reports a reserved host data width!\n"
2544 "Defaulting to 32-bit access.\n");
2545 host->push_data = dw_mci_push_data32;
2546 host->pull_data = dw_mci_pull_data32;
2547 width = 32;
2548 host->data_shift = 2;
2549 }
2550
2551 /* Reset all blocks */
Seungwon Jeon31bff452013-08-31 00:14:23 +09002552	if (!dw_mci_ctrl_all_reset(host)) {
		ret = -ENODEV;
		goto err_regulator;
	}
2554
2555 host->dma_ops = host->pdata->dma_ops;
2556 dw_mci_init_dma(host);
Will Newtonf95f3852011-01-02 01:11:59 -05002557
2558 /* Clear the interrupts for the host controller */
2559 mci_writel(host, RINTSTS, 0xFFFFFFFF);
2560 mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */
2561
2562 /* Put in max timeout */
2563 mci_writel(host, TMOUT, 0xFFFFFFFF);
2564
2565 /*
2566	 * FIFO threshold settings: RX_WMark = fifo_size / 2 - 1,
2567	 * TX_WMark = fifo_size / 2, DMA size = 8
2568 */
James Hoganb86d8252011-06-24 13:57:18 +01002569 if (!host->pdata->fifo_depth) {
2570 /*
2571 * Power-on value of RX_WMark is FIFO_DEPTH-1, but this may
2572 * have been overwritten by the bootloader, just like we're
2573 * about to do, so if you know the value for your hardware, you
2574 * should put it in the platform data.
2575 */
2576 fifo_size = mci_readl(host, FIFOTH);
Jaehoon Chung8234e862012-01-11 09:28:21 +00002577 fifo_size = 1 + ((fifo_size >> 16) & 0xfff);
James Hoganb86d8252011-06-24 13:57:18 +01002578 } else {
2579 fifo_size = host->pdata->fifo_depth;
2580 }
2581 host->fifo_depth = fifo_size;
Seungwon Jeon52426892013-08-31 00:13:42 +09002582 host->fifoth_val =
2583 SDMMC_SET_FIFOTH(0x2, fifo_size / 2 - 1, fifo_size / 2);
Jaehoon Chunge61cf112011-03-17 20:32:33 +09002584 mci_writel(host, FIFOTH, host->fifoth_val);
Will Newtonf95f3852011-01-02 01:11:59 -05002585
2586 /* disable clock to CIU */
2587 mci_writel(host, CLKENA, 0);
2588 mci_writel(host, CLKSRC, 0);
2589
James Hogan63008762013-03-12 10:43:54 +00002590 /*
2591	 * The data offset changed in the 2.40a spec, so check the version
2592	 * ID and set the offset for the DATA register accordingly.
2593 */
2594 host->verid = SDMMC_GET_VERID(mci_readl(host, VERID));
2595 dev_info(host->dev, "Version ID is %04x\n", host->verid);
2596
2597 if (host->verid < DW_MMC_240A)
2598 host->data_offset = DATA_OFFSET;
2599 else
2600 host->data_offset = DATA_240A_OFFSET;
2601
Will Newtonf95f3852011-01-02 01:11:59 -05002602 tasklet_init(&host->tasklet, dw_mci_tasklet_func, (unsigned long)host);
Thomas Abraham95dcc2c2012-05-01 14:57:36 -07002603 host->card_workqueue = alloc_workqueue("dw-mci-card",
James Hogan1791b13e2011-06-24 13:55:55 +01002604 WQ_MEM_RECLAIM | WQ_NON_REENTRANT, 1);
Wei Yongjunef7aef92013-04-19 09:25:45 +08002605 if (!host->card_workqueue) {
2606 ret = -ENOMEM;
James Hogan1791b13e2011-06-24 13:55:55 +01002607 goto err_dmaunmap;
Wei Yongjunef7aef92013-04-19 09:25:45 +08002608 }
James Hogan1791b13e2011-06-24 13:55:55 +01002609 INIT_WORK(&host->card_work, dw_mci_work_routine_card);
Seungwon Jeon780f22a2012-11-28 19:26:03 +09002610 ret = devm_request_irq(host->dev, host->irq, dw_mci_interrupt,
2611 host->irq_flags, "dw-mci", host);
Will Newtonf95f3852011-01-02 01:11:59 -05002612 if (ret)
James Hogan1791b13e2011-06-24 13:55:55 +01002613 goto err_workqueue;
Will Newtonf95f3852011-01-02 01:11:59 -05002614
Will Newtonf95f3852011-01-02 01:11:59 -05002615 if (host->pdata->num_slots)
2616 host->num_slots = host->pdata->num_slots;
2617 else
2618 host->num_slots = ((mci_readl(host, HCON) >> 1) & 0x1F) + 1;
2619
Yuvaraj CD2da1d7f2012-10-08 14:29:51 +05302620 /*
2621	 * Enable interrupts for command done, data over, data empty, card detect,
2622	 * receive ready, and errors such as transmit/receive timeout and CRC error
2623 */
2624 mci_writel(host, RINTSTS, 0xFFFFFFFF);
2625 mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER |
2626 SDMMC_INT_TXDR | SDMMC_INT_RXDR |
2627 DW_MCI_ERROR_FLAGS | SDMMC_INT_CD);
2628 mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE); /* Enable mci interrupt */
2629
2630 dev_info(host->dev, "DW MMC controller at irq %d, "
2631 "%d bit host data width, "
2632 "%u deep fifo\n",
2633 host->irq, width, fifo_size);
2634
Will Newtonf95f3852011-01-02 01:11:59 -05002635 /* We need at least one slot to succeed */
2636 for (i = 0; i < host->num_slots; i++) {
2637 ret = dw_mci_init_slot(host, i);
Thomas Abraham1c2215b2012-09-17 18:16:37 +00002638 if (ret)
2639 dev_dbg(host->dev, "slot %d init failed\n", i);
2640 else
2641 init_slots++;
2642 }
2643
2644 if (init_slots) {
2645 dev_info(host->dev, "%d slots initialized\n", init_slots);
2646 } else {
2647 dev_dbg(host->dev, "attempted to initialize %d slots, "
2648 "but failed on all\n", host->num_slots);
Seungwon Jeon780f22a2012-11-28 19:26:03 +09002649 goto err_workqueue;
Will Newtonf95f3852011-01-02 01:11:59 -05002650 }
2651
Will Newtonf95f3852011-01-02 01:11:59 -05002652 if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO)
Thomas Abraham4a909202012-09-17 18:16:35 +00002653 dev_info(host->dev, "Internal DMAC interrupt fix enabled.\n");
Will Newtonf95f3852011-01-02 01:11:59 -05002654
2655 return 0;
2656
James Hogan1791b13e2011-06-24 13:55:55 +01002657err_workqueue:
Thomas Abraham95dcc2c2012-05-01 14:57:36 -07002658 destroy_workqueue(host->card_workqueue);
James Hogan1791b13e2011-06-24 13:55:55 +01002659
Will Newtonf95f3852011-01-02 01:11:59 -05002660err_dmaunmap:
2661 if (host->use_dma && host->dma_ops->exit)
2662 host->dma_ops->exit(host);
Will Newtonf95f3852011-01-02 01:11:59 -05002663
Doug Anderson870556a2013-06-07 10:28:29 -07002664err_regulator:
Seungwon Jeon780f22a2012-11-28 19:26:03 +09002665 if (host->vmmc)
Jaehoon Chungc07946a2011-02-25 11:08:14 +09002666 regulator_disable(host->vmmc);
Thomas Abrahamf90a0612012-09-17 18:16:38 +00002667
2668err_clk_ciu:
Seungwon Jeon780f22a2012-11-28 19:26:03 +09002669 if (!IS_ERR(host->ciu_clk))
Thomas Abrahamf90a0612012-09-17 18:16:38 +00002670 clk_disable_unprepare(host->ciu_clk);
Seungwon Jeon780f22a2012-11-28 19:26:03 +09002671
Thomas Abrahamf90a0612012-09-17 18:16:38 +00002672err_clk_biu:
Seungwon Jeon780f22a2012-11-28 19:26:03 +09002673 if (!IS_ERR(host->biu_clk))
Thomas Abrahamf90a0612012-09-17 18:16:38 +00002674 clk_disable_unprepare(host->biu_clk);
Seungwon Jeon780f22a2012-11-28 19:26:03 +09002675
Will Newtonf95f3852011-01-02 01:11:59 -05002676 return ret;
2677}
Shashidhar Hiremath62ca8032012-01-13 16:04:57 +05302678EXPORT_SYMBOL(dw_mci_probe);
Will Newtonf95f3852011-01-02 01:11:59 -05002679
Shashidhar Hiremath62ca8032012-01-13 16:04:57 +05302680void dw_mci_remove(struct dw_mci *host)
Will Newtonf95f3852011-01-02 01:11:59 -05002681{
Will Newtonf95f3852011-01-02 01:11:59 -05002682 int i;
2683
2684 mci_writel(host, RINTSTS, 0xFFFFFFFF);
2685 mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */
2686
Will Newtonf95f3852011-01-02 01:11:59 -05002687 for (i = 0; i < host->num_slots; i++) {
Thomas Abraham4a909202012-09-17 18:16:35 +00002688 dev_dbg(host->dev, "remove slot %d\n", i);
Will Newtonf95f3852011-01-02 01:11:59 -05002689 if (host->slot[i])
2690 dw_mci_cleanup_slot(host->slot[i], i);
2691 }
2692
2693 /* disable clock to CIU */
2694 mci_writel(host, CLKENA, 0);
2695 mci_writel(host, CLKSRC, 0);
2696
Thomas Abraham95dcc2c2012-05-01 14:57:36 -07002697 destroy_workqueue(host->card_workqueue);
Will Newtonf95f3852011-01-02 01:11:59 -05002698
2699 if (host->use_dma && host->dma_ops->exit)
2700 host->dma_ops->exit(host);
2701
Seungwon Jeon780f22a2012-11-28 19:26:03 +09002702 if (host->vmmc)
Jaehoon Chungc07946a2011-02-25 11:08:14 +09002703 regulator_disable(host->vmmc);
Jaehoon Chungc07946a2011-02-25 11:08:14 +09002704
Thomas Abrahamf90a0612012-09-17 18:16:38 +00002705 if (!IS_ERR(host->ciu_clk))
2706 clk_disable_unprepare(host->ciu_clk);
Seungwon Jeon780f22a2012-11-28 19:26:03 +09002707
Thomas Abrahamf90a0612012-09-17 18:16:38 +00002708 if (!IS_ERR(host->biu_clk))
2709 clk_disable_unprepare(host->biu_clk);
Will Newtonf95f3852011-01-02 01:11:59 -05002710}
Shashidhar Hiremath62ca8032012-01-13 16:04:57 +05302711EXPORT_SYMBOL(dw_mci_remove);
2712
2713
Will Newtonf95f3852011-01-02 01:11:59 -05002714
Jaehoon Chung6fe88902011-12-08 19:23:03 +09002715#ifdef CONFIG_PM_SLEEP
Will Newtonf95f3852011-01-02 01:11:59 -05002716/*
2717 * TODO: we should probably disable the clock to the card in the suspend path.
2718 */
Shashidhar Hiremath62ca8032012-01-13 16:04:57 +05302719int dw_mci_suspend(struct dw_mci *host)
Will Newtonf95f3852011-01-02 01:11:59 -05002720{
Jaehoon Chungc07946a2011-02-25 11:08:14 +09002721 if (host->vmmc)
2722 regulator_disable(host->vmmc);
2723
Will Newtonf95f3852011-01-02 01:11:59 -05002724 return 0;
2725}
Shashidhar Hiremath62ca8032012-01-13 16:04:57 +05302726EXPORT_SYMBOL(dw_mci_suspend);
Will Newtonf95f3852011-01-02 01:11:59 -05002727
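/*
 * Resume path: re-enable the vmmc regulator, reset all controller
 * blocks, re-initialise DMA, restore the FIFO threshold, timeout and
 * interrupt mask registers, and reprogram the bus for any slot that
 * kept power across suspend (MMC_PM_KEEP_POWER).
 */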
Shashidhar Hiremath62ca8032012-01-13 16:04:57 +05302728int dw_mci_resume(struct dw_mci *host)
Will Newtonf95f3852011-01-02 01:11:59 -05002729{
2730 int i, ret;
Will Newtonf95f3852011-01-02 01:11:59 -05002731
Sachin Kamatf2f942c2013-04-04 11:25:10 +05302732 if (host->vmmc) {
2733 ret = regulator_enable(host->vmmc);
2734 if (ret) {
2735 dev_err(host->dev,
2736 "failed to enable regulator: %d\n", ret);
2737 return ret;
2738 }
2739 }
Jaehoon Chung1d6c4e02011-05-11 15:52:39 +09002740
Seungwon Jeon31bff452013-08-31 00:14:23 +09002741 if (!dw_mci_ctrl_all_reset(host)) {
Jaehoon Chunge61cf112011-03-17 20:32:33 +09002742 ret = -ENODEV;
2743 return ret;
2744 }
2745
Jonathan Kliegman3bfe6192012-06-14 13:31:55 -04002746 if (host->use_dma && host->dma_ops->init)
Seungwon Jeon141a7122012-05-22 13:01:03 +09002747 host->dma_ops->init(host);
2748
Seungwon Jeon52426892013-08-31 00:13:42 +09002749 /*
2750	 * Restore the initial value of the FIFOTH register and
2751	 * invalidate prev_blksz by resetting it to zero
2752 */
Jaehoon Chunge61cf112011-03-17 20:32:33 +09002753 mci_writel(host, FIFOTH, host->fifoth_val);
Seungwon Jeon52426892013-08-31 00:13:42 +09002754 host->prev_blksz = 0;
Jaehoon Chunge61cf112011-03-17 20:32:33 +09002755
Doug Anderson2eb29442013-08-31 00:11:49 +09002756 /* Put in max timeout */
2757 mci_writel(host, TMOUT, 0xFFFFFFFF);
2758
Jaehoon Chunge61cf112011-03-17 20:32:33 +09002759 mci_writel(host, RINTSTS, 0xFFFFFFFF);
2760 mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER |
2761 SDMMC_INT_TXDR | SDMMC_INT_RXDR |
2762 DW_MCI_ERROR_FLAGS | SDMMC_INT_CD);
2763 mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE);
2764
Will Newtonf95f3852011-01-02 01:11:59 -05002765 for (i = 0; i < host->num_slots; i++) {
2766 struct dw_mci_slot *slot = host->slot[i];
2767 if (!slot)
2768 continue;
Abhilash Kesavanab269122012-11-19 10:26:21 +05302769 if (slot->mmc->pm_flags & MMC_PM_KEEP_POWER) {
2770 dw_mci_set_ios(slot->mmc, &slot->mmc->ios);
2771 dw_mci_setup_bus(slot, true);
2772 }
Will Newtonf95f3852011-01-02 01:11:59 -05002773 }
Will Newtonf95f3852011-01-02 01:11:59 -05002774 return 0;
2775}
Shashidhar Hiremath62ca8032012-01-13 16:04:57 +05302776EXPORT_SYMBOL(dw_mci_resume);
Jaehoon Chung6fe88902011-12-08 19:23:03 +09002777#endif /* CONFIG_PM_SLEEP */
2778
Will Newtonf95f3852011-01-02 01:11:59 -05002779static int __init dw_mci_init(void)
2780{
Sachin Kamat8e1c4e42013-04-04 11:25:11 +05302781	pr_info("Synopsys DesignWare Multimedia Card Interface Driver\n");
Shashidhar Hiremath62ca8032012-01-13 16:04:57 +05302782 return 0;
Will Newtonf95f3852011-01-02 01:11:59 -05002783}
2784
2785static void __exit dw_mci_exit(void)
2786{
Will Newtonf95f3852011-01-02 01:11:59 -05002787}
2788
2789module_init(dw_mci_init);
2790module_exit(dw_mci_exit);
2791
2792MODULE_DESCRIPTION("DW Multimedia Card Interface driver");
2793MODULE_AUTHOR("NXP Semiconductor VietNam");
2794MODULE_AUTHOR("Imagination Technologies Ltd");
2795MODULE_LICENSE("GPL v2");