/*
 * Synopsys DesignWare Multimedia Card Interface driver
 *  (Based on NXP driver for lpc 31xx)
 *
 * Copyright (C) 2009 NXP Semiconductors
 * Copyright (C) 2009, 2010 Imagination Technologies Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/blkdev.h>
#include <linux/clk.h>
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/dw_mmc.h>
#include <linux/bitops.h>
#include <linux/regulator/consumer.h>
#include <linux/workqueue.h>
#include <linux/of.h>
#include <linux/of_gpio.h>

#include "dw_mmc.h"

/* Common flag combinations */
#define DW_MCI_DATA_ERROR_FLAGS	(SDMMC_INT_DRTO | SDMMC_INT_DCRC | \
				 SDMMC_INT_HTO | SDMMC_INT_SBE  | \
				 SDMMC_INT_EBE)
#define DW_MCI_CMD_ERROR_FLAGS	(SDMMC_INT_RTO | SDMMC_INT_RCRC | \
				 SDMMC_INT_RESP_ERR)
#define DW_MCI_ERROR_FLAGS	(DW_MCI_DATA_ERROR_FLAGS | \
				 DW_MCI_CMD_ERROR_FLAGS | SDMMC_INT_HLE)
#define DW_MCI_SEND_STATUS	1
#define DW_MCI_RECV_STATUS	2
#define DW_MCI_DMA_THRESHOLD	16

#define DW_MCI_FREQ_MAX	200000000	/* unit: HZ */
#define DW_MCI_FREQ_MIN	400000		/* unit: HZ */

#ifdef CONFIG_MMC_DW_IDMAC
#define IDMAC_INT_CLR		(SDMMC_IDMAC_INT_AI | SDMMC_IDMAC_INT_NI | \
				 SDMMC_IDMAC_INT_CES | SDMMC_IDMAC_INT_DU | \
				 SDMMC_IDMAC_INT_FBE | SDMMC_IDMAC_INT_RI | \
				 SDMMC_IDMAC_INT_TI)

struct idmac_desc {
	u32		des0;	/* Control Descriptor */
#define IDMAC_DES0_DIC	BIT(1)
#define IDMAC_DES0_LD	BIT(2)
#define IDMAC_DES0_FD	BIT(3)
#define IDMAC_DES0_CH	BIT(4)
#define IDMAC_DES0_ER	BIT(5)
#define IDMAC_DES0_CES	BIT(30)
#define IDMAC_DES0_OWN	BIT(31)

	u32		des1;	/* Buffer sizes */
#define IDMAC_SET_BUFFER1_SIZE(d, s) \
	((d)->des1 = ((d)->des1 & 0x03ffe000) | ((s) & 0x1fff))
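/*
 * Buffer 1 size occupies des1[12:0], so a single descriptor can describe at
 * most 8191 bytes; the preserved bits [25:13] carry the buffer 2 size.
 */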

	u32		des2;	/* buffer 1 physical address */

	u32		des3;	/* buffer 2 physical address */
};
#endif /* CONFIG_MMC_DW_IDMAC */

static const u8 tuning_blk_pattern_4bit[] = {
	0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc,
	0xc3, 0x3c, 0xcc, 0xff, 0xfe, 0xff, 0xfe, 0xef,
	0xff, 0xdf, 0xff, 0xdd, 0xff, 0xfb, 0xff, 0xfb,
	0xbf, 0xff, 0x7f, 0xff, 0x77, 0xf7, 0xbd, 0xef,
	0xff, 0xf0, 0xff, 0xf0, 0x0f, 0xfc, 0xcc, 0x3c,
	0xcc, 0x33, 0xcc, 0xcf, 0xff, 0xef, 0xff, 0xee,
	0xff, 0xfd, 0xff, 0xfd, 0xdf, 0xff, 0xbf, 0xff,
	0xbb, 0xff, 0xf7, 0xff, 0xf7, 0x7f, 0x7b, 0xde,
};

static const u8 tuning_blk_pattern_8bit[] = {
	0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00,
	0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 0xcc,
	0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 0xff,
	0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 0xff,
	0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 0xdd,
	0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 0xbb,
	0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 0xff,
	0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 0xff,
	0xff, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00,
	0x00, 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc,
	0xcc, 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff,
	0xff, 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee,
	0xff, 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd,
	0xdd, 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff,
	0xbb, 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff,
	0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee,
};

#if defined(CONFIG_DEBUG_FS)
static int dw_mci_req_show(struct seq_file *s, void *v)
{
	struct dw_mci_slot *slot = s->private;
	struct mmc_request *mrq;
	struct mmc_command *cmd;
	struct mmc_command *stop;
	struct mmc_data	*data;

	/* Make sure we get a consistent snapshot */
	spin_lock_bh(&slot->host->lock);
	mrq = slot->mrq;

	if (mrq) {
		cmd = mrq->cmd;
		data = mrq->data;
		stop = mrq->stop;

		if (cmd)
			seq_printf(s,
				   "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
				   cmd->opcode, cmd->arg, cmd->flags,
				   cmd->resp[0], cmd->resp[1], cmd->resp[2],
				   cmd->resp[3], cmd->error);
		if (data)
			seq_printf(s, "DATA %u / %u * %u flg %x err %d\n",
				   data->bytes_xfered, data->blocks,
				   data->blksz, data->flags, data->error);
		if (stop)
			seq_printf(s,
				   "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
				   stop->opcode, stop->arg, stop->flags,
				   stop->resp[0], stop->resp[1], stop->resp[2],
				   stop->resp[3], stop->error);
	}

	spin_unlock_bh(&slot->host->lock);

	return 0;
}

static int dw_mci_req_open(struct inode *inode, struct file *file)
{
	return single_open(file, dw_mci_req_show, inode->i_private);
}

static const struct file_operations dw_mci_req_fops = {
	.owner		= THIS_MODULE,
	.open		= dw_mci_req_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int dw_mci_regs_show(struct seq_file *s, void *v)
{
	seq_printf(s, "STATUS:\t0x%08x\n", SDMMC_STATUS);
	seq_printf(s, "RINTSTS:\t0x%08x\n", SDMMC_RINTSTS);
	seq_printf(s, "CMD:\t0x%08x\n", SDMMC_CMD);
	seq_printf(s, "CTRL:\t0x%08x\n", SDMMC_CTRL);
	seq_printf(s, "INTMASK:\t0x%08x\n", SDMMC_INTMASK);
	seq_printf(s, "CLKENA:\t0x%08x\n", SDMMC_CLKENA);

	return 0;
}

static int dw_mci_regs_open(struct inode *inode, struct file *file)
{
	return single_open(file, dw_mci_regs_show, inode->i_private);
}

static const struct file_operations dw_mci_regs_fops = {
	.owner		= THIS_MODULE,
	.open		= dw_mci_regs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static void dw_mci_init_debugfs(struct dw_mci_slot *slot)
{
	struct mmc_host	*mmc = slot->mmc;
	struct dw_mci *host = slot->host;
	struct dentry *root;
	struct dentry *node;

	root = mmc->debugfs_root;
	if (!root)
		return;

	node = debugfs_create_file("regs", S_IRUSR, root, host,
				   &dw_mci_regs_fops);
	if (!node)
		goto err;

	node = debugfs_create_file("req", S_IRUSR, root, slot,
				   &dw_mci_req_fops);
	if (!node)
		goto err;

	node = debugfs_create_u32("state", S_IRUSR, root, (u32 *)&host->state);
	if (!node)
		goto err;

	node = debugfs_create_x32("pending_events", S_IRUSR, root,
				  (u32 *)&host->pending_events);
	if (!node)
		goto err;

	node = debugfs_create_x32("completed_events", S_IRUSR, root,
				  (u32 *)&host->completed_events);
	if (!node)
		goto err;

	return;

err:
	dev_err(&mmc->class_dev, "failed to initialize debugfs for slot\n");
}
#endif /* defined(CONFIG_DEBUG_FS) */

static void dw_mci_set_timeout(struct dw_mci *host)
{
	/* timeout (maximum) */
	mci_writel(host, TMOUT, 0xffffffff);
}

static u32 dw_mci_prepare_command(struct mmc_host *mmc, struct mmc_command *cmd)
{
	struct mmc_data	*data;
	struct dw_mci_slot *slot = mmc_priv(mmc);
	const struct dw_mci_drv_data *drv_data = slot->host->drv_data;
	u32 cmdr;
	cmd->error = -EINPROGRESS;

	cmdr = cmd->opcode;

	if (cmdr == MMC_STOP_TRANSMISSION)
		cmdr |= SDMMC_CMD_STOP;
	else
		cmdr |= SDMMC_CMD_PRV_DAT_WAIT;

	if (cmd->flags & MMC_RSP_PRESENT) {
		/* We expect a response, so set this bit */
		cmdr |= SDMMC_CMD_RESP_EXP;
		if (cmd->flags & MMC_RSP_136)
			cmdr |= SDMMC_CMD_RESP_LONG;
	}

	if (cmd->flags & MMC_RSP_CRC)
		cmdr |= SDMMC_CMD_RESP_CRC;

	data = cmd->data;
	if (data) {
		cmdr |= SDMMC_CMD_DAT_EXP;
		if (data->flags & MMC_DATA_STREAM)
			cmdr |= SDMMC_CMD_STRM_MODE;
		if (data->flags & MMC_DATA_WRITE)
			cmdr |= SDMMC_CMD_DAT_WR;
	}

	if (drv_data && drv_data->prepare_command)
		drv_data->prepare_command(slot->host, &cmdr);

	return cmdr;
}

static void dw_mci_start_command(struct dw_mci *host,
				 struct mmc_command *cmd, u32 cmd_flags)
{
	host->cmd = cmd;
	dev_vdbg(host->dev,
		 "start command: ARGR=0x%08x CMDR=0x%08x\n",
		 cmd->arg, cmd_flags);

	mci_writel(host, CMDARG, cmd->arg);
	wmb();

	mci_writel(host, CMD, cmd_flags | SDMMC_CMD_START);
}

static void send_stop_cmd(struct dw_mci *host, struct mmc_data *data)
{
	dw_mci_start_command(host, data->stop, host->stop_cmdr);
}
/* DMA interface functions */
static void dw_mci_stop_dma(struct dw_mci *host)
{
	if (host->using_dma) {
		host->dma_ops->stop(host);
		host->dma_ops->cleanup(host);
	} else {
		/* Data transfer was stopped by the interrupt handler */
		set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
	}
}

static int dw_mci_get_dma_dir(struct mmc_data *data)
{
	if (data->flags & MMC_DATA_WRITE)
		return DMA_TO_DEVICE;
	else
		return DMA_FROM_DEVICE;
}

#ifdef CONFIG_MMC_DW_IDMAC
static void dw_mci_dma_cleanup(struct dw_mci *host)
{
	struct mmc_data *data = host->data;

	if (data)
		if (!data->host_cookie)
			dma_unmap_sg(host->dev,
				     data->sg,
				     data->sg_len,
				     dw_mci_get_dma_dir(data));
}

static void dw_mci_idmac_stop_dma(struct dw_mci *host)
{
	u32 temp;

	/* Disable and reset the IDMAC interface */
	temp = mci_readl(host, CTRL);
	temp &= ~SDMMC_CTRL_USE_IDMAC;
	temp |= SDMMC_CTRL_DMA_RESET;
	mci_writel(host, CTRL, temp);

	/* Stop the IDMAC running */
	temp = mci_readl(host, BMOD);
	temp &= ~(SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB);
	mci_writel(host, BMOD, temp);
}

static void dw_mci_idmac_complete_dma(struct dw_mci *host)
{
	struct mmc_data *data = host->data;

	dev_vdbg(host->dev, "DMA complete\n");

	host->dma_ops->cleanup(host);

	/*
	 * If the card was removed, data will be NULL. No point in trying to
	 * send the stop command or waiting for NBUSY in this case.
	 */
	if (data) {
		set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
		tasklet_schedule(&host->tasklet);
	}
}

static void dw_mci_translate_sglist(struct dw_mci *host, struct mmc_data *data,
				    unsigned int sg_len)
{
	int i;
	struct idmac_desc *desc = host->sg_cpu;

	for (i = 0; i < sg_len; i++, desc++) {
		unsigned int length = sg_dma_len(&data->sg[i]);
		u32 mem_addr = sg_dma_address(&data->sg[i]);

		/* Set the OWN bit and disable interrupts for this descriptor */
		desc->des0 = IDMAC_DES0_OWN | IDMAC_DES0_DIC | IDMAC_DES0_CH;

		/* Buffer length */
		IDMAC_SET_BUFFER1_SIZE(desc, length);

		/* Physical address to DMA to/from */
		desc->des2 = mem_addr;
	}

	/* Set first descriptor */
	desc = host->sg_cpu;
	desc->des0 |= IDMAC_DES0_FD;

	/* Set last descriptor */
	desc = host->sg_cpu + (i - 1) * sizeof(struct idmac_desc);
	desc->des0 &= ~(IDMAC_DES0_CH | IDMAC_DES0_DIC);
	desc->des0 |= IDMAC_DES0_LD;

	wmb();
}

static void dw_mci_idmac_start_dma(struct dw_mci *host, unsigned int sg_len)
{
	u32 temp;

	dw_mci_translate_sglist(host, host->data, sg_len);

	/* Select IDMAC interface */
	temp = mci_readl(host, CTRL);
	temp |= SDMMC_CTRL_USE_IDMAC;
	mci_writel(host, CTRL, temp);

	wmb();

	/* Enable the IDMAC */
	temp = mci_readl(host, BMOD);
	temp |= SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB;
	mci_writel(host, BMOD, temp);

	/* Start it running */
	mci_writel(host, PLDMND, 1);
}

static int dw_mci_idmac_init(struct dw_mci *host)
{
	struct idmac_desc *p;
	int i;

	/* Number of descriptors in the ring buffer */
	host->ring_size = PAGE_SIZE / sizeof(struct idmac_desc);

	/* Forward link the descriptor list */
	for (i = 0, p = host->sg_cpu; i < host->ring_size - 1; i++, p++)
		p->des3 = host->sg_dma + (sizeof(struct idmac_desc) * (i + 1));

	/* Set the last descriptor as the end-of-ring descriptor */
	p->des3 = host->sg_dma;
	p->des0 = IDMAC_DES0_ER;

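	/* Software-reset the internal DMA controller before programming it */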
	mci_writel(host, BMOD, SDMMC_IDMAC_SWRESET);

	/* Mask out interrupts - get Tx & Rx complete only */
	mci_writel(host, IDSTS, IDMAC_INT_CLR);
	mci_writel(host, IDINTEN, SDMMC_IDMAC_INT_NI | SDMMC_IDMAC_INT_RI |
		   SDMMC_IDMAC_INT_TI);

	/* Set the descriptor base address */
	mci_writel(host, DBADDR, host->sg_dma);
	return 0;
}

static const struct dw_mci_dma_ops dw_mci_idmac_ops = {
	.init = dw_mci_idmac_init,
	.start = dw_mci_idmac_start_dma,
	.stop = dw_mci_idmac_stop_dma,
	.complete = dw_mci_idmac_complete_dma,
	.cleanup = dw_mci_dma_cleanup,
};
#endif /* CONFIG_MMC_DW_IDMAC */

static int dw_mci_pre_dma_transfer(struct dw_mci *host,
				   struct mmc_data *data,
				   bool next)
{
	struct scatterlist *sg;
	unsigned int i, sg_len;

	if (!next && data->host_cookie)
		return data->host_cookie;

	/*
	 * We don't do DMA on "complex" transfers, i.e. with
	 * non-word-aligned buffers or lengths. Also, we don't bother
	 * with all the DMA setup overhead for short transfers.
	 */
	if (data->blocks * data->blksz < DW_MCI_DMA_THRESHOLD)
		return -EINVAL;

	if (data->blksz & 3)
		return -EINVAL;

	for_each_sg(data->sg, sg, data->sg_len, i) {
		if (sg->offset & 3 || sg->length & 3)
			return -EINVAL;
	}

	sg_len = dma_map_sg(host->dev,
			    data->sg,
			    data->sg_len,
			    dw_mci_get_dma_dir(data));
	if (sg_len == 0)
		return -EINVAL;

	if (next)
		data->host_cookie = sg_len;

	return sg_len;
}

static void dw_mci_pre_req(struct mmc_host *mmc,
			   struct mmc_request *mrq,
			   bool is_first_req)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct mmc_data *data = mrq->data;

	if (!slot->host->use_dma || !data)
		return;

	if (data->host_cookie) {
		data->host_cookie = 0;
		return;
	}

	if (dw_mci_pre_dma_transfer(slot->host, mrq->data, 1) < 0)
		data->host_cookie = 0;
}

static void dw_mci_post_req(struct mmc_host *mmc,
			    struct mmc_request *mrq,
			    int err)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct mmc_data *data = mrq->data;

	if (!slot->host->use_dma || !data)
		return;

	if (data->host_cookie)
		dma_unmap_sg(slot->host->dev,
			     data->sg,
			     data->sg_len,
			     dw_mci_get_dma_dir(data));
	data->host_cookie = 0;
}

static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data)
{
	int sg_len;
	u32 temp;

	host->using_dma = 0;

	/* If we don't have a channel, we can't do DMA */
	if (!host->use_dma)
		return -ENODEV;

	sg_len = dw_mci_pre_dma_transfer(host, data, 0);
	if (sg_len < 0) {
		host->dma_ops->stop(host);
		return sg_len;
	}

	host->using_dma = 1;

	dev_vdbg(host->dev,
		 "sd sg_cpu: %#lx sg_dma: %#lx sg_len: %d\n",
		 (unsigned long)host->sg_cpu, (unsigned long)host->sg_dma,
		 sg_len);

	/* Enable the DMA interface */
	temp = mci_readl(host, CTRL);
	temp |= SDMMC_CTRL_DMA_ENABLE;
	mci_writel(host, CTRL, temp);

	/* Disable RX/TX IRQs, let DMA handle it */
	temp = mci_readl(host, INTMASK);
	temp &= ~(SDMMC_INT_RXDR | SDMMC_INT_TXDR);
	mci_writel(host, INTMASK, temp);

	host->dma_ops->start(host, sg_len);

	return 0;
}

static void dw_mci_submit_data(struct dw_mci *host, struct mmc_data *data)
{
	u32 temp;

	data->error = -EINPROGRESS;

	WARN_ON(host->data);
	host->sg = NULL;
	host->data = data;

	if (data->flags & MMC_DATA_READ)
		host->dir_status = DW_MCI_RECV_STATUS;
	else
		host->dir_status = DW_MCI_SEND_STATUS;

	if (dw_mci_submit_data_dma(host, data)) {
		int flags = SG_MITER_ATOMIC;
		if (host->data->flags & MMC_DATA_READ)
			flags |= SG_MITER_TO_SG;
		else
			flags |= SG_MITER_FROM_SG;

		sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
		host->sg = data->sg;
		host->part_buf_start = 0;
		host->part_buf_count = 0;

		mci_writel(host, RINTSTS, SDMMC_INT_TXDR | SDMMC_INT_RXDR);
		temp = mci_readl(host, INTMASK);
		temp |= SDMMC_INT_TXDR | SDMMC_INT_RXDR;
		mci_writel(host, INTMASK, temp);

		temp = mci_readl(host, CTRL);
		temp &= ~SDMMC_CTRL_DMA_ENABLE;
		mci_writel(host, CTRL, temp);
	}
}

static void mci_send_cmd(struct dw_mci_slot *slot, u32 cmd, u32 arg)
{
	struct dw_mci *host = slot->host;
	unsigned long timeout = jiffies + msecs_to_jiffies(500);
	unsigned int cmd_status = 0;

	mci_writel(host, CMDARG, arg);
	wmb();
	mci_writel(host, CMD, SDMMC_CMD_START | cmd);

	while (time_before(jiffies, timeout)) {
		cmd_status = mci_readl(host, CMD);
		if (!(cmd_status & SDMMC_CMD_START))
			return;
	}
	dev_err(&slot->mmc->class_dev,
		"Timeout sending command (cmd %#x arg %#x status %#x)\n",
		cmd, arg, cmd_status);
}

static void dw_mci_setup_bus(struct dw_mci_slot *slot, bool force_clkinit)
{
	struct dw_mci *host = slot->host;
	unsigned int clock = slot->clock;
	u32 div;
	u32 clk_en_a;

	if (!clock) {
		mci_writel(host, CLKENA, 0);
		mci_send_cmd(slot,
			     SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
	} else if (clock != host->current_speed || force_clkinit) {
		div = host->bus_hz / clock;
		if (host->bus_hz % clock && host->bus_hz > clock)
			/*
			 * move the + 1 after the divide to prevent
			 * over-clocking the card.
			 */
			div += 1;

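		/*
		 * CLKDIV holds half the divider: the card clock is
		 * bus_hz / (2 * CLKDIV), and 0 means the divider is bypassed,
		 * which matches the "actual" rate printed below.
		 */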
		div = (host->bus_hz != clock) ? DIV_ROUND_UP(div, 2) : 0;

		if ((clock << div) != slot->__clk_old || force_clkinit)
			dev_info(&slot->mmc->class_dev,
				 "Bus speed (slot %d) = %dHz (slot req %dHz, actual %dHZ div = %d)\n",
				 slot->id, host->bus_hz, clock,
				 div ? ((host->bus_hz / div) >> 1) :
				 host->bus_hz, div);

		/* disable clock */
		mci_writel(host, CLKENA, 0);
		mci_writel(host, CLKSRC, 0);

		/* inform CIU */
		mci_send_cmd(slot,
			     SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);

		/* set clock to desired speed */
		mci_writel(host, CLKDIV, div);

		/* inform CIU */
		mci_send_cmd(slot,
			     SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);

		/* enable clock; only low power if no SDIO */
		clk_en_a = SDMMC_CLKEN_ENABLE << slot->id;
		if (!(mci_readl(host, INTMASK) & SDMMC_INT_SDIO(slot->id)))
			clk_en_a |= SDMMC_CLKEN_LOW_PWR << slot->id;
		mci_writel(host, CLKENA, clk_en_a);

		/* inform CIU */
		mci_send_cmd(slot,
			     SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);

		/* remember the requested clock together with the divider used */
684 slot->__clk_old = clock << div;
Will Newtonf95f3852011-01-02 01:11:59 -0500685 }
686
Doug Andersonfdf492a2013-08-31 00:11:43 +0900687 host->current_speed = clock;
688
Will Newtonf95f3852011-01-02 01:11:59 -0500689 /* Set the current slot bus width */
Seungwon Jeon1d56c452011-06-20 17:23:53 +0900690 mci_writel(host, CTYPE, (slot->ctype << slot->id));
Will Newtonf95f3852011-01-02 01:11:59 -0500691}
692
Seungwon Jeon053b3ce2011-12-22 18:01:29 +0900693static void __dw_mci_start_request(struct dw_mci *host,
694 struct dw_mci_slot *slot,
695 struct mmc_command *cmd)
Will Newtonf95f3852011-01-02 01:11:59 -0500696{
697 struct mmc_request *mrq;
Will Newtonf95f3852011-01-02 01:11:59 -0500698 struct mmc_data *data;
699 u32 cmdflags;
700
701 mrq = slot->mrq;
702 if (host->pdata->select_slot)
703 host->pdata->select_slot(slot->id);
704
Will Newtonf95f3852011-01-02 01:11:59 -0500705 host->cur_slot = slot;
706 host->mrq = mrq;
707
708 host->pending_events = 0;
709 host->completed_events = 0;
710 host->data_status = 0;
711
Seungwon Jeon053b3ce2011-12-22 18:01:29 +0900712 data = cmd->data;
Will Newtonf95f3852011-01-02 01:11:59 -0500713 if (data) {
714 dw_mci_set_timeout(host);
715 mci_writel(host, BYTCNT, data->blksz*data->blocks);
716 mci_writel(host, BLKSIZ, data->blksz);
717 }
718
Will Newtonf95f3852011-01-02 01:11:59 -0500719 cmdflags = dw_mci_prepare_command(slot->mmc, cmd);
720
721 /* this is the first command, send the initialization clock */
722 if (test_and_clear_bit(DW_MMC_CARD_NEED_INIT, &slot->flags))
723 cmdflags |= SDMMC_CMD_INIT;
724
725 if (data) {
726 dw_mci_submit_data(host, data);
727 wmb();
728 }
729
730 dw_mci_start_command(host, cmd, cmdflags);
731
732 if (mrq->stop)
733 host->stop_cmdr = dw_mci_prepare_command(slot->mmc, mrq->stop);
734}

static void dw_mci_start_request(struct dw_mci *host,
				 struct dw_mci_slot *slot)
{
	struct mmc_request *mrq = slot->mrq;
	struct mmc_command *cmd;

	cmd = mrq->sbc ? mrq->sbc : mrq->cmd;
	__dw_mci_start_request(host, slot, cmd);
}

/* must be called with host->lock held */
static void dw_mci_queue_request(struct dw_mci *host, struct dw_mci_slot *slot,
				 struct mmc_request *mrq)
{
	dev_vdbg(&slot->mmc->class_dev, "queue request: state=%d\n",
		 host->state);

	slot->mrq = mrq;

	if (host->state == STATE_IDLE) {
		host->state = STATE_SENDING_CMD;
		dw_mci_start_request(host, slot);
	} else {
		list_add_tail(&slot->queue_node, &host->queue);
	}
}

static void dw_mci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;

	WARN_ON(slot->mrq);

	/*
	 * The check for card presence and queueing of the request must be
	 * atomic, otherwise the card could be removed in between and the
	 * request wouldn't fail until another card was inserted.
	 */
	spin_lock_bh(&host->lock);

	if (!test_bit(DW_MMC_CARD_PRESENT, &slot->flags)) {
		spin_unlock_bh(&host->lock);
		mrq->cmd->error = -ENOMEDIUM;
		mmc_request_done(mmc, mrq);
		return;
	}

	dw_mci_queue_request(host, slot, mrq);

	spin_unlock_bh(&host->lock);
}

static void dw_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	const struct dw_mci_drv_data *drv_data = slot->host->drv_data;
	u32 regs;

	switch (ios->bus_width) {
	case MMC_BUS_WIDTH_4:
		slot->ctype = SDMMC_CTYPE_4BIT;
		break;
	case MMC_BUS_WIDTH_8:
		slot->ctype = SDMMC_CTYPE_8BIT;
		break;
	default:
		/* set default 1 bit mode */
		slot->ctype = SDMMC_CTYPE_1BIT;
	}

	regs = mci_readl(slot->host, UHS_REG);

	/* DDR mode set */
	if (ios->timing == MMC_TIMING_UHS_DDR50)
		regs |= ((0x1 << slot->id) << 16);
	else
		regs &= ~((0x1 << slot->id) << 16);

	mci_writel(slot->host, UHS_REG, regs);

	/*
	 * Use mirror of ios->clock to prevent race with mmc
	 * core ios update when finding the minimum.
	 */
	slot->clock = ios->clock;

	if (drv_data && drv_data->set_ios)
		drv_data->set_ios(slot->host, ios);

	/* Slot specific timing and width adjustment */
	dw_mci_setup_bus(slot, false);

	switch (ios->power_mode) {
	case MMC_POWER_UP:
		set_bit(DW_MMC_CARD_NEED_INIT, &slot->flags);
		/* Power up slot */
		if (slot->host->pdata->setpower)
			slot->host->pdata->setpower(slot->id, mmc->ocr_avail);
		regs = mci_readl(slot->host, PWREN);
		regs |= (1 << slot->id);
		mci_writel(slot->host, PWREN, regs);
		break;
	case MMC_POWER_OFF:
		/* Power down slot */
		if (slot->host->pdata->setpower)
			slot->host->pdata->setpower(slot->id, 0);
		regs = mci_readl(slot->host, PWREN);
		regs &= ~(1 << slot->id);
		mci_writel(slot->host, PWREN, regs);
		break;
	default:
		break;
	}
}

static int dw_mci_get_ro(struct mmc_host *mmc)
{
	int read_only;
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci_board *brd = slot->host->pdata;

	/* Use platform get_ro function, else try on board write protect */
	if (slot->quirks & DW_MCI_SLOT_QUIRK_NO_WRITE_PROTECT)
		read_only = 0;
	else if (brd->get_ro)
		read_only = brd->get_ro(slot->id);
	else if (gpio_is_valid(slot->wp_gpio))
		read_only = gpio_get_value(slot->wp_gpio);
	else
		read_only =
			mci_readl(slot->host, WRTPRT) & (1 << slot->id) ? 1 : 0;

	dev_dbg(&mmc->class_dev, "card is %s\n",
		read_only ? "read-only" : "read-write");

	return read_only;
}

static int dw_mci_get_cd(struct mmc_host *mmc)
{
	int present;
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci_board *brd = slot->host->pdata;

	/* Use platform get_cd function, else try onboard card detect */
	if (brd->quirks & DW_MCI_QUIRK_BROKEN_CARD_DETECTION)
		present = 1;
	else if (brd->get_cd)
		present = !brd->get_cd(slot->id);
	else
		present = (mci_readl(slot->host, CDETECT) & (1 << slot->id))
			== 0 ? 1 : 0;

	if (present)
		dev_dbg(&mmc->class_dev, "card is present\n");
	else
		dev_dbg(&mmc->class_dev, "card is not present\n");

	return present;
}

/*
 * Disable low power mode.
 *
 * Low power mode will stop the card clock when idle. According to the
 * description of the CLKENA register we should disable low power mode
 * for SDIO cards if we need SDIO interrupts to work.
 *
 * This function is fast if low power mode is already disabled.
 */
static void dw_mci_disable_low_power(struct dw_mci_slot *slot)
{
	struct dw_mci *host = slot->host;
	u32 clk_en_a;
	const u32 clken_low_pwr = SDMMC_CLKEN_LOW_PWR << slot->id;

	clk_en_a = mci_readl(host, CLKENA);

	if (clk_en_a & clken_low_pwr) {
		mci_writel(host, CLKENA, clk_en_a & ~clken_low_pwr);
		mci_send_cmd(slot, SDMMC_CMD_UPD_CLK |
			     SDMMC_CMD_PRV_DAT_WAIT, 0);
	}
}

static void dw_mci_enable_sdio_irq(struct mmc_host *mmc, int enb)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;
	u32 int_mask;

	/* Enable/disable Slot Specific SDIO interrupt */
	int_mask = mci_readl(host, INTMASK);
	if (enb) {
		/*
		 * Turn off low power mode if it was enabled. This is a bit of
		 * a heavy operation and we disable / enable IRQs a lot, so
		 * we'll leave low power mode disabled and it will get
		 * re-enabled again in dw_mci_setup_bus().
		 */
		dw_mci_disable_low_power(slot);

		mci_writel(host, INTMASK,
			   (int_mask | SDMMC_INT_SDIO(slot->id)));
	} else {
		mci_writel(host, INTMASK,
			   (int_mask & ~SDMMC_INT_SDIO(slot->id)));
	}
}

static int dw_mci_execute_tuning(struct mmc_host *mmc, u32 opcode)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;
	const struct dw_mci_drv_data *drv_data = host->drv_data;
	struct dw_mci_tuning_data tuning_data;
	int err = -ENOSYS;

	if (opcode == MMC_SEND_TUNING_BLOCK_HS200) {
		if (mmc->ios.bus_width == MMC_BUS_WIDTH_8) {
			tuning_data.blk_pattern = tuning_blk_pattern_8bit;
			tuning_data.blksz = sizeof(tuning_blk_pattern_8bit);
		} else if (mmc->ios.bus_width == MMC_BUS_WIDTH_4) {
			tuning_data.blk_pattern = tuning_blk_pattern_4bit;
			tuning_data.blksz = sizeof(tuning_blk_pattern_4bit);
		} else {
			return -EINVAL;
		}
	} else if (opcode == MMC_SEND_TUNING_BLOCK) {
		tuning_data.blk_pattern = tuning_blk_pattern_4bit;
		tuning_data.blksz = sizeof(tuning_blk_pattern_4bit);
	} else {
		dev_err(host->dev,
			"Undefined command(%d) for tuning\n", opcode);
		return -EINVAL;
	}

	if (drv_data && drv_data->execute_tuning)
		err = drv_data->execute_tuning(slot, opcode, &tuning_data);
	return err;
}

static const struct mmc_host_ops dw_mci_ops = {
	.request		= dw_mci_request,
	.pre_req		= dw_mci_pre_req,
	.post_req		= dw_mci_post_req,
	.set_ios		= dw_mci_set_ios,
	.get_ro			= dw_mci_get_ro,
	.get_cd			= dw_mci_get_cd,
	.enable_sdio_irq	= dw_mci_enable_sdio_irq,
	.execute_tuning		= dw_mci_execute_tuning,
};

static void dw_mci_request_end(struct dw_mci *host, struct mmc_request *mrq)
	__releases(&host->lock)
	__acquires(&host->lock)
{
	struct dw_mci_slot *slot;
	struct mmc_host	*prev_mmc = host->cur_slot->mmc;

	WARN_ON(host->cmd || host->data);

	host->cur_slot->mrq = NULL;
	host->mrq = NULL;
	if (!list_empty(&host->queue)) {
		slot = list_entry(host->queue.next,
				  struct dw_mci_slot, queue_node);
		list_del(&slot->queue_node);
		dev_vdbg(host->dev, "list not empty: %s is next\n",
			 mmc_hostname(slot->mmc));
		host->state = STATE_SENDING_CMD;
		dw_mci_start_request(host, slot);
	} else {
		dev_vdbg(host->dev, "list empty\n");
		host->state = STATE_IDLE;
	}

	spin_unlock(&host->lock);
	mmc_request_done(prev_mmc, mrq);
	spin_lock(&host->lock);
}

static void dw_mci_command_complete(struct dw_mci *host, struct mmc_command *cmd)
{
	u32 status = host->cmd_status;

	host->cmd_status = 0;

	/* Read the response from the card (up to 16 bytes) */
	if (cmd->flags & MMC_RSP_PRESENT) {
		if (cmd->flags & MMC_RSP_136) {
			cmd->resp[3] = mci_readl(host, RESP0);
			cmd->resp[2] = mci_readl(host, RESP1);
			cmd->resp[1] = mci_readl(host, RESP2);
			cmd->resp[0] = mci_readl(host, RESP3);
		} else {
			cmd->resp[0] = mci_readl(host, RESP0);
			cmd->resp[1] = 0;
			cmd->resp[2] = 0;
			cmd->resp[3] = 0;
		}
	}

	if (status & SDMMC_INT_RTO)
		cmd->error = -ETIMEDOUT;
	else if ((cmd->flags & MMC_RSP_CRC) && (status & SDMMC_INT_RCRC))
		cmd->error = -EILSEQ;
	else if (status & SDMMC_INT_RESP_ERR)
		cmd->error = -EIO;
	else
		cmd->error = 0;

	if (cmd->error) {
		/* newer ip versions need a delay between retries */
		if (host->quirks & DW_MCI_QUIRK_RETRY_DELAY)
			mdelay(20);

		if (cmd->data) {
			dw_mci_stop_dma(host);
			host->data = NULL;
		}
	}
}

static void dw_mci_tasklet_func(unsigned long priv)
{
	struct dw_mci *host = (struct dw_mci *)priv;
	struct mmc_data	*data;
	struct mmc_command *cmd;
	enum dw_mci_state state;
	enum dw_mci_state prev_state;
	u32 status, ctrl;

	spin_lock(&host->lock);

	state = host->state;
	data = host->data;

	do {
		prev_state = state;

		switch (state) {
		case STATE_IDLE:
			break;

		case STATE_SENDING_CMD:
			if (!test_and_clear_bit(EVENT_CMD_COMPLETE,
						&host->pending_events))
				break;

			cmd = host->cmd;
			host->cmd = NULL;
			set_bit(EVENT_CMD_COMPLETE, &host->completed_events);
			dw_mci_command_complete(host, cmd);
			if (cmd == host->mrq->sbc && !cmd->error) {
				prev_state = state = STATE_SENDING_CMD;
				__dw_mci_start_request(host, host->cur_slot,
						       host->mrq->cmd);
				goto unlock;
			}

			if (!host->mrq->data || cmd->error) {
				dw_mci_request_end(host, host->mrq);
				goto unlock;
			}

			prev_state = state = STATE_SENDING_DATA;
			/* fall through */

		case STATE_SENDING_DATA:
			if (test_and_clear_bit(EVENT_DATA_ERROR,
					       &host->pending_events)) {
				dw_mci_stop_dma(host);
				if (data->stop)
					send_stop_cmd(host, data);
				state = STATE_DATA_ERROR;
				break;
			}

			if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
						&host->pending_events))
				break;

			set_bit(EVENT_XFER_COMPLETE, &host->completed_events);
			prev_state = state = STATE_DATA_BUSY;
			/* fall through */

		case STATE_DATA_BUSY:
			if (!test_and_clear_bit(EVENT_DATA_COMPLETE,
						&host->pending_events))
				break;

			host->data = NULL;
			set_bit(EVENT_DATA_COMPLETE, &host->completed_events);
			status = host->data_status;

			if (status & DW_MCI_DATA_ERROR_FLAGS) {
				if (status & SDMMC_INT_DRTO) {
					data->error = -ETIMEDOUT;
				} else if (status & SDMMC_INT_DCRC) {
					data->error = -EILSEQ;
				} else if (status & SDMMC_INT_EBE &&
					   host->dir_status ==
					   DW_MCI_SEND_STATUS) {
					/*
					 * No data CRC status was returned.
					 * The number of bytes transferred will
					 * be exaggerated in PIO mode.
					 */
					data->bytes_xfered = 0;
					data->error = -ETIMEDOUT;
				} else {
					dev_err(host->dev,
						"data FIFO error "
						"(status=%08x)\n",
						status);
					data->error = -EIO;
				}
				/*
				 * After an error, there may be data lingering
				 * in the FIFO, so reset it - doing so
				 * generates a block interrupt, hence setting
				 * the scatter-gather pointer to NULL.
				 */
				sg_miter_stop(&host->sg_miter);
				host->sg = NULL;
				ctrl = mci_readl(host, CTRL);
				ctrl |= SDMMC_CTRL_FIFO_RESET;
				mci_writel(host, CTRL, ctrl);
			} else {
				data->bytes_xfered = data->blocks * data->blksz;
				data->error = 0;
			}

			if (!data->stop) {
				dw_mci_request_end(host, host->mrq);
				goto unlock;
			}

			if (host->mrq->sbc && !data->error) {
				data->stop->error = 0;
				dw_mci_request_end(host, host->mrq);
				goto unlock;
			}

			prev_state = state = STATE_SENDING_STOP;
			if (!data->error)
				send_stop_cmd(host, data);
			/* fall through */

		case STATE_SENDING_STOP:
			if (!test_and_clear_bit(EVENT_CMD_COMPLETE,
						&host->pending_events))
				break;

			host->cmd = NULL;
			dw_mci_command_complete(host, host->mrq->stop);
			dw_mci_request_end(host, host->mrq);
			goto unlock;

		case STATE_DATA_ERROR:
			if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
						&host->pending_events))
				break;

			state = STATE_DATA_BUSY;
			break;
		}
	} while (state != prev_state);

	host->state = state;
unlock:
	spin_unlock(&host->lock);

}

/* push final bytes to part_buf, only use during push */
static void dw_mci_set_part_bytes(struct dw_mci *host, void *buf, int cnt)
{
	memcpy((void *)&host->part_buf, buf, cnt);
	host->part_buf_count = cnt;
}

/* append bytes to part_buf, only use during push */
static int dw_mci_push_part_bytes(struct dw_mci *host, void *buf, int cnt)
{
	cnt = min(cnt, (1 << host->data_shift) - host->part_buf_count);
	memcpy((void *)&host->part_buf + host->part_buf_count, buf, cnt);
	host->part_buf_count += cnt;
	return cnt;
}

/* pull first bytes from part_buf, only use during pull */
static int dw_mci_pull_part_bytes(struct dw_mci *host, void *buf, int cnt)
{
	cnt = min(cnt, (int)host->part_buf_count);
	if (cnt) {
		memcpy(buf, (void *)&host->part_buf + host->part_buf_start,
		       cnt);
		host->part_buf_count -= cnt;
		host->part_buf_start += cnt;
	}
	return cnt;
}

/* pull final bytes from the part_buf, assuming it's just been filled */
static void dw_mci_pull_final_bytes(struct dw_mci *host, void *buf, int cnt)
{
	memcpy(buf, &host->part_buf, cnt);
	host->part_buf_start = cnt;
	host->part_buf_count = (1 << host->data_shift) - cnt;
}

static void dw_mci_push_data16(struct dw_mci *host, void *buf, int cnt)
{
	struct mmc_data *data = host->data;
	int init_cnt = cnt;

	/* try and push anything in the part_buf */
	if (unlikely(host->part_buf_count)) {
		int len = dw_mci_push_part_bytes(host, buf, cnt);
		buf += len;
		cnt -= len;
		if (host->part_buf_count == 2) {
			mci_writew(host, DATA(host->data_offset),
				   host->part_buf16);
			host->part_buf_count = 0;
		}
	}
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	if (unlikely((unsigned long)buf & 0x1)) {
		while (cnt >= 2) {
			u16 aligned_buf[64];
			int len = min(cnt & -2, (int)sizeof(aligned_buf));
			int items = len >> 1;
			int i;
			/* memcpy from input buffer into aligned buffer */
			memcpy(aligned_buf, buf, len);
			buf += len;
			cnt -= len;
			/* push data from aligned buffer into fifo */
			for (i = 0; i < items; ++i)
				mci_writew(host, DATA(host->data_offset),
					   aligned_buf[i]);
		}
	} else
#endif
	{
		u16 *pdata = buf;
		for (; cnt >= 2; cnt -= 2)
			mci_writew(host, DATA(host->data_offset), *pdata++);
		buf = pdata;
	}
	/* put anything remaining in the part_buf */
	if (cnt) {
		dw_mci_set_part_bytes(host, buf, cnt);
		/* Push data if we have reached the expected data length */
		if ((data->bytes_xfered + init_cnt) ==
		    (data->blksz * data->blocks))
			mci_writew(host, DATA(host->data_offset),
				   host->part_buf16);
	}
}

static void dw_mci_pull_data16(struct dw_mci *host, void *buf, int cnt)
{
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	if (unlikely((unsigned long)buf & 0x1)) {
		while (cnt >= 2) {
			/* pull data from fifo into aligned buffer */
			u16 aligned_buf[64];
			int len = min(cnt & -2, (int)sizeof(aligned_buf));
			int items = len >> 1;
			int i;
			for (i = 0; i < items; ++i)
				aligned_buf[i] = mci_readw(host,
						DATA(host->data_offset));
			/* memcpy from aligned buffer into output buffer */
			memcpy(buf, aligned_buf, len);
			buf += len;
			cnt -= len;
		}
	} else
#endif
	{
		u16 *pdata = buf;
		for (; cnt >= 2; cnt -= 2)
			*pdata++ = mci_readw(host, DATA(host->data_offset));
		buf = pdata;
	}
	if (cnt) {
		host->part_buf16 = mci_readw(host, DATA(host->data_offset));
		dw_mci_pull_final_bytes(host, buf, cnt);
	}
}

static void dw_mci_push_data32(struct dw_mci *host, void *buf, int cnt)
{
	struct mmc_data *data = host->data;
	int init_cnt = cnt;

	/* try and push anything in the part_buf */
	if (unlikely(host->part_buf_count)) {
		int len = dw_mci_push_part_bytes(host, buf, cnt);
		buf += len;
		cnt -= len;
		if (host->part_buf_count == 4) {
			mci_writel(host, DATA(host->data_offset),
				   host->part_buf32);
			host->part_buf_count = 0;
		}
	}
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	if (unlikely((unsigned long)buf & 0x3)) {
		while (cnt >= 4) {
			u32 aligned_buf[32];
			int len = min(cnt & -4, (int)sizeof(aligned_buf));
			int items = len >> 2;
			int i;
			/* memcpy from input buffer into aligned buffer */
			memcpy(aligned_buf, buf, len);
			buf += len;
			cnt -= len;
			/* push data from aligned buffer into fifo */
			for (i = 0; i < items; ++i)
				mci_writel(host, DATA(host->data_offset),
					   aligned_buf[i]);
		}
	} else
#endif
	{
		u32 *pdata = buf;
		for (; cnt >= 4; cnt -= 4)
			mci_writel(host, DATA(host->data_offset), *pdata++);
		buf = pdata;
	}
	/* put anything remaining in the part_buf */
	if (cnt) {
		dw_mci_set_part_bytes(host, buf, cnt);
		/* Push data if we have reached the expected data length */
		if ((data->bytes_xfered + init_cnt) ==
		    (data->blksz * data->blocks))
			mci_writel(host, DATA(host->data_offset),
				   host->part_buf32);
	}
}

static void dw_mci_pull_data32(struct dw_mci *host, void *buf, int cnt)
{
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	if (unlikely((unsigned long)buf & 0x3)) {
		while (cnt >= 4) {
			/* pull data from fifo into aligned buffer */
			u32 aligned_buf[32];
			int len = min(cnt & -4, (int)sizeof(aligned_buf));
			int items = len >> 2;
			int i;
			for (i = 0; i < items; ++i)
				aligned_buf[i] = mci_readl(host,
						DATA(host->data_offset));
			/* memcpy from aligned buffer into output buffer */
			memcpy(buf, aligned_buf, len);
			buf += len;
			cnt -= len;
		}
	} else
#endif
	{
		u32 *pdata = buf;
		for (; cnt >= 4; cnt -= 4)
			*pdata++ = mci_readl(host, DATA(host->data_offset));
		buf = pdata;
	}
	if (cnt) {
		host->part_buf32 = mci_readl(host, DATA(host->data_offset));
		dw_mci_pull_final_bytes(host, buf, cnt);
	}
}

static void dw_mci_push_data64(struct dw_mci *host, void *buf, int cnt)
{
	struct mmc_data *data = host->data;
	int init_cnt = cnt;

	/* try and push anything in the part_buf */
	if (unlikely(host->part_buf_count)) {
		int len = dw_mci_push_part_bytes(host, buf, cnt);
		buf += len;
		cnt -= len;

		if (host->part_buf_count == 8) {
			mci_writeq(host, DATA(host->data_offset),
				   host->part_buf);
			host->part_buf_count = 0;
		}
	}
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	if (unlikely((unsigned long)buf & 0x7)) {
		while (cnt >= 8) {
			u64 aligned_buf[16];
			int len = min(cnt & -8, (int)sizeof(aligned_buf));
			int items = len >> 3;
			int i;
			/* memcpy from input buffer into aligned buffer */
			memcpy(aligned_buf, buf, len);
			buf += len;
			cnt -= len;
			/* push data from aligned buffer into fifo */
			for (i = 0; i < items; ++i)
				mci_writeq(host, DATA(host->data_offset),
					   aligned_buf[i]);
		}
	} else
#endif
	{
		u64 *pdata = buf;
		for (; cnt >= 8; cnt -= 8)
			mci_writeq(host, DATA(host->data_offset), *pdata++);
		buf = pdata;
	}
	/* put anything remaining in the part_buf */
	if (cnt) {
		dw_mci_set_part_bytes(host, buf, cnt);
		/* Push data if we have reached the expected data length */
		if ((data->bytes_xfered + init_cnt) ==
		    (data->blksz * data->blocks))
			mci_writeq(host, DATA(host->data_offset),
				   host->part_buf);
	}
}

static void dw_mci_pull_data64(struct dw_mci *host, void *buf, int cnt)
{
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	if (unlikely((unsigned long)buf & 0x7)) {
		while (cnt >= 8) {
			/* pull data from fifo into aligned buffer */
			u64 aligned_buf[16];
			int len = min(cnt & -8, (int)sizeof(aligned_buf));
			int items = len >> 3;
			int i;
			for (i = 0; i < items; ++i)
				aligned_buf[i] = mci_readq(host,
						DATA(host->data_offset));
			/* memcpy from aligned buffer into output buffer */
			memcpy(buf, aligned_buf, len);
			buf += len;
			cnt -= len;
		}
	} else
#endif
	{
		u64 *pdata = buf;
		for (; cnt >= 8; cnt -= 8)
			*pdata++ = mci_readq(host, DATA(host->data_offset));
		buf = pdata;
	}
	if (cnt) {
		host->part_buf = mci_readq(host, DATA(host->data_offset));
		dw_mci_pull_final_bytes(host, buf, cnt);
	}
}

static void dw_mci_pull_data(struct dw_mci *host, void *buf, int cnt)
{
	int len;

	/* get remaining partial bytes */
	len = dw_mci_pull_part_bytes(host, buf, cnt);
	if (unlikely(len == cnt))
		return;
	buf += len;
	cnt -= len;

	/* get the rest of the data */
	host->pull_data(host, buf, cnt);
}

static void dw_mci_read_data_pio(struct dw_mci *host, bool dto)
{
	struct sg_mapping_iter *sg_miter = &host->sg_miter;
	void *buf;
	unsigned int offset;
	struct mmc_data	*data = host->data;
	int shift = host->data_shift;
	u32 status;
	unsigned int len;
	unsigned int remain, fcnt;

	do {
		if (!sg_miter_next(sg_miter))
			goto done;

		host->sg = sg_miter->piter.sg;
		buf = sg_miter->addr;
		remain = sg_miter->length;
		offset = 0;

		do {
			fcnt = (SDMMC_GET_FCNT(mci_readl(host, STATUS))
					<< shift) + host->part_buf_count;
			len = min(remain, fcnt);
			if (!len)
				break;
			dw_mci_pull_data(host, (void *)(buf + offset), len);
			data->bytes_xfered += len;
			offset += len;
			remain -= len;
		} while (remain);

		sg_miter->consumed = offset;
		status = mci_readl(host, MINTSTS);
		mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
	/* if the RXDR is ready read again */
	} while ((status & SDMMC_INT_RXDR) ||
		 (dto && SDMMC_GET_FCNT(mci_readl(host, STATUS))));

	if (!remain) {
		if (!sg_miter_next(sg_miter))
			goto done;
		sg_miter->consumed = 0;
	}
	sg_miter_stop(sg_miter);
	return;

done:
	sg_miter_stop(sg_miter);
	host->sg = NULL;
	smp_wmb();
	set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
}

static void dw_mci_write_data_pio(struct dw_mci *host)
{
	struct sg_mapping_iter *sg_miter = &host->sg_miter;
	void *buf;
	unsigned int offset;
	struct mmc_data	*data = host->data;
	int shift = host->data_shift;
	u32 status;
	unsigned int len;
	unsigned int fifo_depth = host->fifo_depth;
	unsigned int remain, fcnt;

	do {
		if (!sg_miter_next(sg_miter))
			goto done;

		host->sg = sg_miter->piter.sg;
		buf = sg_miter->addr;
		remain = sg_miter->length;
		offset = 0;

		do {
			fcnt = ((fifo_depth -
				 SDMMC_GET_FCNT(mci_readl(host, STATUS)))
					<< shift) - host->part_buf_count;
			len = min(remain, fcnt);
			if (!len)
				break;
			host->push_data(host, (void *)(buf + offset), len);
			data->bytes_xfered += len;
			offset += len;
			remain -= len;
		} while (remain);

		sg_miter->consumed = offset;
		status = mci_readl(host, MINTSTS);
		mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
	} while (status & SDMMC_INT_TXDR); /* if TXDR write again */

	if (!remain) {
		if (!sg_miter_next(sg_miter))
			goto done;
		sg_miter->consumed = 0;
	}
	sg_miter_stop(sg_miter);
	return;

done:
	sg_miter_stop(sg_miter);
	host->sg = NULL;
	smp_wmb();
	set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
}
1622
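/* Record the command status and kick the tasklet to complete the command. */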
static void dw_mci_cmd_interrupt(struct dw_mci *host, u32 status)
{
	if (!host->cmd_status)
		host->cmd_status = status;

	smp_wmb();

	set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
	tasklet_schedule(&host->tasklet);
}

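/*
 * Top-level interrupt handler: acknowledge and dispatch command, data,
 * PIO, card-detect, SDIO and (when configured) internal DMA interrupts.
 */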
static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
{
	struct dw_mci *host = dev_id;
	u32 pending;
	int i;

	pending = mci_readl(host, MINTSTS); /* read-only mask reg */

	/*
	 * DTO fix - version 2.10a and below, and only if internal DMA
	 * is configured.
	 */
	if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO) {
		if (!pending &&
		    ((mci_readl(host, STATUS) >> 17) & 0x1fff))
			pending |= SDMMC_INT_DATA_OVER;
	}

	if (pending) {
		if (pending & DW_MCI_CMD_ERROR_FLAGS) {
			mci_writel(host, RINTSTS, DW_MCI_CMD_ERROR_FLAGS);
			host->cmd_status = pending;
			smp_wmb();
			set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
		}

		if (pending & DW_MCI_DATA_ERROR_FLAGS) {
			/* if there is an error report DATA_ERROR */
			mci_writel(host, RINTSTS, DW_MCI_DATA_ERROR_FLAGS);
			host->data_status = pending;
			smp_wmb();
			set_bit(EVENT_DATA_ERROR, &host->pending_events);
			tasklet_schedule(&host->tasklet);
		}

		if (pending & SDMMC_INT_DATA_OVER) {
			mci_writel(host, RINTSTS, SDMMC_INT_DATA_OVER);
			if (!host->data_status)
				host->data_status = pending;
			smp_wmb();
			if (host->dir_status == DW_MCI_RECV_STATUS) {
				if (host->sg != NULL)
					dw_mci_read_data_pio(host, true);
			}
			set_bit(EVENT_DATA_COMPLETE, &host->pending_events);
			tasklet_schedule(&host->tasklet);
		}

		if (pending & SDMMC_INT_RXDR) {
			mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
			if (host->dir_status == DW_MCI_RECV_STATUS && host->sg)
				dw_mci_read_data_pio(host, false);
		}

		if (pending & SDMMC_INT_TXDR) {
			mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
			if (host->dir_status == DW_MCI_SEND_STATUS && host->sg)
				dw_mci_write_data_pio(host);
		}

		if (pending & SDMMC_INT_CMD_DONE) {
			mci_writel(host, RINTSTS, SDMMC_INT_CMD_DONE);
			dw_mci_cmd_interrupt(host, pending);
		}

		if (pending & SDMMC_INT_CD) {
			mci_writel(host, RINTSTS, SDMMC_INT_CD);
			queue_work(host->card_workqueue, &host->card_work);
		}

		/* Handle SDIO Interrupts */
		for (i = 0; i < host->num_slots; i++) {
			struct dw_mci_slot *slot = host->slot[i];
			if (pending & SDMMC_INT_SDIO(i)) {
				mci_writel(host, RINTSTS, SDMMC_INT_SDIO(i));
				mmc_signal_sdio_irq(slot->mmc);
			}
		}
	}

#ifdef CONFIG_MMC_DW_IDMAC
	/* Handle DMA interrupts */
	pending = mci_readl(host, IDSTS);
	if (pending & (SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI)) {
		mci_writel(host, IDSTS, SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI);
		mci_writel(host, IDSTS, SDMMC_IDMAC_INT_NI);
		host->dma_ops->complete(host);
	}
#endif

	return IRQ_HANDLED;
}

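/*
 * Card-detect workqueue handler: for each slot whose presence state has
 * changed, fail any request in flight with -ENOMEDIUM, reset the FIFO
 * and DMA on removal, and notify the MMC core via mmc_detect_change().
 */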
static void dw_mci_work_routine_card(struct work_struct *work)
{
	struct dw_mci *host = container_of(work, struct dw_mci, card_work);
	int i;

	for (i = 0; i < host->num_slots; i++) {
		struct dw_mci_slot *slot = host->slot[i];
		struct mmc_host *mmc = slot->mmc;
		struct mmc_request *mrq;
		int present;
		u32 ctrl;

		present = dw_mci_get_cd(mmc);
		while (present != slot->last_detect_state) {
			dev_dbg(&slot->mmc->class_dev, "card %s\n",
				present ? "inserted" : "removed");

			spin_lock_bh(&host->lock);

			/* Card change detected */
			slot->last_detect_state = present;

			/* Mark card as present if applicable */
			if (present != 0)
				set_bit(DW_MMC_CARD_PRESENT, &slot->flags);

			/* Clean up queue if present */
			mrq = slot->mrq;
			if (mrq) {
				if (mrq == host->mrq) {
					host->data = NULL;
					host->cmd = NULL;

					switch (host->state) {
					case STATE_IDLE:
						break;
					case STATE_SENDING_CMD:
						mrq->cmd->error = -ENOMEDIUM;
						if (!mrq->data)
							break;
						/* fall through */
					case STATE_SENDING_DATA:
						mrq->data->error = -ENOMEDIUM;
						dw_mci_stop_dma(host);
						break;
					case STATE_DATA_BUSY:
					case STATE_DATA_ERROR:
						if (mrq->data->error == -EINPROGRESS)
							mrq->data->error = -ENOMEDIUM;
						if (!mrq->stop)
							break;
						/* fall through */
					case STATE_SENDING_STOP:
						mrq->stop->error = -ENOMEDIUM;
						break;
					}

					dw_mci_request_end(host, mrq);
				} else {
					list_del(&slot->queue_node);
					mrq->cmd->error = -ENOMEDIUM;
					if (mrq->data)
						mrq->data->error = -ENOMEDIUM;
					if (mrq->stop)
						mrq->stop->error = -ENOMEDIUM;

					spin_unlock(&host->lock);
					mmc_request_done(slot->mmc, mrq);
					spin_lock(&host->lock);
				}
			}

			/* Power down slot */
			if (present == 0) {
				clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);

				/*
				 * Clear down the FIFO - doing so generates a
				 * block interrupt, hence setting the
				 * scatter-gather pointer to NULL.
				 */
				sg_miter_stop(&host->sg_miter);
				host->sg = NULL;

				ctrl = mci_readl(host, CTRL);
				ctrl |= SDMMC_CTRL_FIFO_RESET;
				mci_writel(host, CTRL, ctrl);

#ifdef CONFIG_MMC_DW_IDMAC
				ctrl = mci_readl(host, BMOD);
				/* Software reset of DMA */
				ctrl |= SDMMC_IDMAC_SWRESET;
				mci_writel(host, BMOD, ctrl);
#endif

			}

			spin_unlock_bh(&host->lock);

			present = dw_mci_get_cd(mmc);
		}

		mmc_detect_change(slot->mmc,
			msecs_to_jiffies(host->pdata->detect_delay_ms));
	}
}

#ifdef CONFIG_OF
/* given a slot id, find out the device node representing that slot */
static struct device_node *dw_mci_of_find_slot_node(struct device *dev, u8 slot)
{
	struct device_node *np;
	const __be32 *addr;
	int len;

	if (!dev || !dev->of_node)
		return NULL;

	for_each_child_of_node(dev->of_node, np) {
		addr = of_get_property(np, "reg", &len);
		if (!addr || (len < sizeof(int)))
			continue;
		if (be32_to_cpup(addr) == slot)
			return np;
	}
	return NULL;
}

static struct dw_mci_of_slot_quirks {
	char *quirk;
	int id;
} of_slot_quirks[] = {
	{
		.quirk	= "disable-wp",
		.id	= DW_MCI_SLOT_QUIRK_NO_WRITE_PROTECT,
	},
};

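/* translate optional per-slot device-tree quirk properties into quirk flags */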
static int dw_mci_of_get_slot_quirks(struct device *dev, u8 slot)
{
	struct device_node *np = dw_mci_of_find_slot_node(dev, slot);
	int quirks = 0;
	int idx;

	/* get quirks */
	for (idx = 0; idx < ARRAY_SIZE(of_slot_quirks); idx++)
		if (of_get_property(np, of_slot_quirks[idx].quirk, NULL))
			quirks |= of_slot_quirks[idx].id;

	return quirks;
}

/* find out bus-width for a given slot */
static u32 dw_mci_of_get_bus_wd(struct device *dev, u8 slot)
{
	struct device_node *np = dw_mci_of_find_slot_node(dev, slot);
	u32 bus_wd = 1;

	if (!np)
		return 1;

	if (of_property_read_u32(np, "bus-width", &bus_wd))
		dev_err(dev, "bus-width property not found, assuming width"
			     " as 1\n");
	return bus_wd;
}

/* find the write protect gpio for a given slot; or -1 if none specified */
static int dw_mci_of_get_wp_gpio(struct device *dev, u8 slot)
{
	struct device_node *np = dw_mci_of_find_slot_node(dev, slot);
	int gpio;

	if (!np)
		return -EINVAL;

	gpio = of_get_named_gpio(np, "wp-gpios", 0);

	/* Having a missing entry is valid; return silently */
	if (!gpio_is_valid(gpio))
		return -EINVAL;

	if (devm_gpio_request(dev, gpio, "dw-mci-wp")) {
		dev_warn(dev, "gpio [%d] request failed\n", gpio);
		return -EINVAL;
	}

	return gpio;
}
#else /* CONFIG_OF */
static int dw_mci_of_get_slot_quirks(struct device *dev, u8 slot)
{
	return 0;
}
static u32 dw_mci_of_get_bus_wd(struct device *dev, u8 slot)
{
	return 1;
}
static struct device_node *dw_mci_of_find_slot_node(struct device *dev, u8 slot)
{
	return NULL;
}
static int dw_mci_of_get_wp_gpio(struct device *dev, u8 slot)
{
	return -EINVAL;
}
#endif /* CONFIG_OF */

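/*
 * Allocate and register one mmc_host for the given slot, applying
 * platform data, device-tree properties and driver-specific caps.
 */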
static int dw_mci_init_slot(struct dw_mci *host, unsigned int id)
{
	struct mmc_host *mmc;
	struct dw_mci_slot *slot;
	const struct dw_mci_drv_data *drv_data = host->drv_data;
	int ctrl_id, ret;
	u32 freq[2];
	u8 bus_width;

	mmc = mmc_alloc_host(sizeof(struct dw_mci_slot), host->dev);
	if (!mmc)
		return -ENOMEM;

	slot = mmc_priv(mmc);
	slot->id = id;
	slot->mmc = mmc;
	slot->host = host;
	host->slot[id] = slot;

	slot->quirks = dw_mci_of_get_slot_quirks(host->dev, slot->id);

	mmc->ops = &dw_mci_ops;
	if (of_property_read_u32_array(host->dev->of_node,
				       "clock-freq-min-max", freq, 2)) {
		mmc->f_min = DW_MCI_FREQ_MIN;
		mmc->f_max = DW_MCI_FREQ_MAX;
	} else {
		mmc->f_min = freq[0];
		mmc->f_max = freq[1];
	}

	if (host->pdata->get_ocr)
		mmc->ocr_avail = host->pdata->get_ocr(id);
	else
		mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;

	/*
	 * Start with slot power disabled, it will be enabled when a card
	 * is detected.
	 */
	if (host->pdata->setpower)
		host->pdata->setpower(id, 0);

	if (host->pdata->caps)
		mmc->caps = host->pdata->caps;

	if (host->pdata->pm_caps)
		mmc->pm_caps = host->pdata->pm_caps;

	if (host->dev->of_node) {
		ctrl_id = of_alias_get_id(host->dev->of_node, "mshc");
		if (ctrl_id < 0)
			ctrl_id = 0;
	} else {
		ctrl_id = to_platform_device(host->dev)->id;
	}
	if (drv_data && drv_data->caps)
		mmc->caps |= drv_data->caps[ctrl_id];

	if (host->pdata->caps2)
		mmc->caps2 = host->pdata->caps2;

	if (host->pdata->get_bus_wd)
		bus_width = host->pdata->get_bus_wd(slot->id);
	else if (host->dev->of_node)
		bus_width = dw_mci_of_get_bus_wd(host->dev, slot->id);
	else
		bus_width = 1;

	switch (bus_width) {
	case 8:
		mmc->caps |= MMC_CAP_8_BIT_DATA;
		/* fall through: an 8-bit capable slot also supports 4-bit */
	case 4:
		mmc->caps |= MMC_CAP_4_BIT_DATA;
	}

	if (host->pdata->blk_settings) {
		mmc->max_segs = host->pdata->blk_settings->max_segs;
		mmc->max_blk_size = host->pdata->blk_settings->max_blk_size;
		mmc->max_blk_count = host->pdata->blk_settings->max_blk_count;
		mmc->max_req_size = host->pdata->blk_settings->max_req_size;
		mmc->max_seg_size = host->pdata->blk_settings->max_seg_size;
	} else {
		/* Useful defaults if platform data is unset. */
#ifdef CONFIG_MMC_DW_IDMAC
		mmc->max_segs = host->ring_size;
		mmc->max_blk_size = 65536;
		mmc->max_blk_count = host->ring_size;
		mmc->max_seg_size = 0x1000;
		mmc->max_req_size = mmc->max_seg_size * mmc->max_blk_count;
#else
		mmc->max_segs = 64;
		mmc->max_blk_size = 65536; /* BLKSIZ is 16 bits */
		mmc->max_blk_count = 512;
		mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
		mmc->max_seg_size = mmc->max_req_size;
#endif /* CONFIG_MMC_DW_IDMAC */
	}

	if (dw_mci_get_cd(mmc))
		set_bit(DW_MMC_CARD_PRESENT, &slot->flags);
	else
		clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);

	slot->wp_gpio = dw_mci_of_get_wp_gpio(host->dev, slot->id);

	ret = mmc_add_host(mmc);
	if (ret)
		goto err_setup_bus;

#if defined(CONFIG_DEBUG_FS)
	dw_mci_init_debugfs(slot);
#endif

	/* Card initially undetected */
	slot->last_detect_state = 0;

	return 0;

err_setup_bus:
	mmc_free_host(mmc);
	return -EINVAL;
}

static void dw_mci_cleanup_slot(struct dw_mci_slot *slot, unsigned int id)
{
	/* Shutdown detect IRQ */
	if (slot->host->pdata->exit)
		slot->host->pdata->exit(id);

	/* Debugfs stuff is cleaned up by mmc core */
	mmc_remove_host(slot->mmc);
	slot->host->slot[id] = NULL;
	mmc_free_host(slot->mmc);
}

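/*
 * Set up the DMA interface (the internal IDMAC when configured); fall
 * back to PIO mode if no usable DMA operations are available.
 */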
static void dw_mci_init_dma(struct dw_mci *host)
{
	/* Alloc memory for sg translation */
	host->sg_cpu = dmam_alloc_coherent(host->dev, PAGE_SIZE,
					  &host->sg_dma, GFP_KERNEL);
	if (!host->sg_cpu) {
		dev_err(host->dev, "%s: could not alloc DMA memory\n",
			__func__);
		goto no_dma;
	}

	/* Determine which DMA interface to use */
#ifdef CONFIG_MMC_DW_IDMAC
	host->dma_ops = &dw_mci_idmac_ops;
	dev_info(host->dev, "Using internal DMA controller.\n");
#endif

	if (!host->dma_ops)
		goto no_dma;

	if (host->dma_ops->init && host->dma_ops->start &&
	    host->dma_ops->stop && host->dma_ops->cleanup) {
		if (host->dma_ops->init(host)) {
			dev_err(host->dev, "%s: Unable to initialize "
				"DMA Controller.\n", __func__);
			goto no_dma;
		}
	} else {
		dev_err(host->dev, "DMA initialization not found.\n");
		goto no_dma;
	}

	host->use_dma = 1;
	return;

no_dma:
	dev_info(host->dev, "Using PIO mode.\n");
	host->use_dma = 0;
	return;
}

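/* Assert the controller, FIFO and DMA resets and wait up to 500ms for them to clear. */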
static bool mci_wait_reset(struct device *dev, struct dw_mci *host)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(500);
	unsigned int ctrl;

	mci_writel(host, CTRL, (SDMMC_CTRL_RESET | SDMMC_CTRL_FIFO_RESET |
				SDMMC_CTRL_DMA_RESET));

	/* wait till resets clear */
	do {
		ctrl = mci_readl(host, CTRL);
		if (!(ctrl & (SDMMC_CTRL_RESET | SDMMC_CTRL_FIFO_RESET |
			      SDMMC_CTRL_DMA_RESET)))
			return true;
	} while (time_before(jiffies, timeout));

	dev_err(dev, "Timeout resetting block (ctrl %#x)\n", ctrl);

	return false;
}

#ifdef CONFIG_OF
static struct dw_mci_of_quirks {
	char *quirk;
	int id;
} of_quirks[] = {
	{
		.quirk	= "broken-cd",
		.id	= DW_MCI_QUIRK_BROKEN_CARD_DETECTION,
	},
};

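/* Build a dw_mci_board from the host's device-tree node. */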
static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
{
	struct dw_mci_board *pdata;
	struct device *dev = host->dev;
	struct device_node *np = dev->of_node;
	const struct dw_mci_drv_data *drv_data = host->drv_data;
	int idx, ret;
	u32 clock_frequency;

	pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
	if (!pdata) {
		dev_err(dev, "could not allocate memory for pdata\n");
		return ERR_PTR(-ENOMEM);
	}

	/* find out number of slots supported */
	if (of_property_read_u32(dev->of_node, "num-slots",
				&pdata->num_slots)) {
		dev_info(dev, "num-slots property not found, "
				"assuming 1 slot is available\n");
		pdata->num_slots = 1;
	}

	/* get quirks */
	for (idx = 0; idx < ARRAY_SIZE(of_quirks); idx++)
		if (of_get_property(np, of_quirks[idx].quirk, NULL))
			pdata->quirks |= of_quirks[idx].id;

	if (of_property_read_u32(np, "fifo-depth", &pdata->fifo_depth))
		dev_info(dev, "fifo-depth property not found, using "
				"value of FIFOTH register as default\n");

	of_property_read_u32(np, "card-detect-delay", &pdata->detect_delay_ms);

	if (!of_property_read_u32(np, "clock-frequency", &clock_frequency))
		pdata->bus_hz = clock_frequency;

	if (drv_data && drv_data->parse_dt) {
		ret = drv_data->parse_dt(host);
		if (ret)
			return ERR_PTR(ret);
	}

	if (of_find_property(np, "keep-power-in-suspend", NULL))
		pdata->pm_caps |= MMC_PM_KEEP_POWER;

	if (of_find_property(np, "enable-sdio-wakeup", NULL))
		pdata->pm_caps |= MMC_PM_WAKE_SDIO_IRQ;

	if (of_find_property(np, "supports-highspeed", NULL))
		pdata->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED;

	if (of_find_property(np, "caps2-mmc-hs200-1_8v", NULL))
		pdata->caps2 |= MMC_CAP2_HS200_1_8V_SDR;

	if (of_find_property(np, "caps2-mmc-hs200-1_2v", NULL))
		pdata->caps2 |= MMC_CAP2_HS200_1_2V_SDR;

	return pdata;
}

#else /* CONFIG_OF */
static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
{
	return ERR_PTR(-EINVAL);
}
#endif /* CONFIG_OF */

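/*
 * Common probe path exported for the bus glue drivers: set up clocks,
 * regulator, DMA, interrupts and the card-detect workqueue, then
 * initialize each slot.
 */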
int dw_mci_probe(struct dw_mci *host)
{
	const struct dw_mci_drv_data *drv_data = host->drv_data;
	int width, i, ret = 0;
	u32 fifo_size;
	int init_slots = 0;

	if (!host->pdata) {
		host->pdata = dw_mci_parse_dt(host);
		if (IS_ERR(host->pdata)) {
			dev_err(host->dev, "platform data not available\n");
			return -EINVAL;
		}
	}

	if (!host->pdata->select_slot && host->pdata->num_slots > 1) {
		dev_err(host->dev,
			"Platform data must supply select_slot function\n");
		return -ENODEV;
	}

	host->biu_clk = devm_clk_get(host->dev, "biu");
	if (IS_ERR(host->biu_clk)) {
		dev_dbg(host->dev, "biu clock not available\n");
	} else {
		ret = clk_prepare_enable(host->biu_clk);
		if (ret) {
			dev_err(host->dev, "failed to enable biu clock\n");
			return ret;
		}
	}

	host->ciu_clk = devm_clk_get(host->dev, "ciu");
	if (IS_ERR(host->ciu_clk)) {
		dev_dbg(host->dev, "ciu clock not available\n");
		host->bus_hz = host->pdata->bus_hz;
	} else {
		ret = clk_prepare_enable(host->ciu_clk);
		if (ret) {
			dev_err(host->dev, "failed to enable ciu clock\n");
			goto err_clk_biu;
		}

		if (host->pdata->bus_hz) {
			ret = clk_set_rate(host->ciu_clk, host->pdata->bus_hz);
			if (ret)
				dev_warn(host->dev,
					 "Unable to set bus rate to %ul\n",
					 host->pdata->bus_hz);
		}
		host->bus_hz = clk_get_rate(host->ciu_clk);
	}

	if (drv_data && drv_data->init) {
		ret = drv_data->init(host);
		if (ret) {
			dev_err(host->dev,
				"implementation specific init failed\n");
			goto err_clk_ciu;
		}
	}

	if (drv_data && drv_data->setup_clock) {
		ret = drv_data->setup_clock(host);
		if (ret) {
			dev_err(host->dev,
				"implementation specific clock setup failed\n");
			goto err_clk_ciu;
		}
	}

	host->vmmc = devm_regulator_get_optional(host->dev, "vmmc");
	if (IS_ERR(host->vmmc)) {
		ret = PTR_ERR(host->vmmc);
		if (ret == -EPROBE_DEFER)
			goto err_clk_ciu;

		dev_info(host->dev, "no vmmc regulator found: %d\n", ret);
		host->vmmc = NULL;
	} else {
		ret = regulator_enable(host->vmmc);
		if (ret) {
			if (ret != -EPROBE_DEFER)
				dev_err(host->dev,
					"regulator_enable fail: %d\n", ret);
			goto err_clk_ciu;
		}
	}

	if (!host->bus_hz) {
		dev_err(host->dev,
			"Platform data must supply bus speed\n");
		ret = -ENODEV;
		goto err_regulator;
	}

	host->quirks = host->pdata->quirks;

	spin_lock_init(&host->lock);
	INIT_LIST_HEAD(&host->queue);

	/*
	 * Get the host data width - this assumes that HCON has been set with
	 * the correct values.
	 */
	i = (mci_readl(host, HCON) >> 7) & 0x7;
	if (!i) {
		host->push_data = dw_mci_push_data16;
		host->pull_data = dw_mci_pull_data16;
		width = 16;
		host->data_shift = 1;
	} else if (i == 2) {
		host->push_data = dw_mci_push_data64;
		host->pull_data = dw_mci_pull_data64;
		width = 64;
		host->data_shift = 3;
	} else {
		/* Check for a reserved value, and warn if it is */
		WARN((i != 1),
		     "HCON reports a reserved host data width!\n"
		     "Defaulting to 32-bit access.\n");
		host->push_data = dw_mci_push_data32;
		host->pull_data = dw_mci_pull_data32;
		width = 32;
		host->data_shift = 2;
	}

	/* Reset all blocks */
	if (!mci_wait_reset(host->dev, host)) {
		/* don't leak the enabled regulator and clocks on reset timeout */
		ret = -ENODEV;
		goto err_regulator;
	}

	host->dma_ops = host->pdata->dma_ops;
	dw_mci_init_dma(host);

	/* Clear the interrupts for the host controller */
	mci_writel(host, RINTSTS, 0xFFFFFFFF);
	mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */

	/* Put in max timeout */
	mci_writel(host, TMOUT, 0xFFFFFFFF);

	/*
	 * FIFO threshold settings  RxMark  = fifo_size / 2 - 1,
	 *                          Tx Mark = fifo_size / 2 DMA Size = 8
	 */
	if (!host->pdata->fifo_depth) {
		/*
		 * Power-on value of RX_WMark is FIFO_DEPTH-1, but this may
		 * have been overwritten by the bootloader, just like we're
		 * about to do, so if you know the value for your hardware, you
		 * should put it in the platform data.
		 */
		fifo_size = mci_readl(host, FIFOTH);
		fifo_size = 1 + ((fifo_size >> 16) & 0xfff);
	} else {
		fifo_size = host->pdata->fifo_depth;
	}
	host->fifo_depth = fifo_size;
	host->fifoth_val = ((0x2 << 28) | ((fifo_size/2 - 1) << 16) |
			((fifo_size/2) << 0));
	mci_writel(host, FIFOTH, host->fifoth_val);

	/* disable clock to CIU */
	mci_writel(host, CLKENA, 0);
	mci_writel(host, CLKSRC, 0);

	/*
	 * In 2.40a spec, Data offset is changed.
	 * Need to check the version-id and set data-offset for DATA register.
	 */
	host->verid = SDMMC_GET_VERID(mci_readl(host, VERID));
	dev_info(host->dev, "Version ID is %04x\n", host->verid);

	if (host->verid < DW_MMC_240A)
		host->data_offset = DATA_OFFSET;
	else
		host->data_offset = DATA_240A_OFFSET;

	tasklet_init(&host->tasklet, dw_mci_tasklet_func, (unsigned long)host);
	host->card_workqueue = alloc_workqueue("dw-mci-card",
			WQ_MEM_RECLAIM | WQ_NON_REENTRANT, 1);
	if (!host->card_workqueue) {
		ret = -ENOMEM;
		goto err_dmaunmap;
	}
	INIT_WORK(&host->card_work, dw_mci_work_routine_card);
	ret = devm_request_irq(host->dev, host->irq, dw_mci_interrupt,
			       host->irq_flags, "dw-mci", host);
	if (ret)
		goto err_workqueue;

	if (host->pdata->num_slots)
		host->num_slots = host->pdata->num_slots;
	else
		host->num_slots = ((mci_readl(host, HCON) >> 1) & 0x1F) + 1;

	/*
	 * Enable interrupts for command done, data over, data empty, card det,
	 * receive ready and error such as transmit, receive timeout, crc error
	 */
	mci_writel(host, RINTSTS, 0xFFFFFFFF);
	mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER |
		   SDMMC_INT_TXDR | SDMMC_INT_RXDR |
		   DW_MCI_ERROR_FLAGS | SDMMC_INT_CD);
	mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE); /* Enable mci interrupt */

	dev_info(host->dev, "DW MMC controller at irq %d, "
		 "%d bit host data width, "
		 "%u deep fifo\n",
		 host->irq, width, fifo_size);

	/* We need at least one slot to succeed */
	for (i = 0; i < host->num_slots; i++) {
		ret = dw_mci_init_slot(host, i);
		if (ret)
			dev_dbg(host->dev, "slot %d init failed\n", i);
		else
			init_slots++;
	}

	if (init_slots) {
		dev_info(host->dev, "%d slots initialized\n", init_slots);
	} else {
		dev_dbg(host->dev, "attempted to initialize %d slots, "
					"but failed on all\n", host->num_slots);
		goto err_workqueue;
	}

	if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO)
		dev_info(host->dev, "Internal DMAC interrupt fix enabled.\n");

	return 0;

err_workqueue:
	destroy_workqueue(host->card_workqueue);

err_dmaunmap:
	if (host->use_dma && host->dma_ops->exit)
		host->dma_ops->exit(host);

err_regulator:
	if (host->vmmc)
		regulator_disable(host->vmmc);

err_clk_ciu:
	if (!IS_ERR(host->ciu_clk))
		clk_disable_unprepare(host->ciu_clk);

err_clk_biu:
	if (!IS_ERR(host->biu_clk))
		clk_disable_unprepare(host->biu_clk);

	return ret;
}
EXPORT_SYMBOL(dw_mci_probe);

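/* Tear down all slots, quiesce the controller and release DMA, regulator and clocks. */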
void dw_mci_remove(struct dw_mci *host)
{
	int i;

	mci_writel(host, RINTSTS, 0xFFFFFFFF);
	mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */

	for (i = 0; i < host->num_slots; i++) {
		dev_dbg(host->dev, "remove slot %d\n", i);
		if (host->slot[i])
			dw_mci_cleanup_slot(host->slot[i], i);
	}

	/* disable clock to CIU */
	mci_writel(host, CLKENA, 0);
	mci_writel(host, CLKSRC, 0);

	destroy_workqueue(host->card_workqueue);

	if (host->use_dma && host->dma_ops->exit)
		host->dma_ops->exit(host);

	if (host->vmmc)
		regulator_disable(host->vmmc);

	if (!IS_ERR(host->ciu_clk))
		clk_disable_unprepare(host->ciu_clk);

	if (!IS_ERR(host->biu_clk))
		clk_disable_unprepare(host->biu_clk);
}
EXPORT_SYMBOL(dw_mci_remove);

#ifdef CONFIG_PM_SLEEP
/*
 * TODO: we should probably disable the clock to the card in the suspend path.
 */
int dw_mci_suspend(struct dw_mci *host)
{
	int i, ret = 0;

	for (i = 0; i < host->num_slots; i++) {
		struct dw_mci_slot *slot = host->slot[i];
		if (!slot)
			continue;
		ret = mmc_suspend_host(slot->mmc);
		if (ret < 0) {
			while (--i >= 0) {
				slot = host->slot[i];
				if (slot)
					mmc_resume_host(host->slot[i]->mmc);
			}
			return ret;
		}
	}

	if (host->vmmc)
		regulator_disable(host->vmmc);

	return 0;
}
EXPORT_SYMBOL(dw_mci_suspend);

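/*
 * Re-enable the regulator, reset and reprogram the controller, then
 * resume each slot (restoring bus settings for slots that kept power).
 */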
int dw_mci_resume(struct dw_mci *host)
{
	int i, ret;

	if (host->vmmc) {
		ret = regulator_enable(host->vmmc);
		if (ret) {
			dev_err(host->dev,
				"failed to enable regulator: %d\n", ret);
			return ret;
		}
	}

	if (!mci_wait_reset(host->dev, host)) {
		ret = -ENODEV;
		return ret;
	}

	if (host->use_dma && host->dma_ops->init)
		host->dma_ops->init(host);

	/* Restore the old value at FIFOTH register */
	mci_writel(host, FIFOTH, host->fifoth_val);

	/* Put in max timeout */
	mci_writel(host, TMOUT, 0xFFFFFFFF);

	mci_writel(host, RINTSTS, 0xFFFFFFFF);
	mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER |
		   SDMMC_INT_TXDR | SDMMC_INT_RXDR |
		   DW_MCI_ERROR_FLAGS | SDMMC_INT_CD);
	mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE);

	for (i = 0; i < host->num_slots; i++) {
		struct dw_mci_slot *slot = host->slot[i];
		if (!slot)
			continue;
		if (slot->mmc->pm_flags & MMC_PM_KEEP_POWER) {
			dw_mci_set_ios(slot->mmc, &slot->mmc->ios);
			dw_mci_setup_bus(slot, true);
		}

		ret = mmc_resume_host(host->slot[i]->mmc);
		if (ret < 0)
			return ret;
	}
	return 0;
}
EXPORT_SYMBOL(dw_mci_resume);
#endif /* CONFIG_PM_SLEEP */

static int __init dw_mci_init(void)
{
	pr_info("Synopsys Designware Multimedia Card Interface Driver\n");
	return 0;
}

static void __exit dw_mci_exit(void)
{
}

module_init(dw_mci_init);
module_exit(dw_mci_exit);

MODULE_DESCRIPTION("DW Multimedia Card Interface driver");
MODULE_AUTHOR("NXP Semiconductor VietNam");
MODULE_AUTHOR("Imagination Technologies Ltd");
MODULE_LICENSE("GPL v2");