/*
 * Synopsys DesignWare Multimedia Card Interface driver
 *  (Based on NXP driver for lpc 31xx)
 *
 * Copyright (C) 2009 NXP Semiconductors
 * Copyright (C) 2009, 2010 Imagination Technologies Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/blkdev.h>
#include <linux/clk.h>
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/dw_mmc.h>
#include <linux/bitops.h>
#include <linux/regulator/consumer.h>
#include <linux/workqueue.h>
#include <linux/of.h>
#include <linux/of_gpio.h>

#include "dw_mmc.h"

/* Common flag combinations */
#define DW_MCI_DATA_ERROR_FLAGS	(SDMMC_INT_DRTO | SDMMC_INT_DCRC | \
				 SDMMC_INT_HTO | SDMMC_INT_SBE | \
				 SDMMC_INT_EBE)
#define DW_MCI_CMD_ERROR_FLAGS	(SDMMC_INT_RTO | SDMMC_INT_RCRC | \
				 SDMMC_INT_RESP_ERR)
#define DW_MCI_ERROR_FLAGS	(DW_MCI_DATA_ERROR_FLAGS | \
				 DW_MCI_CMD_ERROR_FLAGS | SDMMC_INT_HLE)
#define DW_MCI_SEND_STATUS	1
#define DW_MCI_RECV_STATUS	2
#define DW_MCI_DMA_THRESHOLD	16

#ifdef CONFIG_MMC_DW_IDMAC
#define IDMAC_INT_CLR		(SDMMC_IDMAC_INT_AI | SDMMC_IDMAC_INT_NI | \
				 SDMMC_IDMAC_INT_CES | SDMMC_IDMAC_INT_DU | \
				 SDMMC_IDMAC_INT_FBE | SDMMC_IDMAC_INT_RI | \
				 SDMMC_IDMAC_INT_TI)

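/*
 * Hardware descriptor used by the internal DMA controller (IDMAC):
 * des0 carries the control/status bits defined below, des1 the buffer
 * size, and des2/des3 the buffer addresses.  With chained descriptors
 * (IDMAC_DES0_CH) des3 points at the next descriptor instead, which is
 * how dw_mci_idmac_init() links the ring.
 */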
struct idmac_desc {
	u32		des0;	/* Control Descriptor */
#define IDMAC_DES0_DIC	BIT(1)
#define IDMAC_DES0_LD	BIT(2)
#define IDMAC_DES0_FD	BIT(3)
#define IDMAC_DES0_CH	BIT(4)
#define IDMAC_DES0_ER	BIT(5)
#define IDMAC_DES0_CES	BIT(30)
#define IDMAC_DES0_OWN	BIT(31)

	u32		des1;	/* Buffer sizes */
#define IDMAC_SET_BUFFER1_SIZE(d, s) \
	((d)->des1 = ((d)->des1 & 0x03ffe000) | ((s) & 0x1fff))

	u32		des2;	/* buffer 1 physical address */

	u32		des3;	/* buffer 2 physical address */
};
#endif /* CONFIG_MMC_DW_IDMAC */

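/*
 * Tuning block patterns (4-bit and 8-bit bus variants) handed to the
 * variant driver by dw_mci_execute_tuning() below as the expected data
 * for the tuning commands.
 */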
static const u8 tuning_blk_pattern_4bit[] = {
	0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc,
	0xc3, 0x3c, 0xcc, 0xff, 0xfe, 0xff, 0xfe, 0xef,
	0xff, 0xdf, 0xff, 0xdd, 0xff, 0xfb, 0xff, 0xfb,
	0xbf, 0xff, 0x7f, 0xff, 0x77, 0xf7, 0xbd, 0xef,
	0xff, 0xf0, 0xff, 0xf0, 0x0f, 0xfc, 0xcc, 0x3c,
	0xcc, 0x33, 0xcc, 0xcf, 0xff, 0xef, 0xff, 0xee,
	0xff, 0xfd, 0xff, 0xfd, 0xdf, 0xff, 0xbf, 0xff,
	0xbb, 0xff, 0xf7, 0xff, 0xf7, 0x7f, 0x7b, 0xde,
};

static const u8 tuning_blk_pattern_8bit[] = {
	0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00,
	0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 0xcc,
	0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 0xff,
	0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 0xff,
	0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 0xdd,
	0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 0xbb,
	0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 0xff,
	0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 0xff,
	0xff, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00,
	0x00, 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc,
	0xcc, 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff,
	0xff, 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee,
	0xff, 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd,
	0xdd, 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff,
	0xbb, 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff,
	0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee,
};

#if defined(CONFIG_DEBUG_FS)
static int dw_mci_req_show(struct seq_file *s, void *v)
{
	struct dw_mci_slot *slot = s->private;
	struct mmc_request *mrq;
	struct mmc_command *cmd;
	struct mmc_command *stop;
	struct mmc_data *data;

	/* Make sure we get a consistent snapshot */
	spin_lock_bh(&slot->host->lock);
	mrq = slot->mrq;

	if (mrq) {
		cmd = mrq->cmd;
		data = mrq->data;
		stop = mrq->stop;

		if (cmd)
			seq_printf(s,
				   "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
				   cmd->opcode, cmd->arg, cmd->flags,
				   cmd->resp[0], cmd->resp[1], cmd->resp[2],
				   cmd->resp[3], cmd->error);
		if (data)
			seq_printf(s, "DATA %u / %u * %u flg %x err %d\n",
				   data->bytes_xfered, data->blocks,
				   data->blksz, data->flags, data->error);
		if (stop)
			seq_printf(s,
				   "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
				   stop->opcode, stop->arg, stop->flags,
				   stop->resp[0], stop->resp[1], stop->resp[2],
				   stop->resp[3], stop->error);
	}

	spin_unlock_bh(&slot->host->lock);

	return 0;
}

static int dw_mci_req_open(struct inode *inode, struct file *file)
{
	return single_open(file, dw_mci_req_show, inode->i_private);
}

static const struct file_operations dw_mci_req_fops = {
	.owner		= THIS_MODULE,
	.open		= dw_mci_req_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int dw_mci_regs_show(struct seq_file *s, void *v)
{
	struct dw_mci *host = s->private;

	seq_printf(s, "STATUS:\t0x%08x\n", mci_readl(host, STATUS));
	seq_printf(s, "RINTSTS:\t0x%08x\n", mci_readl(host, RINTSTS));
	seq_printf(s, "CMD:\t0x%08x\n", mci_readl(host, CMD));
	seq_printf(s, "CTRL:\t0x%08x\n", mci_readl(host, CTRL));
	seq_printf(s, "INTMASK:\t0x%08x\n", mci_readl(host, INTMASK));
	seq_printf(s, "CLKENA:\t0x%08x\n", mci_readl(host, CLKENA));

	return 0;
}

static int dw_mci_regs_open(struct inode *inode, struct file *file)
{
	return single_open(file, dw_mci_regs_show, inode->i_private);
}

static const struct file_operations dw_mci_regs_fops = {
	.owner		= THIS_MODULE,
	.open		= dw_mci_regs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static void dw_mci_init_debugfs(struct dw_mci_slot *slot)
{
	struct mmc_host *mmc = slot->mmc;
	struct dw_mci *host = slot->host;
	struct dentry *root;
	struct dentry *node;

	root = mmc->debugfs_root;
	if (!root)
		return;

	node = debugfs_create_file("regs", S_IRUSR, root, host,
				   &dw_mci_regs_fops);
	if (!node)
		goto err;

	node = debugfs_create_file("req", S_IRUSR, root, slot,
				   &dw_mci_req_fops);
	if (!node)
		goto err;

	node = debugfs_create_u32("state", S_IRUSR, root, (u32 *)&host->state);
	if (!node)
		goto err;

	node = debugfs_create_x32("pending_events", S_IRUSR, root,
				  (u32 *)&host->pending_events);
	if (!node)
		goto err;

	node = debugfs_create_x32("completed_events", S_IRUSR, root,
				  (u32 *)&host->completed_events);
	if (!node)
		goto err;

	return;

err:
	dev_err(&mmc->class_dev, "failed to initialize debugfs for slot\n");
}
#endif /* defined(CONFIG_DEBUG_FS) */

static void dw_mci_set_timeout(struct dw_mci *host)
{
	/* timeout (maximum) */
	mci_writel(host, TMOUT, 0xffffffff);
}

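/*
 * Build the CMD register value for a command: stop vs. wait-for-data,
 * response length/CRC handling and data direction are derived from the
 * mmc_command flags, and the variant driver may adjust the result via
 * its prepare_command() hook.
 */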
static u32 dw_mci_prepare_command(struct mmc_host *mmc, struct mmc_command *cmd)
{
	struct mmc_data *data;
	struct dw_mci_slot *slot = mmc_priv(mmc);
	const struct dw_mci_drv_data *drv_data = slot->host->drv_data;
	u32 cmdr;

	cmd->error = -EINPROGRESS;

	cmdr = cmd->opcode;

	if (cmdr == MMC_STOP_TRANSMISSION)
		cmdr |= SDMMC_CMD_STOP;
	else
		cmdr |= SDMMC_CMD_PRV_DAT_WAIT;

	if (cmd->flags & MMC_RSP_PRESENT) {
		/* We expect a response, so set this bit */
		cmdr |= SDMMC_CMD_RESP_EXP;
		if (cmd->flags & MMC_RSP_136)
			cmdr |= SDMMC_CMD_RESP_LONG;
	}

	if (cmd->flags & MMC_RSP_CRC)
		cmdr |= SDMMC_CMD_RESP_CRC;

	data = cmd->data;
	if (data) {
		cmdr |= SDMMC_CMD_DAT_EXP;
		if (data->flags & MMC_DATA_STREAM)
			cmdr |= SDMMC_CMD_STRM_MODE;
		if (data->flags & MMC_DATA_WRITE)
			cmdr |= SDMMC_CMD_DAT_WR;
	}

	if (drv_data && drv_data->prepare_command)
		drv_data->prepare_command(slot->host, &cmdr);

	return cmdr;
}

static void dw_mci_start_command(struct dw_mci *host,
				 struct mmc_command *cmd, u32 cmd_flags)
{
	host->cmd = cmd;
	dev_vdbg(host->dev,
		 "start command: ARGR=0x%08x CMDR=0x%08x\n",
		 cmd->arg, cmd_flags);

	mci_writel(host, CMDARG, cmd->arg);
	wmb();

	mci_writel(host, CMD, cmd_flags | SDMMC_CMD_START);
}

static void send_stop_cmd(struct dw_mci *host, struct mmc_data *data)
{
	dw_mci_start_command(host, data->stop, host->stop_cmdr);
}

/* DMA interface functions */
static void dw_mci_stop_dma(struct dw_mci *host)
{
	if (host->using_dma) {
		host->dma_ops->stop(host);
		host->dma_ops->cleanup(host);
	} else {
		/* Data transfer was stopped by the interrupt handler */
		set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
	}
}

static int dw_mci_get_dma_dir(struct mmc_data *data)
{
	if (data->flags & MMC_DATA_WRITE)
		return DMA_TO_DEVICE;
	else
		return DMA_FROM_DEVICE;
}

#ifdef CONFIG_MMC_DW_IDMAC
static void dw_mci_dma_cleanup(struct dw_mci *host)
{
	struct mmc_data *data = host->data;

	if (data)
		if (!data->host_cookie)
			dma_unmap_sg(host->dev,
				     data->sg,
				     data->sg_len,
				     dw_mci_get_dma_dir(data));
}

static void dw_mci_idmac_stop_dma(struct dw_mci *host)
{
	u32 temp;

	/* Disable and reset the IDMAC interface */
	temp = mci_readl(host, CTRL);
	temp &= ~SDMMC_CTRL_USE_IDMAC;
	temp |= SDMMC_CTRL_DMA_RESET;
	mci_writel(host, CTRL, temp);

	/* Stop the IDMAC running */
	temp = mci_readl(host, BMOD);
	temp &= ~(SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB);
	mci_writel(host, BMOD, temp);
}

static void dw_mci_idmac_complete_dma(struct dw_mci *host)
{
	struct mmc_data *data = host->data;

	dev_vdbg(host->dev, "DMA complete\n");

	host->dma_ops->cleanup(host);

	/*
	 * If the card was removed, data will be NULL. No point in trying to
	 * send the stop command or waiting for NBUSY in this case.
	 */
	if (data) {
		set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
		tasklet_schedule(&host->tasklet);
	}
}

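/*
 * Convert the DMA-mapped scatterlist into the IDMAC descriptor ring at
 * host->sg_cpu, marking the first and last descriptors and handing
 * ownership of each entry to the hardware.
 */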
static void dw_mci_translate_sglist(struct dw_mci *host, struct mmc_data *data,
				    unsigned int sg_len)
{
	int i;
	struct idmac_desc *desc = host->sg_cpu;

	for (i = 0; i < sg_len; i++, desc++) {
		unsigned int length = sg_dma_len(&data->sg[i]);
		u32 mem_addr = sg_dma_address(&data->sg[i]);

		/* Set the OWN bit and disable interrupts for this descriptor */
		desc->des0 = IDMAC_DES0_OWN | IDMAC_DES0_DIC | IDMAC_DES0_CH;

		/* Buffer length */
		IDMAC_SET_BUFFER1_SIZE(desc, length);

		/* Physical address to DMA to/from */
		desc->des2 = mem_addr;
	}

	/* Set first descriptor */
	desc = host->sg_cpu;
	desc->des0 |= IDMAC_DES0_FD;

	/* Set last descriptor */
	desc = host->sg_cpu + (i - 1) * sizeof(struct idmac_desc);
	desc->des0 &= ~(IDMAC_DES0_CH | IDMAC_DES0_DIC);
	desc->des0 |= IDMAC_DES0_LD;

	wmb();
}

static void dw_mci_idmac_start_dma(struct dw_mci *host, unsigned int sg_len)
{
	u32 temp;

	dw_mci_translate_sglist(host, host->data, sg_len);

	/* Select IDMAC interface */
	temp = mci_readl(host, CTRL);
	temp |= SDMMC_CTRL_USE_IDMAC;
	mci_writel(host, CTRL, temp);

	wmb();

	/* Enable the IDMAC */
	temp = mci_readl(host, BMOD);
	temp |= SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB;
	mci_writel(host, BMOD, temp);

	/* Start it running */
	mci_writel(host, PLDMND, 1);
}

static int dw_mci_idmac_init(struct dw_mci *host)
{
	struct idmac_desc *p;
	int i;

	/* Number of descriptors in the ring buffer */
	host->ring_size = PAGE_SIZE / sizeof(struct idmac_desc);

	/* Forward link the descriptor list */
	for (i = 0, p = host->sg_cpu; i < host->ring_size - 1; i++, p++)
		p->des3 = host->sg_dma + (sizeof(struct idmac_desc) * (i + 1));

	/* Set the last descriptor as the end-of-ring descriptor */
	p->des3 = host->sg_dma;
	p->des0 = IDMAC_DES0_ER;

	mci_writel(host, BMOD, SDMMC_IDMAC_SWRESET);

	/* Mask out interrupts - get Tx & Rx complete only */
	mci_writel(host, IDSTS, IDMAC_INT_CLR);
	mci_writel(host, IDINTEN, SDMMC_IDMAC_INT_NI | SDMMC_IDMAC_INT_RI |
		   SDMMC_IDMAC_INT_TI);

	/* Set the descriptor base address */
	mci_writel(host, DBADDR, host->sg_dma);
	return 0;
}

static const struct dw_mci_dma_ops dw_mci_idmac_ops = {
	.init = dw_mci_idmac_init,
	.start = dw_mci_idmac_start_dma,
	.stop = dw_mci_idmac_stop_dma,
	.complete = dw_mci_idmac_complete_dma,
	.cleanup = dw_mci_dma_cleanup,
};
#endif /* CONFIG_MMC_DW_IDMAC */

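/*
 * DMA-map the scatterlist for a data transfer; short or non-word-aligned
 * transfers get -EINVAL so the caller falls back to PIO.  When called
 * from dw_mci_pre_req() (next == true) the mapping is cached in
 * data->host_cookie so the request path can reuse it.
 */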
static int dw_mci_pre_dma_transfer(struct dw_mci *host,
				   struct mmc_data *data,
				   bool next)
{
	struct scatterlist *sg;
	unsigned int i, sg_len;

	if (!next && data->host_cookie)
		return data->host_cookie;

	/*
	 * We don't do DMA on "complex" transfers, i.e. with
	 * non-word-aligned buffers or lengths. Also, we don't bother
	 * with all the DMA setup overhead for short transfers.
	 */
	if (data->blocks * data->blksz < DW_MCI_DMA_THRESHOLD)
		return -EINVAL;

	if (data->blksz & 3)
		return -EINVAL;

	for_each_sg(data->sg, sg, data->sg_len, i) {
		if (sg->offset & 3 || sg->length & 3)
			return -EINVAL;
	}

	sg_len = dma_map_sg(host->dev,
			    data->sg,
			    data->sg_len,
			    dw_mci_get_dma_dir(data));
	if (sg_len == 0)
		return -EINVAL;

	if (next)
		data->host_cookie = sg_len;

	return sg_len;
}

static void dw_mci_pre_req(struct mmc_host *mmc,
			   struct mmc_request *mrq,
			   bool is_first_req)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct mmc_data *data = mrq->data;

	if (!slot->host->use_dma || !data)
		return;

	if (data->host_cookie) {
		data->host_cookie = 0;
		return;
	}

	if (dw_mci_pre_dma_transfer(slot->host, mrq->data, 1) < 0)
		data->host_cookie = 0;
}

static void dw_mci_post_req(struct mmc_host *mmc,
			    struct mmc_request *mrq,
			    int err)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct mmc_data *data = mrq->data;

	if (!slot->host->use_dma || !data)
		return;

	if (data->host_cookie)
		dma_unmap_sg(slot->host->dev,
			     data->sg,
			     data->sg_len,
			     dw_mci_get_dma_dir(data));
	data->host_cookie = 0;
}

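/*
 * Try to start the transfer via DMA.  Returns non-zero if the host has
 * no DMA channel or the buffers are unsuitable, in which case the
 * caller sets up PIO instead.
 */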
static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data)
{
	int sg_len;
	u32 temp;

	host->using_dma = 0;

	/* If we don't have a channel, we can't do DMA */
	if (!host->use_dma)
		return -ENODEV;

	sg_len = dw_mci_pre_dma_transfer(host, data, 0);
	if (sg_len < 0) {
		host->dma_ops->stop(host);
		return sg_len;
	}

	host->using_dma = 1;

	dev_vdbg(host->dev,
		 "sd sg_cpu: %#lx sg_dma: %#lx sg_len: %d\n",
		 (unsigned long)host->sg_cpu, (unsigned long)host->sg_dma,
		 sg_len);

	/* Enable the DMA interface */
	temp = mci_readl(host, CTRL);
	temp |= SDMMC_CTRL_DMA_ENABLE;
	mci_writel(host, CTRL, temp);

	/* Disable RX/TX IRQs, let DMA handle it */
	temp = mci_readl(host, INTMASK);
	temp &= ~(SDMMC_INT_RXDR | SDMMC_INT_TXDR);
	mci_writel(host, INTMASK, temp);

	host->dma_ops->start(host, sg_len);

	return 0;
}

static void dw_mci_submit_data(struct dw_mci *host, struct mmc_data *data)
{
	u32 temp;

	data->error = -EINPROGRESS;

	WARN_ON(host->data);
	host->sg = NULL;
	host->data = data;

	if (data->flags & MMC_DATA_READ)
		host->dir_status = DW_MCI_RECV_STATUS;
	else
		host->dir_status = DW_MCI_SEND_STATUS;

	if (dw_mci_submit_data_dma(host, data)) {
		int flags = SG_MITER_ATOMIC;
		if (host->data->flags & MMC_DATA_READ)
			flags |= SG_MITER_TO_SG;
		else
			flags |= SG_MITER_FROM_SG;

		sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
		host->sg = data->sg;
		host->part_buf_start = 0;
		host->part_buf_count = 0;

		mci_writel(host, RINTSTS, SDMMC_INT_TXDR | SDMMC_INT_RXDR);
		temp = mci_readl(host, INTMASK);
		temp |= SDMMC_INT_TXDR | SDMMC_INT_RXDR;
		mci_writel(host, INTMASK, temp);

		temp = mci_readl(host, CTRL);
		temp &= ~SDMMC_CTRL_DMA_ENABLE;
		mci_writel(host, CTRL, temp);
	}
}

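/*
 * Issue a CIU housekeeping command (e.g. SDMMC_CMD_UPD_CLK) and
 * busy-wait up to 500ms for the controller to accept it.
 */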
static void mci_send_cmd(struct dw_mci_slot *slot, u32 cmd, u32 arg)
{
	struct dw_mci *host = slot->host;
	unsigned long timeout = jiffies + msecs_to_jiffies(500);
	unsigned int cmd_status = 0;

	mci_writel(host, CMDARG, arg);
	wmb();
	mci_writel(host, CMD, SDMMC_CMD_START | cmd);

	while (time_before(jiffies, timeout)) {
		cmd_status = mci_readl(host, CMD);
		if (!(cmd_status & SDMMC_CMD_START))
			return;
	}
	dev_err(&slot->mmc->class_dev,
		"Timeout sending command (cmd %#x arg %#x status %#x)\n",
		cmd, arg, cmd_status);
}

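/*
 * Program the card clock (CLKDIV/CLKENA) and bus width (CTYPE) for a
 * slot.  The clock is only reprogrammed when the requested rate differs
 * from the current one, unless force_clkinit is set; every change is
 * announced to the CIU with update-clock commands.
 */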
static void dw_mci_setup_bus(struct dw_mci_slot *slot, bool force_clkinit)
{
	struct dw_mci *host = slot->host;
	unsigned int clock = slot->clock;
	u32 div;
	u32 clk_en_a;

	if (!clock) {
		mci_writel(host, CLKENA, 0);
		mci_send_cmd(slot,
			     SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
	} else if (clock != host->current_speed || force_clkinit) {
		div = host->bus_hz / clock;
		if (host->bus_hz % clock && host->bus_hz > clock)
			/*
			 * move the + 1 after the divide to prevent
			 * over-clocking the card.
			 */
			div += 1;

		div = (host->bus_hz != clock) ? DIV_ROUND_UP(div, 2) : 0;

		if ((clock << div) != slot->__clk_old || force_clkinit)
			dev_info(&slot->mmc->class_dev,
				 "Bus speed (slot %d) = %dHz (slot req %dHz, actual %dHz div = %d)\n",
				 slot->id, host->bus_hz, clock,
				 div ? ((host->bus_hz / div) >> 1) :
				 host->bus_hz, div);

		/* disable clock */
		mci_writel(host, CLKENA, 0);
		mci_writel(host, CLKSRC, 0);

		/* inform CIU */
		mci_send_cmd(slot,
			     SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);

		/* set clock to desired speed */
		mci_writel(host, CLKDIV, div);

		/* inform CIU */
		mci_send_cmd(slot,
			     SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);

		/* enable clock; only low power if no SDIO */
		clk_en_a = SDMMC_CLKEN_ENABLE << slot->id;
		if (!(mci_readl(host, INTMASK) & SDMMC_INT_SDIO(slot->id)))
			clk_en_a |= SDMMC_CLKEN_LOW_PWR << slot->id;
		mci_writel(host, CLKENA, clk_en_a);

		/* inform CIU */
		mci_send_cmd(slot,
			     SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);

		/* keep the clock value that reflects the clock divider */
		slot->__clk_old = clock << div;
	}

	host->current_speed = clock;

	/* Set the current slot bus width */
	mci_writel(host, CTYPE, (slot->ctype << slot->id));
}

static void __dw_mci_start_request(struct dw_mci *host,
				   struct dw_mci_slot *slot,
				   struct mmc_command *cmd)
{
	struct mmc_request *mrq;
	struct mmc_data *data;
	u32 cmdflags;

	mrq = slot->mrq;
	if (host->pdata->select_slot)
		host->pdata->select_slot(slot->id);

	host->cur_slot = slot;
	host->mrq = mrq;

	host->pending_events = 0;
	host->completed_events = 0;
	host->data_status = 0;

	data = cmd->data;
	if (data) {
		dw_mci_set_timeout(host);
		mci_writel(host, BYTCNT, data->blksz*data->blocks);
		mci_writel(host, BLKSIZ, data->blksz);
	}

	cmdflags = dw_mci_prepare_command(slot->mmc, cmd);

	/* this is the first command, send the initialization clock */
	if (test_and_clear_bit(DW_MMC_CARD_NEED_INIT, &slot->flags))
		cmdflags |= SDMMC_CMD_INIT;

	if (data) {
		dw_mci_submit_data(host, data);
		wmb();
	}

	dw_mci_start_command(host, cmd, cmdflags);

	if (mrq->stop)
		host->stop_cmdr = dw_mci_prepare_command(slot->mmc, mrq->stop);
}

static void dw_mci_start_request(struct dw_mci *host,
				 struct dw_mci_slot *slot)
{
	struct mmc_request *mrq = slot->mrq;
	struct mmc_command *cmd;

	cmd = mrq->sbc ? mrq->sbc : mrq->cmd;
	__dw_mci_start_request(host, slot, cmd);
}

/* must be called with host->lock held */
static void dw_mci_queue_request(struct dw_mci *host, struct dw_mci_slot *slot,
				 struct mmc_request *mrq)
{
	dev_vdbg(&slot->mmc->class_dev, "queue request: state=%d\n",
		 host->state);

	slot->mrq = mrq;

	if (host->state == STATE_IDLE) {
		host->state = STATE_SENDING_CMD;
		dw_mci_start_request(host, slot);
	} else {
		list_add_tail(&slot->queue_node, &host->queue);
	}
}

static void dw_mci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;

	WARN_ON(slot->mrq);

	/*
	 * The check for card presence and queueing of the request must be
	 * atomic, otherwise the card could be removed in between and the
	 * request wouldn't fail until another card was inserted.
	 */
	spin_lock_bh(&host->lock);

	if (!test_bit(DW_MMC_CARD_PRESENT, &slot->flags)) {
		spin_unlock_bh(&host->lock);
		mrq->cmd->error = -ENOMEDIUM;
		mmc_request_done(mmc, mrq);
		return;
	}

	dw_mci_queue_request(host, slot, mrq);

	spin_unlock_bh(&host->lock);
}

static void dw_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	const struct dw_mci_drv_data *drv_data = slot->host->drv_data;
	u32 regs;

	switch (ios->bus_width) {
	case MMC_BUS_WIDTH_4:
		slot->ctype = SDMMC_CTYPE_4BIT;
		break;
	case MMC_BUS_WIDTH_8:
		slot->ctype = SDMMC_CTYPE_8BIT;
		break;
	default:
		/* set default 1 bit mode */
		slot->ctype = SDMMC_CTYPE_1BIT;
	}

	regs = mci_readl(slot->host, UHS_REG);

	/* DDR mode set */
	if (ios->timing == MMC_TIMING_UHS_DDR50)
		regs |= ((0x1 << slot->id) << 16);
	else
		regs &= ~((0x1 << slot->id) << 16);

	mci_writel(slot->host, UHS_REG, regs);

	/*
	 * Use mirror of ios->clock to prevent race with mmc
	 * core ios update when finding the minimum.
	 */
	slot->clock = ios->clock;

	if (drv_data && drv_data->set_ios)
		drv_data->set_ios(slot->host, ios);

	/* Slot specific timing and width adjustment */
	dw_mci_setup_bus(slot, false);

	switch (ios->power_mode) {
	case MMC_POWER_UP:
		set_bit(DW_MMC_CARD_NEED_INIT, &slot->flags);
		/* Power up slot */
		if (slot->host->pdata->setpower)
			slot->host->pdata->setpower(slot->id, mmc->ocr_avail);
		regs = mci_readl(slot->host, PWREN);
		regs |= (1 << slot->id);
		mci_writel(slot->host, PWREN, regs);
		break;
	case MMC_POWER_OFF:
		/* Power down slot */
		if (slot->host->pdata->setpower)
			slot->host->pdata->setpower(slot->id, 0);
		regs = mci_readl(slot->host, PWREN);
		regs &= ~(1 << slot->id);
		mci_writel(slot->host, PWREN, regs);
		break;
	default:
		break;
	}
}

static int dw_mci_get_ro(struct mmc_host *mmc)
{
	int read_only;
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci_board *brd = slot->host->pdata;

	/* Use platform get_ro function, else try on board write protect */
	if (slot->quirks & DW_MCI_SLOT_QUIRK_NO_WRITE_PROTECT)
		read_only = 0;
	else if (brd->get_ro)
		read_only = brd->get_ro(slot->id);
	else if (gpio_is_valid(slot->wp_gpio))
		read_only = gpio_get_value(slot->wp_gpio);
	else
		read_only =
			mci_readl(slot->host, WRTPRT) & (1 << slot->id) ? 1 : 0;

	dev_dbg(&mmc->class_dev, "card is %s\n",
		read_only ? "read-only" : "read-write");

	return read_only;
}

static int dw_mci_get_cd(struct mmc_host *mmc)
{
	int present;
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci_board *brd = slot->host->pdata;

	/* Use platform get_cd function, else try onboard card detect */
	if (brd->quirks & DW_MCI_QUIRK_BROKEN_CARD_DETECTION)
		present = 1;
	else if (brd->get_cd)
		present = !brd->get_cd(slot->id);
	else
		present = (mci_readl(slot->host, CDETECT) & (1 << slot->id))
			== 0 ? 1 : 0;

	if (present)
		dev_dbg(&mmc->class_dev, "card is present\n");
	else
		dev_dbg(&mmc->class_dev, "card is not present\n");

	return present;
}

/*
 * Disable low power mode.
 *
 * Low power mode will stop the card clock when idle. According to the
 * description of the CLKENA register we should disable low power mode
 * for SDIO cards if we need SDIO interrupts to work.
 *
 * This function is fast if low power mode is already disabled.
 */
static void dw_mci_disable_low_power(struct dw_mci_slot *slot)
{
	struct dw_mci *host = slot->host;
	u32 clk_en_a;
	const u32 clken_low_pwr = SDMMC_CLKEN_LOW_PWR << slot->id;

	clk_en_a = mci_readl(host, CLKENA);

	if (clk_en_a & clken_low_pwr) {
		mci_writel(host, CLKENA, clk_en_a & ~clken_low_pwr);
		mci_send_cmd(slot, SDMMC_CMD_UPD_CLK |
			     SDMMC_CMD_PRV_DAT_WAIT, 0);
	}
}

static void dw_mci_enable_sdio_irq(struct mmc_host *mmc, int enb)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;
	u32 int_mask;

	/* Enable/disable Slot Specific SDIO interrupt */
	int_mask = mci_readl(host, INTMASK);
	if (enb) {
		/*
		 * Turn off low power mode if it was enabled.  This is a bit of
		 * a heavy operation and we disable / enable IRQs a lot, so
		 * we'll leave low power mode disabled and it will get
		 * re-enabled again in dw_mci_setup_bus().
		 */
		dw_mci_disable_low_power(slot);

		mci_writel(host, INTMASK,
			   (int_mask | SDMMC_INT_SDIO(slot->id)));
	} else {
		mci_writel(host, INTMASK,
			   (int_mask & ~SDMMC_INT_SDIO(slot->id)));
	}
}

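/*
 * Pick the tuning block pattern that matches the tuning opcode and bus
 * width, then let the variant driver run the actual tuning sequence.
 */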
static int dw_mci_execute_tuning(struct mmc_host *mmc, u32 opcode)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;
	const struct dw_mci_drv_data *drv_data = host->drv_data;
	struct dw_mci_tuning_data tuning_data;
	int err = -ENOSYS;

	if (opcode == MMC_SEND_TUNING_BLOCK_HS200) {
		if (mmc->ios.bus_width == MMC_BUS_WIDTH_8) {
			tuning_data.blk_pattern = tuning_blk_pattern_8bit;
			tuning_data.blksz = sizeof(tuning_blk_pattern_8bit);
		} else if (mmc->ios.bus_width == MMC_BUS_WIDTH_4) {
			tuning_data.blk_pattern = tuning_blk_pattern_4bit;
			tuning_data.blksz = sizeof(tuning_blk_pattern_4bit);
		} else {
			return -EINVAL;
		}
	} else if (opcode == MMC_SEND_TUNING_BLOCK) {
		tuning_data.blk_pattern = tuning_blk_pattern_4bit;
		tuning_data.blksz = sizeof(tuning_blk_pattern_4bit);
	} else {
		dev_err(host->dev,
			"Undefined command(%d) for tuning\n", opcode);
		return -EINVAL;
	}

	if (drv_data && drv_data->execute_tuning)
		err = drv_data->execute_tuning(slot, opcode, &tuning_data);
	return err;
}

static const struct mmc_host_ops dw_mci_ops = {
	.request		= dw_mci_request,
	.pre_req		= dw_mci_pre_req,
	.post_req		= dw_mci_post_req,
	.set_ios		= dw_mci_set_ios,
	.get_ro			= dw_mci_get_ro,
	.get_cd			= dw_mci_get_cd,
	.enable_sdio_irq	= dw_mci_enable_sdio_irq,
	.execute_tuning		= dw_mci_execute_tuning,
};

static void dw_mci_request_end(struct dw_mci *host, struct mmc_request *mrq)
	__releases(&host->lock)
	__acquires(&host->lock)
{
	struct dw_mci_slot *slot;
	struct mmc_host *prev_mmc = host->cur_slot->mmc;

	WARN_ON(host->cmd || host->data);

	host->cur_slot->mrq = NULL;
	host->mrq = NULL;
	if (!list_empty(&host->queue)) {
		slot = list_entry(host->queue.next,
				  struct dw_mci_slot, queue_node);
		list_del(&slot->queue_node);
		dev_vdbg(host->dev, "list not empty: %s is next\n",
			 mmc_hostname(slot->mmc));
		host->state = STATE_SENDING_CMD;
		dw_mci_start_request(host, slot);
	} else {
		dev_vdbg(host->dev, "list empty\n");
		host->state = STATE_IDLE;
	}

	spin_unlock(&host->lock);
	mmc_request_done(prev_mmc, mrq);
	spin_lock(&host->lock);
}

static void dw_mci_command_complete(struct dw_mci *host, struct mmc_command *cmd)
{
	u32 status = host->cmd_status;

	host->cmd_status = 0;

	/* Read the response from the card (up to 16 bytes) */
	if (cmd->flags & MMC_RSP_PRESENT) {
		if (cmd->flags & MMC_RSP_136) {
			cmd->resp[3] = mci_readl(host, RESP0);
			cmd->resp[2] = mci_readl(host, RESP1);
			cmd->resp[1] = mci_readl(host, RESP2);
			cmd->resp[0] = mci_readl(host, RESP3);
		} else {
			cmd->resp[0] = mci_readl(host, RESP0);
			cmd->resp[1] = 0;
			cmd->resp[2] = 0;
			cmd->resp[3] = 0;
		}
	}

	if (status & SDMMC_INT_RTO)
		cmd->error = -ETIMEDOUT;
	else if ((cmd->flags & MMC_RSP_CRC) && (status & SDMMC_INT_RCRC))
		cmd->error = -EILSEQ;
	else if (status & SDMMC_INT_RESP_ERR)
		cmd->error = -EIO;
	else
		cmd->error = 0;

	if (cmd->error) {
		/* newer ip versions need a delay between retries */
		if (host->quirks & DW_MCI_QUIRK_RETRY_DELAY)
			mdelay(20);

		if (cmd->data) {
			dw_mci_stop_dma(host);
			host->data = NULL;
		}
	}
}

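/*
 * Request state machine, run from tasklet context.  The interrupt
 * handler only records status and sets bits in host->pending_events;
 * this function consumes those bits and walks the request through the
 * command, data, busy and stop phases.
 */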
static void dw_mci_tasklet_func(unsigned long priv)
{
	struct dw_mci *host = (struct dw_mci *)priv;
	struct mmc_data *data;
	struct mmc_command *cmd;
	enum dw_mci_state state;
	enum dw_mci_state prev_state;
	u32 status, ctrl;

	spin_lock(&host->lock);

	state = host->state;
	data = host->data;

	do {
		prev_state = state;

		switch (state) {
		case STATE_IDLE:
			break;

		case STATE_SENDING_CMD:
			if (!test_and_clear_bit(EVENT_CMD_COMPLETE,
						&host->pending_events))
				break;

			cmd = host->cmd;
			host->cmd = NULL;
			set_bit(EVENT_CMD_COMPLETE, &host->completed_events);
			dw_mci_command_complete(host, cmd);
			if (cmd == host->mrq->sbc && !cmd->error) {
				prev_state = state = STATE_SENDING_CMD;
				__dw_mci_start_request(host, host->cur_slot,
						       host->mrq->cmd);
				goto unlock;
			}

			if (!host->mrq->data || cmd->error) {
				dw_mci_request_end(host, host->mrq);
				goto unlock;
			}

			prev_state = state = STATE_SENDING_DATA;
			/* fall through */

		case STATE_SENDING_DATA:
			if (test_and_clear_bit(EVENT_DATA_ERROR,
					       &host->pending_events)) {
				dw_mci_stop_dma(host);
				if (data->stop)
					send_stop_cmd(host, data);
				state = STATE_DATA_ERROR;
				break;
			}

			if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
						&host->pending_events))
				break;

			set_bit(EVENT_XFER_COMPLETE, &host->completed_events);
			prev_state = state = STATE_DATA_BUSY;
			/* fall through */

		case STATE_DATA_BUSY:
			if (!test_and_clear_bit(EVENT_DATA_COMPLETE,
						&host->pending_events))
				break;

			host->data = NULL;
			set_bit(EVENT_DATA_COMPLETE, &host->completed_events);
			status = host->data_status;

			if (status & DW_MCI_DATA_ERROR_FLAGS) {
				if (status & SDMMC_INT_DRTO) {
					data->error = -ETIMEDOUT;
				} else if (status & SDMMC_INT_DCRC) {
					data->error = -EILSEQ;
				} else if (status & SDMMC_INT_EBE &&
					   host->dir_status ==
							DW_MCI_SEND_STATUS) {
					/*
					 * No data CRC status was returned.
					 * The number of bytes transferred will
					 * be exaggerated in PIO mode.
					 */
					data->bytes_xfered = 0;
					data->error = -ETIMEDOUT;
				} else {
					dev_err(host->dev,
						"data FIFO error (status=%08x)\n",
						status);
					data->error = -EIO;
				}
				/*
				 * After an error, there may be data lingering
				 * in the FIFO, so reset it - doing so
				 * generates a block interrupt, hence setting
				 * the scatter-gather pointer to NULL.
				 */
				sg_miter_stop(&host->sg_miter);
				host->sg = NULL;
				ctrl = mci_readl(host, CTRL);
				ctrl |= SDMMC_CTRL_FIFO_RESET;
				mci_writel(host, CTRL, ctrl);
			} else {
				data->bytes_xfered = data->blocks * data->blksz;
				data->error = 0;
			}

			if (!data->stop) {
				dw_mci_request_end(host, host->mrq);
				goto unlock;
			}

			if (host->mrq->sbc && !data->error) {
				data->stop->error = 0;
				dw_mci_request_end(host, host->mrq);
				goto unlock;
			}

			prev_state = state = STATE_SENDING_STOP;
			if (!data->error)
				send_stop_cmd(host, data);
			/* fall through */

		case STATE_SENDING_STOP:
			if (!test_and_clear_bit(EVENT_CMD_COMPLETE,
						&host->pending_events))
				break;

			host->cmd = NULL;
			dw_mci_command_complete(host, host->mrq->stop);
			dw_mci_request_end(host, host->mrq);
			goto unlock;

		case STATE_DATA_ERROR:
			if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
						&host->pending_events))
				break;

			state = STATE_DATA_BUSY;
			break;
		}
	} while (state != prev_state);

	host->state = state;
unlock:
	spin_unlock(&host->lock);
}

/* push final bytes to part_buf, only use during push */
static void dw_mci_set_part_bytes(struct dw_mci *host, void *buf, int cnt)
{
	memcpy((void *)&host->part_buf, buf, cnt);
	host->part_buf_count = cnt;
}

/* append bytes to part_buf, only use during push */
static int dw_mci_push_part_bytes(struct dw_mci *host, void *buf, int cnt)
{
	cnt = min(cnt, (1 << host->data_shift) - host->part_buf_count);
	memcpy((void *)&host->part_buf + host->part_buf_count, buf, cnt);
	host->part_buf_count += cnt;
	return cnt;
}

/* pull first bytes from part_buf, only use during pull */
static int dw_mci_pull_part_bytes(struct dw_mci *host, void *buf, int cnt)
{
	cnt = min(cnt, (int)host->part_buf_count);
	if (cnt) {
		memcpy(buf, (void *)&host->part_buf + host->part_buf_start,
		       cnt);
		host->part_buf_count -= cnt;
		host->part_buf_start += cnt;
	}
	return cnt;
}

/* pull final bytes from the part_buf, assuming it's just been filled */
static void dw_mci_pull_final_bytes(struct dw_mci *host, void *buf, int cnt)
{
	memcpy(buf, &host->part_buf, cnt);
	host->part_buf_start = cnt;
	host->part_buf_count = (1 << host->data_shift) - cnt;
}

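/*
 * PIO FIFO accessors in 16-, 32- and 64-bit widths.  host->push_data()
 * and host->pull_data() point at the variant matching the host FIFO
 * width; part_buf holds the bytes of a partially filled FIFO word
 * between calls.
 */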
static void dw_mci_push_data16(struct dw_mci *host, void *buf, int cnt)
{
	struct mmc_data *data = host->data;
	int init_cnt = cnt;

	/* try and push anything in the part_buf */
	if (unlikely(host->part_buf_count)) {
		int len = dw_mci_push_part_bytes(host, buf, cnt);
		buf += len;
		cnt -= len;
		if (host->part_buf_count == 2) {
			mci_writew(host, DATA(host->data_offset),
				   host->part_buf16);
			host->part_buf_count = 0;
		}
	}
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	if (unlikely((unsigned long)buf & 0x1)) {
		while (cnt >= 2) {
			u16 aligned_buf[64];
			int len = min(cnt & -2, (int)sizeof(aligned_buf));
			int items = len >> 1;
			int i;
			/* memcpy from input buffer into aligned buffer */
			memcpy(aligned_buf, buf, len);
			buf += len;
			cnt -= len;
			/* push data from aligned buffer into fifo */
			for (i = 0; i < items; ++i)
				mci_writew(host, DATA(host->data_offset),
					   aligned_buf[i]);
		}
	} else
#endif
	{
		u16 *pdata = buf;
		for (; cnt >= 2; cnt -= 2)
			mci_writew(host, DATA(host->data_offset), *pdata++);
		buf = pdata;
	}
	/* put anything remaining in the part_buf */
	if (cnt) {
		dw_mci_set_part_bytes(host, buf, cnt);
		/* Push data if we have reached the expected data length */
		if ((data->bytes_xfered + init_cnt) ==
		    (data->blksz * data->blocks))
			mci_writew(host, DATA(host->data_offset),
				   host->part_buf16);
	}
}

static void dw_mci_pull_data16(struct dw_mci *host, void *buf, int cnt)
{
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	if (unlikely((unsigned long)buf & 0x1)) {
		while (cnt >= 2) {
			/* pull data from fifo into aligned buffer */
			u16 aligned_buf[64];
			int len = min(cnt & -2, (int)sizeof(aligned_buf));
			int items = len >> 1;
			int i;
			for (i = 0; i < items; ++i)
				aligned_buf[i] = mci_readw(host,
						DATA(host->data_offset));
			/* memcpy from aligned buffer into output buffer */
			memcpy(buf, aligned_buf, len);
			buf += len;
			cnt -= len;
		}
	} else
#endif
	{
		u16 *pdata = buf;
		for (; cnt >= 2; cnt -= 2)
			*pdata++ = mci_readw(host, DATA(host->data_offset));
		buf = pdata;
	}
	if (cnt) {
		host->part_buf16 = mci_readw(host, DATA(host->data_offset));
		dw_mci_pull_final_bytes(host, buf, cnt);
	}
}

static void dw_mci_push_data32(struct dw_mci *host, void *buf, int cnt)
{
	struct mmc_data *data = host->data;
	int init_cnt = cnt;

	/* try and push anything in the part_buf */
	if (unlikely(host->part_buf_count)) {
		int len = dw_mci_push_part_bytes(host, buf, cnt);
		buf += len;
		cnt -= len;
		if (host->part_buf_count == 4) {
			mci_writel(host, DATA(host->data_offset),
				   host->part_buf32);
			host->part_buf_count = 0;
		}
	}
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	if (unlikely((unsigned long)buf & 0x3)) {
		while (cnt >= 4) {
			u32 aligned_buf[32];
			int len = min(cnt & -4, (int)sizeof(aligned_buf));
			int items = len >> 2;
			int i;
			/* memcpy from input buffer into aligned buffer */
			memcpy(aligned_buf, buf, len);
			buf += len;
			cnt -= len;
			/* push data from aligned buffer into fifo */
			for (i = 0; i < items; ++i)
				mci_writel(host, DATA(host->data_offset),
					   aligned_buf[i]);
		}
	} else
#endif
	{
		u32 *pdata = buf;
		for (; cnt >= 4; cnt -= 4)
			mci_writel(host, DATA(host->data_offset), *pdata++);
		buf = pdata;
	}
	/* put anything remaining in the part_buf */
	if (cnt) {
		dw_mci_set_part_bytes(host, buf, cnt);
		/* Push data if we have reached the expected data length */
		if ((data->bytes_xfered + init_cnt) ==
		    (data->blksz * data->blocks))
			mci_writel(host, DATA(host->data_offset),
				   host->part_buf32);
	}
}

static void dw_mci_pull_data32(struct dw_mci *host, void *buf, int cnt)
{
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	if (unlikely((unsigned long)buf & 0x3)) {
		while (cnt >= 4) {
			/* pull data from fifo into aligned buffer */
			u32 aligned_buf[32];
			int len = min(cnt & -4, (int)sizeof(aligned_buf));
			int items = len >> 2;
			int i;
			for (i = 0; i < items; ++i)
				aligned_buf[i] = mci_readl(host,
						DATA(host->data_offset));
			/* memcpy from aligned buffer into output buffer */
			memcpy(buf, aligned_buf, len);
			buf += len;
			cnt -= len;
		}
	} else
#endif
	{
		u32 *pdata = buf;
		for (; cnt >= 4; cnt -= 4)
			*pdata++ = mci_readl(host, DATA(host->data_offset));
		buf = pdata;
	}
	if (cnt) {
		host->part_buf32 = mci_readl(host, DATA(host->data_offset));
		dw_mci_pull_final_bytes(host, buf, cnt);
	}
}

static void dw_mci_push_data64(struct dw_mci *host, void *buf, int cnt)
{
	struct mmc_data *data = host->data;
	int init_cnt = cnt;

	/* try and push anything in the part_buf */
	if (unlikely(host->part_buf_count)) {
		int len = dw_mci_push_part_bytes(host, buf, cnt);
		buf += len;
		cnt -= len;

		if (host->part_buf_count == 8) {
			mci_writeq(host, DATA(host->data_offset),
				   host->part_buf);
			host->part_buf_count = 0;
		}
	}
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	if (unlikely((unsigned long)buf & 0x7)) {
		while (cnt >= 8) {
			u64 aligned_buf[16];
			int len = min(cnt & -8, (int)sizeof(aligned_buf));
			int items = len >> 3;
			int i;
			/* memcpy from input buffer into aligned buffer */
			memcpy(aligned_buf, buf, len);
			buf += len;
			cnt -= len;
			/* push data from aligned buffer into fifo */
			for (i = 0; i < items; ++i)
				mci_writeq(host, DATA(host->data_offset),
					   aligned_buf[i]);
		}
	} else
#endif
	{
		u64 *pdata = buf;
		for (; cnt >= 8; cnt -= 8)
			mci_writeq(host, DATA(host->data_offset), *pdata++);
		buf = pdata;
	}
	/* put anything remaining in the part_buf */
	if (cnt) {
		dw_mci_set_part_bytes(host, buf, cnt);
		/* Push data if we have reached the expected data length */
		if ((data->bytes_xfered + init_cnt) ==
		    (data->blksz * data->blocks))
			mci_writeq(host, DATA(host->data_offset),
				   host->part_buf);
	}
}

static void dw_mci_pull_data64(struct dw_mci *host, void *buf, int cnt)
{
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	if (unlikely((unsigned long)buf & 0x7)) {
		while (cnt >= 8) {
			/* pull data from fifo into aligned buffer */
			u64 aligned_buf[16];
			int len = min(cnt & -8, (int)sizeof(aligned_buf));
			int items = len >> 3;
			int i;
			for (i = 0; i < items; ++i)
				aligned_buf[i] = mci_readq(host,
						DATA(host->data_offset));
			/* memcpy from aligned buffer into output buffer */
			memcpy(buf, aligned_buf, len);
			buf += len;
			cnt -= len;
		}
	} else
#endif
	{
		u64 *pdata = buf;
		for (; cnt >= 8; cnt -= 8)
			*pdata++ = mci_readq(host, DATA(host->data_offset));
		buf = pdata;
	}
	if (cnt) {
		host->part_buf = mci_readq(host, DATA(host->data_offset));
		dw_mci_pull_final_bytes(host, buf, cnt);
	}
}

static void dw_mci_pull_data(struct dw_mci *host, void *buf, int cnt)
{
	int len;

	/* get remaining partial bytes */
	len = dw_mci_pull_part_bytes(host, buf, cnt);
	if (unlikely(len == cnt))
		return;
	buf += len;
	cnt -= len;

	/* get the rest of the data */
	host->pull_data(host, buf, cnt);
}

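/*
 * Drain the receive FIFO into the current scatterlist in PIO mode.  When
 * @dto is set (data transfer over), any bytes still reported by the FIFO
 * count are read out as well.
 */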
Kyoungil Kim87a74d32013-01-22 16:46:30 +09001512static void dw_mci_read_data_pio(struct dw_mci *host, bool dto)
Will Newtonf95f3852011-01-02 01:11:59 -05001513{
Seungwon Jeonf9c2a0d2012-02-09 14:32:43 +09001514 struct sg_mapping_iter *sg_miter = &host->sg_miter;
1515 void *buf;
1516 unsigned int offset;
Will Newtonf95f3852011-01-02 01:11:59 -05001517 struct mmc_data *data = host->data;
1518 int shift = host->data_shift;
1519 u32 status;
Markos Chandras3e4b0d82013-03-22 12:50:05 -04001520 unsigned int len;
Seungwon Jeonf9c2a0d2012-02-09 14:32:43 +09001521 unsigned int remain, fcnt;
Will Newtonf95f3852011-01-02 01:11:59 -05001522
1523 do {
Seungwon Jeonf9c2a0d2012-02-09 14:32:43 +09001524 if (!sg_miter_next(sg_miter))
1525 goto done;
Will Newtonf95f3852011-01-02 01:11:59 -05001526
Imre Deak4225fc82013-02-27 17:02:57 -08001527 host->sg = sg_miter->piter.sg;
Seungwon Jeonf9c2a0d2012-02-09 14:32:43 +09001528 buf = sg_miter->addr;
1529 remain = sg_miter->length;
1530 offset = 0;
1531
1532 do {
1533 fcnt = (SDMMC_GET_FCNT(mci_readl(host, STATUS))
1534 << shift) + host->part_buf_count;
1535 len = min(remain, fcnt);
1536 if (!len)
1537 break;
1538 dw_mci_pull_data(host, (void *)(buf + offset), len);
Markos Chandras3e4b0d82013-03-22 12:50:05 -04001539 data->bytes_xfered += len;
Will Newtonf95f3852011-01-02 01:11:59 -05001540 offset += len;
Seungwon Jeonf9c2a0d2012-02-09 14:32:43 +09001541 remain -= len;
1542 } while (remain);
Will Newtonf95f3852011-01-02 01:11:59 -05001543
Seungwon Jeone74f3a92012-08-01 09:30:46 +09001544 sg_miter->consumed = offset;
Will Newtonf95f3852011-01-02 01:11:59 -05001545 status = mci_readl(host, MINTSTS);
1546 mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
Kyoungil Kim87a74d32013-01-22 16:46:30 +09001547 /* if the RXDR is ready read again */
1548 } while ((status & SDMMC_INT_RXDR) ||
1549 (dto && SDMMC_GET_FCNT(mci_readl(host, STATUS))));
Seungwon Jeonf9c2a0d2012-02-09 14:32:43 +09001550
1551 if (!remain) {
1552 if (!sg_miter_next(sg_miter))
1553 goto done;
1554 sg_miter->consumed = 0;
1555 }
1556 sg_miter_stop(sg_miter);
Will Newtonf95f3852011-01-02 01:11:59 -05001557 return;
1558
1559done:
Seungwon Jeonf9c2a0d2012-02-09 14:32:43 +09001560 sg_miter_stop(sg_miter);
1561 host->sg = NULL;
Will Newtonf95f3852011-01-02 01:11:59 -05001562 smp_wmb();
1563 set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
1564}
1565
1566static void dw_mci_write_data_pio(struct dw_mci *host)
1567{
Seungwon Jeonf9c2a0d2012-02-09 14:32:43 +09001568 struct sg_mapping_iter *sg_miter = &host->sg_miter;
1569 void *buf;
1570 unsigned int offset;
Will Newtonf95f3852011-01-02 01:11:59 -05001571 struct mmc_data *data = host->data;
1572 int shift = host->data_shift;
1573 u32 status;
Markos Chandras3e4b0d82013-03-22 12:50:05 -04001574 unsigned int len;
Seungwon Jeonf9c2a0d2012-02-09 14:32:43 +09001575 unsigned int fifo_depth = host->fifo_depth;
1576 unsigned int remain, fcnt;
Will Newtonf95f3852011-01-02 01:11:59 -05001577
1578 do {
Seungwon Jeonf9c2a0d2012-02-09 14:32:43 +09001579 if (!sg_miter_next(sg_miter))
1580 goto done;
Will Newtonf95f3852011-01-02 01:11:59 -05001581
Imre Deak4225fc82013-02-27 17:02:57 -08001582 host->sg = sg_miter->piter.sg;
Seungwon Jeonf9c2a0d2012-02-09 14:32:43 +09001583 buf = sg_miter->addr;
1584 remain = sg_miter->length;
1585 offset = 0;
1586
1587 do {
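			/*
			 * Free FIFO space (depth minus fill level) in bytes,
			 * less the bytes already staged in the partial buffer.
			 */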
1588 fcnt = ((fifo_depth -
1589 SDMMC_GET_FCNT(mci_readl(host, STATUS)))
1590 << shift) - host->part_buf_count;
1591 len = min(remain, fcnt);
1592 if (!len)
1593 break;
1594 host->push_data(host, (void *)(buf + offset), len);
Markos Chandras3e4b0d82013-03-22 12:50:05 -04001595 data->bytes_xfered += len;
Will Newtonf95f3852011-01-02 01:11:59 -05001596 offset += len;
Seungwon Jeonf9c2a0d2012-02-09 14:32:43 +09001597 remain -= len;
1598 } while (remain);
Will Newtonf95f3852011-01-02 01:11:59 -05001599
Seungwon Jeone74f3a92012-08-01 09:30:46 +09001600 sg_miter->consumed = offset;
Will Newtonf95f3852011-01-02 01:11:59 -05001601 status = mci_readl(host, MINTSTS);
1602 mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
Will Newtonf95f3852011-01-02 01:11:59 -05001603	} while (status & SDMMC_INT_TXDR); /* if TXDR is set, write again */
Seungwon Jeonf9c2a0d2012-02-09 14:32:43 +09001604
1605 if (!remain) {
1606 if (!sg_miter_next(sg_miter))
1607 goto done;
1608 sg_miter->consumed = 0;
1609 }
1610 sg_miter_stop(sg_miter);
Will Newtonf95f3852011-01-02 01:11:59 -05001611 return;
1612
1613done:
Seungwon Jeonf9c2a0d2012-02-09 14:32:43 +09001614 sg_miter_stop(sg_miter);
1615 host->sg = NULL;
Will Newtonf95f3852011-01-02 01:11:59 -05001616 smp_wmb();
1617 set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
1618}
1619
1620static void dw_mci_cmd_interrupt(struct dw_mci *host, u32 status)
1621{
1622 if (!host->cmd_status)
1623 host->cmd_status = status;
1624
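	/* make cmd_status visible before the completion bit can be observed */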
1625 smp_wmb();
1626
1627 set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
1628 tasklet_schedule(&host->tasklet);
1629}
1630
1631static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
1632{
1633 struct dw_mci *host = dev_id;
Seungwon Jeon182c9082012-08-01 09:30:30 +09001634 u32 pending;
Shashidhar Hiremath1a5c8e12011-08-29 13:11:46 +05301635 int i;
Will Newtonf95f3852011-01-02 01:11:59 -05001636
Markos Chandras1fb5f682013-03-12 10:53:11 +00001637	pending = mci_readl(host, MINTSTS); /* read-only masked interrupt status */
1638
Doug Anderson476d79f2013-07-09 13:04:40 -07001639 /*
1640 * DTO fix - version 2.10a and below, and only if internal DMA
1641 * is configured.
1642 */
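	/*
	 * With this quirk, a non-empty FIFO count (STATUS[29:17]) with no
	 * pending interrupt is treated as a missed data-over event.
	 */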
1643 if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO) {
1644 if (!pending &&
1645 ((mci_readl(host, STATUS) >> 17) & 0x1fff))
1646 pending |= SDMMC_INT_DATA_OVER;
1647 }
1648
Markos Chandras1fb5f682013-03-12 10:53:11 +00001649 if (pending) {
Will Newtonf95f3852011-01-02 01:11:59 -05001650 if (pending & DW_MCI_CMD_ERROR_FLAGS) {
1651 mci_writel(host, RINTSTS, DW_MCI_CMD_ERROR_FLAGS);
Seungwon Jeon182c9082012-08-01 09:30:30 +09001652 host->cmd_status = pending;
Will Newtonf95f3852011-01-02 01:11:59 -05001653 smp_wmb();
1654 set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
Will Newtonf95f3852011-01-02 01:11:59 -05001655 }
1656
1657 if (pending & DW_MCI_DATA_ERROR_FLAGS) {
 1658			/* if there is an error, report DATA_ERROR */
1659 mci_writel(host, RINTSTS, DW_MCI_DATA_ERROR_FLAGS);
Seungwon Jeon182c9082012-08-01 09:30:30 +09001660 host->data_status = pending;
Will Newtonf95f3852011-01-02 01:11:59 -05001661 smp_wmb();
1662 set_bit(EVENT_DATA_ERROR, &host->pending_events);
Seungwon Jeon9b2026a2012-08-01 09:30:40 +09001663 tasklet_schedule(&host->tasklet);
Will Newtonf95f3852011-01-02 01:11:59 -05001664 }
1665
1666 if (pending & SDMMC_INT_DATA_OVER) {
1667 mci_writel(host, RINTSTS, SDMMC_INT_DATA_OVER);
1668 if (!host->data_status)
Seungwon Jeon182c9082012-08-01 09:30:30 +09001669 host->data_status = pending;
Will Newtonf95f3852011-01-02 01:11:59 -05001670 smp_wmb();
1671 if (host->dir_status == DW_MCI_RECV_STATUS) {
1672 if (host->sg != NULL)
Kyoungil Kim87a74d32013-01-22 16:46:30 +09001673 dw_mci_read_data_pio(host, true);
Will Newtonf95f3852011-01-02 01:11:59 -05001674 }
1675 set_bit(EVENT_DATA_COMPLETE, &host->pending_events);
1676 tasklet_schedule(&host->tasklet);
1677 }
1678
1679 if (pending & SDMMC_INT_RXDR) {
1680 mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
James Hoganb40af3a2011-06-24 13:54:06 +01001681 if (host->dir_status == DW_MCI_RECV_STATUS && host->sg)
Kyoungil Kim87a74d32013-01-22 16:46:30 +09001682 dw_mci_read_data_pio(host, false);
Will Newtonf95f3852011-01-02 01:11:59 -05001683 }
1684
1685 if (pending & SDMMC_INT_TXDR) {
1686 mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
James Hoganb40af3a2011-06-24 13:54:06 +01001687 if (host->dir_status == DW_MCI_SEND_STATUS && host->sg)
Will Newtonf95f3852011-01-02 01:11:59 -05001688 dw_mci_write_data_pio(host);
1689 }
1690
1691 if (pending & SDMMC_INT_CMD_DONE) {
1692 mci_writel(host, RINTSTS, SDMMC_INT_CMD_DONE);
Seungwon Jeon182c9082012-08-01 09:30:30 +09001693 dw_mci_cmd_interrupt(host, pending);
Will Newtonf95f3852011-01-02 01:11:59 -05001694 }
1695
1696 if (pending & SDMMC_INT_CD) {
1697 mci_writel(host, RINTSTS, SDMMC_INT_CD);
Thomas Abraham95dcc2c2012-05-01 14:57:36 -07001698 queue_work(host->card_workqueue, &host->card_work);
Will Newtonf95f3852011-01-02 01:11:59 -05001699 }
1700
Shashidhar Hiremath1a5c8e12011-08-29 13:11:46 +05301701 /* Handle SDIO Interrupts */
1702 for (i = 0; i < host->num_slots; i++) {
1703 struct dw_mci_slot *slot = host->slot[i];
1704 if (pending & SDMMC_INT_SDIO(i)) {
1705 mci_writel(host, RINTSTS, SDMMC_INT_SDIO(i));
1706 mmc_signal_sdio_irq(slot->mmc);
1707 }
1708 }
1709
Markos Chandras1fb5f682013-03-12 10:53:11 +00001710 }
Will Newtonf95f3852011-01-02 01:11:59 -05001711
1712#ifdef CONFIG_MMC_DW_IDMAC
1713 /* Handle DMA interrupts */
1714 pending = mci_readl(host, IDSTS);
1715 if (pending & (SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI)) {
1716 mci_writel(host, IDSTS, SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI);
1717 mci_writel(host, IDSTS, SDMMC_IDMAC_INT_NI);
Will Newtonf95f3852011-01-02 01:11:59 -05001718 host->dma_ops->complete(host);
1719 }
1720#endif
1721
1722 return IRQ_HANDLED;
1723}
1724
James Hogan1791b13e2011-06-24 13:55:55 +01001725static void dw_mci_work_routine_card(struct work_struct *work)
Will Newtonf95f3852011-01-02 01:11:59 -05001726{
James Hogan1791b13e2011-06-24 13:55:55 +01001727 struct dw_mci *host = container_of(work, struct dw_mci, card_work);
Will Newtonf95f3852011-01-02 01:11:59 -05001728 int i;
1729
1730 for (i = 0; i < host->num_slots; i++) {
1731 struct dw_mci_slot *slot = host->slot[i];
1732 struct mmc_host *mmc = slot->mmc;
1733 struct mmc_request *mrq;
1734 int present;
1735 u32 ctrl;
1736
1737 present = dw_mci_get_cd(mmc);
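		/*
		 * Keep looping until the state we have handled matches what
		 * card-detect reports now; present is re-read at the bottom.
		 */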
1738 while (present != slot->last_detect_state) {
Will Newtonf95f3852011-01-02 01:11:59 -05001739 dev_dbg(&slot->mmc->class_dev, "card %s\n",
1740 present ? "inserted" : "removed");
1741
James Hogan1791b13e2011-06-24 13:55:55 +01001742 spin_lock_bh(&host->lock);
1743
Will Newtonf95f3852011-01-02 01:11:59 -05001744 /* Card change detected */
1745 slot->last_detect_state = present;
1746
James Hogan1791b13e2011-06-24 13:55:55 +01001747 /* Mark card as present if applicable */
1748 if (present != 0)
Will Newtonf95f3852011-01-02 01:11:59 -05001749 set_bit(DW_MMC_CARD_PRESENT, &slot->flags);
Will Newtonf95f3852011-01-02 01:11:59 -05001750
1751 /* Clean up queue if present */
1752 mrq = slot->mrq;
1753 if (mrq) {
1754 if (mrq == host->mrq) {
1755 host->data = NULL;
1756 host->cmd = NULL;
1757
1758 switch (host->state) {
1759 case STATE_IDLE:
1760 break;
1761 case STATE_SENDING_CMD:
1762 mrq->cmd->error = -ENOMEDIUM;
1763 if (!mrq->data)
1764 break;
1765 /* fall through */
1766 case STATE_SENDING_DATA:
1767 mrq->data->error = -ENOMEDIUM;
1768 dw_mci_stop_dma(host);
1769 break;
1770 case STATE_DATA_BUSY:
1771 case STATE_DATA_ERROR:
1772 if (mrq->data->error == -EINPROGRESS)
1773 mrq->data->error = -ENOMEDIUM;
1774 if (!mrq->stop)
1775 break;
1776 /* fall through */
1777 case STATE_SENDING_STOP:
1778 mrq->stop->error = -ENOMEDIUM;
1779 break;
1780 }
1781
1782 dw_mci_request_end(host, mrq);
1783 } else {
1784 list_del(&slot->queue_node);
1785 mrq->cmd->error = -ENOMEDIUM;
1786 if (mrq->data)
1787 mrq->data->error = -ENOMEDIUM;
1788 if (mrq->stop)
1789 mrq->stop->error = -ENOMEDIUM;
1790
1791 spin_unlock(&host->lock);
1792 mmc_request_done(slot->mmc, mrq);
1793 spin_lock(&host->lock);
1794 }
1795 }
1796
1797 /* Power down slot */
1798 if (present == 0) {
Will Newtonf95f3852011-01-02 01:11:59 -05001799 clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);
1800
1801 /*
 1802				 * Clear down the FIFO; doing so generates a
 1803				 * block interrupt, which is why the scatter-gather
 1804				 * pointer is set to NULL first.
1805 */
Seungwon Jeonf9c2a0d2012-02-09 14:32:43 +09001806 sg_miter_stop(&host->sg_miter);
Will Newtonf95f3852011-01-02 01:11:59 -05001807 host->sg = NULL;
1808
1809 ctrl = mci_readl(host, CTRL);
1810 ctrl |= SDMMC_CTRL_FIFO_RESET;
1811 mci_writel(host, CTRL, ctrl);
1812
1813#ifdef CONFIG_MMC_DW_IDMAC
1814 ctrl = mci_readl(host, BMOD);
Seungwon Jeon141a7122012-05-22 13:01:03 +09001815 /* Software reset of DMA */
1816 ctrl |= SDMMC_IDMAC_SWRESET;
Will Newtonf95f3852011-01-02 01:11:59 -05001817 mci_writel(host, BMOD, ctrl);
1818#endif
1819
1820 }
1821
James Hogan1791b13e2011-06-24 13:55:55 +01001822 spin_unlock_bh(&host->lock);
1823
Will Newtonf95f3852011-01-02 01:11:59 -05001824 present = dw_mci_get_cd(mmc);
1825 }
1826
1827 mmc_detect_change(slot->mmc,
1828 msecs_to_jiffies(host->pdata->detect_delay_ms));
1829 }
1830}
1831
Thomas Abrahamc91eab42012-09-17 18:16:40 +00001832#ifdef CONFIG_OF
1833/* given a slot id, find out the device node representing that slot */
1834static struct device_node *dw_mci_of_find_slot_node(struct device *dev, u8 slot)
1835{
1836 struct device_node *np;
1837 const __be32 *addr;
1838 int len;
1839
1840 if (!dev || !dev->of_node)
1841 return NULL;
1842
1843 for_each_child_of_node(dev->of_node, np) {
1844 addr = of_get_property(np, "reg", &len);
1845 if (!addr || (len < sizeof(int)))
1846 continue;
1847 if (be32_to_cpup(addr) == slot)
1848 return np;
1849 }
1850 return NULL;
1851}
1852
Doug Andersona70aaa62013-01-11 17:03:50 +00001853static struct dw_mci_of_slot_quirks {
1854 char *quirk;
1855 int id;
1856} of_slot_quirks[] = {
1857 {
1858 .quirk = "disable-wp",
1859 .id = DW_MCI_SLOT_QUIRK_NO_WRITE_PROTECT,
1860 },
1861};
1862
1863static int dw_mci_of_get_slot_quirks(struct device *dev, u8 slot)
1864{
1865 struct device_node *np = dw_mci_of_find_slot_node(dev, slot);
1866 int quirks = 0;
1867 int idx;
1868
1869 /* get quirks */
1870 for (idx = 0; idx < ARRAY_SIZE(of_slot_quirks); idx++)
1871 if (of_get_property(np, of_slot_quirks[idx].quirk, NULL))
1872 quirks |= of_slot_quirks[idx].id;
1873
1874 return quirks;
1875}
1876
Thomas Abrahamc91eab42012-09-17 18:16:40 +00001877/* find out bus-width for a given slot */
1878static u32 dw_mci_of_get_bus_wd(struct device *dev, u8 slot)
1879{
1880 struct device_node *np = dw_mci_of_find_slot_node(dev, slot);
1881 u32 bus_wd = 1;
1882
1883 if (!np)
1884 return 1;
1885
1886 if (of_property_read_u32(np, "bus-width", &bus_wd))
1887 dev_err(dev, "bus-width property not found, assuming width"
1888 " as 1\n");
1889 return bus_wd;
1890}
Doug Anderson55a6ceb2013-01-11 17:03:53 +00001891
 1892/* find the write protect gpio for a given slot; or -EINVAL if none specified */
1893static int dw_mci_of_get_wp_gpio(struct device *dev, u8 slot)
1894{
1895 struct device_node *np = dw_mci_of_find_slot_node(dev, slot);
1896 int gpio;
1897
1898 if (!np)
1899 return -EINVAL;
1900
1901 gpio = of_get_named_gpio(np, "wp-gpios", 0);
1902
1903 /* Having a missing entry is valid; return silently */
1904 if (!gpio_is_valid(gpio))
1905 return -EINVAL;
1906
1907 if (devm_gpio_request(dev, gpio, "dw-mci-wp")) {
1908 dev_warn(dev, "gpio [%d] request failed\n", gpio);
1909 return -EINVAL;
1910 }
1911
1912 return gpio;
1913}
Thomas Abrahamc91eab42012-09-17 18:16:40 +00001914#else /* CONFIG_OF */
Doug Andersona70aaa62013-01-11 17:03:50 +00001915static int dw_mci_of_get_slot_quirks(struct device *dev, u8 slot)
1916{
1917 return 0;
1918}
Thomas Abrahamc91eab42012-09-17 18:16:40 +00001919static u32 dw_mci_of_get_bus_wd(struct device *dev, u8 slot)
1920{
1921 return 1;
1922}
1923static struct device_node *dw_mci_of_find_slot_node(struct device *dev, u8 slot)
1924{
1925 return NULL;
1926}
Doug Anderson55a6ceb2013-01-11 17:03:53 +00001927static int dw_mci_of_get_wp_gpio(struct device *dev, u8 slot)
1928{
1929 return -EINVAL;
1930}
Thomas Abrahamc91eab42012-09-17 18:16:40 +00001931#endif /* CONFIG_OF */
1932
Jaehoon Chung36c179a2012-08-23 20:31:48 +09001933static int dw_mci_init_slot(struct dw_mci *host, unsigned int id)
Will Newtonf95f3852011-01-02 01:11:59 -05001934{
1935 struct mmc_host *mmc;
1936 struct dw_mci_slot *slot;
Arnd Bergmanne95baf12012-11-08 14:26:11 +00001937 const struct dw_mci_drv_data *drv_data = host->drv_data;
Thomas Abraham800d78b2012-09-17 18:16:42 +00001938 int ctrl_id, ret;
Thomas Abrahamc91eab42012-09-17 18:16:40 +00001939 u8 bus_width;
Will Newtonf95f3852011-01-02 01:11:59 -05001940
Thomas Abraham4a909202012-09-17 18:16:35 +00001941 mmc = mmc_alloc_host(sizeof(struct dw_mci_slot), host->dev);
Will Newtonf95f3852011-01-02 01:11:59 -05001942 if (!mmc)
1943 return -ENOMEM;
1944
1945 slot = mmc_priv(mmc);
1946 slot->id = id;
1947 slot->mmc = mmc;
1948 slot->host = host;
Thomas Abrahamc91eab42012-09-17 18:16:40 +00001949 host->slot[id] = slot;
Will Newtonf95f3852011-01-02 01:11:59 -05001950
Doug Andersona70aaa62013-01-11 17:03:50 +00001951 slot->quirks = dw_mci_of_get_slot_quirks(host->dev, slot->id);
1952
Will Newtonf95f3852011-01-02 01:11:59 -05001953 mmc->ops = &dw_mci_ops;
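	/* Lowest card clock: bus_hz / 510 (largest CLKDIV of 255, times two) */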
1954 mmc->f_min = DIV_ROUND_UP(host->bus_hz, 510);
1955 mmc->f_max = host->bus_hz;
1956
1957 if (host->pdata->get_ocr)
1958 mmc->ocr_avail = host->pdata->get_ocr(id);
1959 else
1960 mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
1961
1962 /*
1963 * Start with slot power disabled, it will be enabled when a card
1964 * is detected.
1965 */
1966 if (host->pdata->setpower)
1967 host->pdata->setpower(id, 0);
1968
Jaehoon Chungfc3d7722011-02-25 11:08:15 +09001969 if (host->pdata->caps)
1970 mmc->caps = host->pdata->caps;
Jaehoon Chungfc3d7722011-02-25 11:08:15 +09001971
Abhilash Kesavanab269122012-11-19 10:26:21 +05301972 if (host->pdata->pm_caps)
1973 mmc->pm_caps = host->pdata->pm_caps;
1974
Thomas Abraham800d78b2012-09-17 18:16:42 +00001975 if (host->dev->of_node) {
1976 ctrl_id = of_alias_get_id(host->dev->of_node, "mshc");
1977 if (ctrl_id < 0)
1978 ctrl_id = 0;
1979 } else {
1980 ctrl_id = to_platform_device(host->dev)->id;
1981 }
James Hogancb27a842012-10-16 09:43:08 +01001982 if (drv_data && drv_data->caps)
1983 mmc->caps |= drv_data->caps[ctrl_id];
Thomas Abraham800d78b2012-09-17 18:16:42 +00001984
Seungwon Jeon4f408cc2011-12-09 14:55:52 +09001985 if (host->pdata->caps2)
1986 mmc->caps2 = host->pdata->caps2;
Seungwon Jeon4f408cc2011-12-09 14:55:52 +09001987
Will Newtonf95f3852011-01-02 01:11:59 -05001988 if (host->pdata->get_bus_wd)
Thomas Abrahamc91eab42012-09-17 18:16:40 +00001989 bus_width = host->pdata->get_bus_wd(slot->id);
1990 else if (host->dev->of_node)
1991 bus_width = dw_mci_of_get_bus_wd(host->dev, slot->id);
1992 else
1993 bus_width = 1;
1994
1995 switch (bus_width) {
1996 case 8:
1997 mmc->caps |= MMC_CAP_8_BIT_DATA;
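		/* fall through: an 8-bit slot also supports 4-bit transfers */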
1998 case 4:
1999 mmc->caps |= MMC_CAP_4_BIT_DATA;
2000 }
Will Newtonf95f3852011-01-02 01:11:59 -05002001
2002 if (host->pdata->quirks & DW_MCI_QUIRK_HIGHSPEED)
Seungwon Jeon6daa7772011-08-05 12:35:03 +09002003 mmc->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED;
Will Newtonf95f3852011-01-02 01:11:59 -05002004
Will Newtonf95f3852011-01-02 01:11:59 -05002005 if (host->pdata->blk_settings) {
2006 mmc->max_segs = host->pdata->blk_settings->max_segs;
2007 mmc->max_blk_size = host->pdata->blk_settings->max_blk_size;
2008 mmc->max_blk_count = host->pdata->blk_settings->max_blk_count;
2009 mmc->max_req_size = host->pdata->blk_settings->max_req_size;
2010 mmc->max_seg_size = host->pdata->blk_settings->max_seg_size;
2011 } else {
2012 /* Useful defaults if platform data is unset. */
Jaehoon Chunga39e5742012-02-04 17:00:27 -05002013#ifdef CONFIG_MMC_DW_IDMAC
2014 mmc->max_segs = host->ring_size;
2015 mmc->max_blk_size = 65536;
2016 mmc->max_blk_count = host->ring_size;
2017 mmc->max_seg_size = 0x1000;
2018 mmc->max_req_size = mmc->max_seg_size * mmc->max_blk_count;
2019#else
Will Newtonf95f3852011-01-02 01:11:59 -05002020 mmc->max_segs = 64;
2021 mmc->max_blk_size = 65536; /* BLKSIZ is 16 bits */
2022 mmc->max_blk_count = 512;
2023 mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
2024 mmc->max_seg_size = mmc->max_req_size;
Will Newtonf95f3852011-01-02 01:11:59 -05002025#endif /* CONFIG_MMC_DW_IDMAC */
Jaehoon Chunga39e5742012-02-04 17:00:27 -05002026 }
Will Newtonf95f3852011-01-02 01:11:59 -05002027
2028 if (dw_mci_get_cd(mmc))
2029 set_bit(DW_MMC_CARD_PRESENT, &slot->flags);
2030 else
2031 clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);
2032
Doug Anderson55a6ceb2013-01-11 17:03:53 +00002033 slot->wp_gpio = dw_mci_of_get_wp_gpio(host->dev, slot->id);
2034
Jaehoon Chung0cea5292013-02-15 23:45:45 +09002035 ret = mmc_add_host(mmc);
2036 if (ret)
2037 goto err_setup_bus;
Will Newtonf95f3852011-01-02 01:11:59 -05002038
2039#if defined(CONFIG_DEBUG_FS)
2040 dw_mci_init_debugfs(slot);
2041#endif
2042
2043 /* Card initially undetected */
2044 slot->last_detect_state = 0;
2045
Will Newtonf95f3852011-01-02 01:11:59 -05002046 return 0;
Thomas Abraham800d78b2012-09-17 18:16:42 +00002047
2048err_setup_bus:
2049 mmc_free_host(mmc);
2050 return -EINVAL;
Will Newtonf95f3852011-01-02 01:11:59 -05002051}
2052
2053static void dw_mci_cleanup_slot(struct dw_mci_slot *slot, unsigned int id)
2054{
2055 /* Shutdown detect IRQ */
2056 if (slot->host->pdata->exit)
2057 slot->host->pdata->exit(id);
2058
2059 /* Debugfs stuff is cleaned up by mmc core */
2060 mmc_remove_host(slot->mmc);
2061 slot->host->slot[id] = NULL;
2062 mmc_free_host(slot->mmc);
2063}
2064
2065static void dw_mci_init_dma(struct dw_mci *host)
2066{
2067 /* Alloc memory for sg translation */
Seungwon Jeon780f22a2012-11-28 19:26:03 +09002068 host->sg_cpu = dmam_alloc_coherent(host->dev, PAGE_SIZE,
Will Newtonf95f3852011-01-02 01:11:59 -05002069 &host->sg_dma, GFP_KERNEL);
2070 if (!host->sg_cpu) {
Thomas Abraham4a909202012-09-17 18:16:35 +00002071 dev_err(host->dev, "%s: could not alloc DMA memory\n",
Will Newtonf95f3852011-01-02 01:11:59 -05002072 __func__);
2073 goto no_dma;
2074 }
2075
2076 /* Determine which DMA interface to use */
2077#ifdef CONFIG_MMC_DW_IDMAC
2078 host->dma_ops = &dw_mci_idmac_ops;
Seungwon Jeon00956ea2012-09-28 19:13:11 +09002079 dev_info(host->dev, "Using internal DMA controller.\n");
Will Newtonf95f3852011-01-02 01:11:59 -05002080#endif
2081
2082 if (!host->dma_ops)
2083 goto no_dma;
2084
Jaehoon Chunge1631f92012-04-18 15:42:31 +09002085 if (host->dma_ops->init && host->dma_ops->start &&
2086 host->dma_ops->stop && host->dma_ops->cleanup) {
Will Newtonf95f3852011-01-02 01:11:59 -05002087 if (host->dma_ops->init(host)) {
Thomas Abraham4a909202012-09-17 18:16:35 +00002088 dev_err(host->dev, "%s: Unable to initialize "
Will Newtonf95f3852011-01-02 01:11:59 -05002089 "DMA Controller.\n", __func__);
2090 goto no_dma;
2091 }
2092 } else {
Thomas Abraham4a909202012-09-17 18:16:35 +00002093 dev_err(host->dev, "DMA initialization not found.\n");
Will Newtonf95f3852011-01-02 01:11:59 -05002094 goto no_dma;
2095 }
2096
2097 host->use_dma = 1;
2098 return;
2099
2100no_dma:
Thomas Abraham4a909202012-09-17 18:16:35 +00002101 dev_info(host->dev, "Using PIO mode.\n");
Will Newtonf95f3852011-01-02 01:11:59 -05002102 host->use_dma = 0;
2103 return;
2104}
2105
2106static bool mci_wait_reset(struct device *dev, struct dw_mci *host)
2107{
2108 unsigned long timeout = jiffies + msecs_to_jiffies(500);
2109 unsigned int ctrl;
2110
2111 mci_writel(host, CTRL, (SDMMC_CTRL_RESET | SDMMC_CTRL_FIFO_RESET |
2112 SDMMC_CTRL_DMA_RESET));
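	/* the controller clears each reset bit itself once that reset is done */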
2113
2114 /* wait till resets clear */
2115 do {
2116 ctrl = mci_readl(host, CTRL);
2117 if (!(ctrl & (SDMMC_CTRL_RESET | SDMMC_CTRL_FIFO_RESET |
2118 SDMMC_CTRL_DMA_RESET)))
2119 return true;
2120 } while (time_before(jiffies, timeout));
2121
2122 dev_err(dev, "Timeout resetting block (ctrl %#x)\n", ctrl);
2123
2124 return false;
2125}
2126
Thomas Abrahamc91eab42012-09-17 18:16:40 +00002127#ifdef CONFIG_OF
2128static struct dw_mci_of_quirks {
2129 char *quirk;
2130 int id;
2131} of_quirks[] = {
2132 {
2133 .quirk = "supports-highspeed",
2134 .id = DW_MCI_QUIRK_HIGHSPEED,
2135 }, {
2136 .quirk = "broken-cd",
2137 .id = DW_MCI_QUIRK_BROKEN_CARD_DETECTION,
2138 },
2139};
2140
2141static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
2142{
2143 struct dw_mci_board *pdata;
2144 struct device *dev = host->dev;
2145 struct device_node *np = dev->of_node;
Arnd Bergmanne95baf12012-11-08 14:26:11 +00002146 const struct dw_mci_drv_data *drv_data = host->drv_data;
Thomas Abraham800d78b2012-09-17 18:16:42 +00002147 int idx, ret;
Doug Anderson3c6d89e2013-06-07 10:28:30 -07002148 u32 clock_frequency;
Thomas Abrahamc91eab42012-09-17 18:16:40 +00002149
2150 pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
2151 if (!pdata) {
2152 dev_err(dev, "could not allocate memory for pdata\n");
2153 return ERR_PTR(-ENOMEM);
2154 }
2155
2156 /* find out number of slots supported */
2157 if (of_property_read_u32(dev->of_node, "num-slots",
2158 &pdata->num_slots)) {
2159 dev_info(dev, "num-slots property not found, "
2160 "assuming 1 slot is available\n");
2161 pdata->num_slots = 1;
2162 }
2163
2164 /* get quirks */
2165 for (idx = 0; idx < ARRAY_SIZE(of_quirks); idx++)
2166 if (of_get_property(np, of_quirks[idx].quirk, NULL))
2167 pdata->quirks |= of_quirks[idx].id;
2168
2169 if (of_property_read_u32(np, "fifo-depth", &pdata->fifo_depth))
2170 dev_info(dev, "fifo-depth property not found, using "
2171 "value of FIFOTH register as default\n");
2172
2173 of_property_read_u32(np, "card-detect-delay", &pdata->detect_delay_ms);
2174
Doug Anderson3c6d89e2013-06-07 10:28:30 -07002175 if (!of_property_read_u32(np, "clock-frequency", &clock_frequency))
2176 pdata->bus_hz = clock_frequency;
2177
James Hogancb27a842012-10-16 09:43:08 +01002178 if (drv_data && drv_data->parse_dt) {
2179 ret = drv_data->parse_dt(host);
Thomas Abraham800d78b2012-09-17 18:16:42 +00002180 if (ret)
2181 return ERR_PTR(ret);
2182 }
2183
Abhilash Kesavanab269122012-11-19 10:26:21 +05302184 if (of_find_property(np, "keep-power-in-suspend", NULL))
2185 pdata->pm_caps |= MMC_PM_KEEP_POWER;
2186
2187 if (of_find_property(np, "enable-sdio-wakeup", NULL))
2188 pdata->pm_caps |= MMC_PM_WAKE_SDIO_IRQ;
2189
Thomas Abrahamc91eab42012-09-17 18:16:40 +00002190 return pdata;
2191}
2192
2193#else /* CONFIG_OF */
2194static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
2195{
2196 return ERR_PTR(-EINVAL);
2197}
2198#endif /* CONFIG_OF */
2199
Shashidhar Hiremath62ca8032012-01-13 16:04:57 +05302200int dw_mci_probe(struct dw_mci *host)
Will Newtonf95f3852011-01-02 01:11:59 -05002201{
Arnd Bergmanne95baf12012-11-08 14:26:11 +00002202 const struct dw_mci_drv_data *drv_data = host->drv_data;
Shashidhar Hiremath62ca8032012-01-13 16:04:57 +05302203 int width, i, ret = 0;
Will Newtonf95f3852011-01-02 01:11:59 -05002204 u32 fifo_size;
Thomas Abraham1c2215b2012-09-17 18:16:37 +00002205 int init_slots = 0;
Will Newtonf95f3852011-01-02 01:11:59 -05002206
Thomas Abrahamc91eab42012-09-17 18:16:40 +00002207 if (!host->pdata) {
2208 host->pdata = dw_mci_parse_dt(host);
2209 if (IS_ERR(host->pdata)) {
2210 dev_err(host->dev, "platform data not available\n");
2211 return -EINVAL;
2212 }
Will Newtonf95f3852011-01-02 01:11:59 -05002213 }
2214
Shashidhar Hiremath62ca8032012-01-13 16:04:57 +05302215 if (!host->pdata->select_slot && host->pdata->num_slots > 1) {
Thomas Abraham4a909202012-09-17 18:16:35 +00002216 dev_err(host->dev,
Will Newtonf95f3852011-01-02 01:11:59 -05002217 "Platform data must supply select_slot function\n");
Shashidhar Hiremath62ca8032012-01-13 16:04:57 +05302218 return -ENODEV;
Will Newtonf95f3852011-01-02 01:11:59 -05002219 }
2220
Seungwon Jeon780f22a2012-11-28 19:26:03 +09002221 host->biu_clk = devm_clk_get(host->dev, "biu");
Thomas Abrahamf90a0612012-09-17 18:16:38 +00002222 if (IS_ERR(host->biu_clk)) {
2223 dev_dbg(host->dev, "biu clock not available\n");
2224 } else {
2225 ret = clk_prepare_enable(host->biu_clk);
2226 if (ret) {
2227 dev_err(host->dev, "failed to enable biu clock\n");
Thomas Abrahamf90a0612012-09-17 18:16:38 +00002228 return ret;
2229 }
Will Newtonf95f3852011-01-02 01:11:59 -05002230 }
2231
Seungwon Jeon780f22a2012-11-28 19:26:03 +09002232 host->ciu_clk = devm_clk_get(host->dev, "ciu");
Thomas Abrahamf90a0612012-09-17 18:16:38 +00002233 if (IS_ERR(host->ciu_clk)) {
2234 dev_dbg(host->dev, "ciu clock not available\n");
Doug Anderson3c6d89e2013-06-07 10:28:30 -07002235 host->bus_hz = host->pdata->bus_hz;
Thomas Abrahamf90a0612012-09-17 18:16:38 +00002236 } else {
2237 ret = clk_prepare_enable(host->ciu_clk);
2238 if (ret) {
2239 dev_err(host->dev, "failed to enable ciu clock\n");
Thomas Abrahamf90a0612012-09-17 18:16:38 +00002240 goto err_clk_biu;
2241 }
Thomas Abrahamf90a0612012-09-17 18:16:38 +00002242
Doug Anderson3c6d89e2013-06-07 10:28:30 -07002243 if (host->pdata->bus_hz) {
2244 ret = clk_set_rate(host->ciu_clk, host->pdata->bus_hz);
2245 if (ret)
2246 dev_warn(host->dev,
 2247				 "Unable to set bus rate to %u\n",
2248 host->pdata->bus_hz);
2249 }
Thomas Abrahamf90a0612012-09-17 18:16:38 +00002250 host->bus_hz = clk_get_rate(host->ciu_clk);
Doug Anderson3c6d89e2013-06-07 10:28:30 -07002251 }
Thomas Abrahamf90a0612012-09-17 18:16:38 +00002252
Yuvaraj Kumar C D002f0d52013-08-31 00:12:19 +09002253 if (drv_data && drv_data->init) {
2254 ret = drv_data->init(host);
2255 if (ret) {
2256 dev_err(host->dev,
2257 "implementation specific init failed\n");
2258 goto err_clk_ciu;
2259 }
2260 }
2261
James Hogancb27a842012-10-16 09:43:08 +01002262 if (drv_data && drv_data->setup_clock) {
2263 ret = drv_data->setup_clock(host);
Thomas Abraham800d78b2012-09-17 18:16:42 +00002264 if (ret) {
2265 dev_err(host->dev,
2266 "implementation specific clock setup failed\n");
2267 goto err_clk_ciu;
2268 }
2269 }
2270
Mark Browna55d6ff2013-07-29 21:55:27 +01002271 host->vmmc = devm_regulator_get_optional(host->dev, "vmmc");
Doug Anderson870556a2013-06-07 10:28:29 -07002272 if (IS_ERR(host->vmmc)) {
2273 ret = PTR_ERR(host->vmmc);
2274 if (ret == -EPROBE_DEFER)
2275 goto err_clk_ciu;
2276
2277 dev_info(host->dev, "no vmmc regulator found: %d\n", ret);
2278 host->vmmc = NULL;
2279 } else {
2280 ret = regulator_enable(host->vmmc);
2281 if (ret) {
2282 if (ret != -EPROBE_DEFER)
2283 dev_err(host->dev,
2284 "regulator_enable fail: %d\n", ret);
2285 goto err_clk_ciu;
2286 }
2287 }
2288
Thomas Abrahamf90a0612012-09-17 18:16:38 +00002289 if (!host->bus_hz) {
2290 dev_err(host->dev,
2291 "Platform data must supply bus speed\n");
2292 ret = -ENODEV;
Doug Anderson870556a2013-06-07 10:28:29 -07002293 goto err_regulator;
Thomas Abrahamf90a0612012-09-17 18:16:38 +00002294 }
2295
Shashidhar Hiremath62ca8032012-01-13 16:04:57 +05302296 host->quirks = host->pdata->quirks;
Will Newtonf95f3852011-01-02 01:11:59 -05002297
2298 spin_lock_init(&host->lock);
2299 INIT_LIST_HEAD(&host->queue);
2300
Will Newtonf95f3852011-01-02 01:11:59 -05002301 /*
2302 * Get the host data width - this assumes that HCON has been set with
2303 * the correct values.
2304 */
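	/* HCON[9:7]: 0 = 16-bit, 1 = 32-bit, 2 = 64-bit host data width */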
2305 i = (mci_readl(host, HCON) >> 7) & 0x7;
2306 if (!i) {
2307 host->push_data = dw_mci_push_data16;
2308 host->pull_data = dw_mci_pull_data16;
2309 width = 16;
2310 host->data_shift = 1;
2311 } else if (i == 2) {
2312 host->push_data = dw_mci_push_data64;
2313 host->pull_data = dw_mci_pull_data64;
2314 width = 64;
2315 host->data_shift = 3;
2316 } else {
2317 /* Check for a reserved value, and warn if it is */
2318 WARN((i != 1),
2319 "HCON reports a reserved host data width!\n"
2320 "Defaulting to 32-bit access.\n");
2321 host->push_data = dw_mci_push_data32;
2322 host->pull_data = dw_mci_pull_data32;
2323 width = 32;
2324 host->data_shift = 2;
2325 }
2326
2327 /* Reset all blocks */
Thomas Abraham4a909202012-09-17 18:16:35 +00002328	if (!mci_wait_reset(host->dev, host)) {
Seungwon Jeon141a7122012-05-22 13:01:03 +09002329		ret = -ENODEV;
		goto err_regulator;
	}
2330
2331 host->dma_ops = host->pdata->dma_ops;
2332 dw_mci_init_dma(host);
Will Newtonf95f3852011-01-02 01:11:59 -05002333
2334 /* Clear the interrupts for the host controller */
2335 mci_writel(host, RINTSTS, 0xFFFFFFFF);
2336 mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */
2337
2338 /* Put in max timeout */
2339 mci_writel(host, TMOUT, 0xFFFFFFFF);
2340
2341 /*
 2342	 * FIFO threshold settings: RxMark = fifo_size / 2 - 1,
 2343	 * Tx Mark = fifo_size / 2, DMA Size = 8
2344 */
James Hoganb86d8252011-06-24 13:57:18 +01002345 if (!host->pdata->fifo_depth) {
2346 /*
2347 * Power-on value of RX_WMark is FIFO_DEPTH-1, but this may
2348 * have been overwritten by the bootloader, just like we're
2349 * about to do, so if you know the value for your hardware, you
2350 * should put it in the platform data.
2351 */
2352 fifo_size = mci_readl(host, FIFOTH);
Jaehoon Chung8234e862012-01-11 09:28:21 +00002353 fifo_size = 1 + ((fifo_size >> 16) & 0xfff);
James Hoganb86d8252011-06-24 13:57:18 +01002354 } else {
2355 fifo_size = host->pdata->fifo_depth;
2356 }
2357 host->fifo_depth = fifo_size;
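	/*
	 * FIFOTH: bits [30:28] MSIZE (0x2 = burst of 8), [27:16] RX watermark,
	 * [11:0] TX watermark.
	 */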
Jaehoon Chunge61cf112011-03-17 20:32:33 +09002358 host->fifoth_val = ((0x2 << 28) | ((fifo_size/2 - 1) << 16) |
2359 ((fifo_size/2) << 0));
2360 mci_writel(host, FIFOTH, host->fifoth_val);
Will Newtonf95f3852011-01-02 01:11:59 -05002361
2362 /* disable clock to CIU */
2363 mci_writel(host, CLKENA, 0);
2364 mci_writel(host, CLKSRC, 0);
2365
James Hogan63008762013-03-12 10:43:54 +00002366 /*
 2367	 * The data offset changed in the 2.40a spec, so check the version ID
 2368	 * and set the offset for the DATA register accordingly.
2369 */
2370 host->verid = SDMMC_GET_VERID(mci_readl(host, VERID));
2371 dev_info(host->dev, "Version ID is %04x\n", host->verid);
2372
2373 if (host->verid < DW_MMC_240A)
2374 host->data_offset = DATA_OFFSET;
2375 else
2376 host->data_offset = DATA_240A_OFFSET;
2377
Will Newtonf95f3852011-01-02 01:11:59 -05002378 tasklet_init(&host->tasklet, dw_mci_tasklet_func, (unsigned long)host);
Thomas Abraham95dcc2c2012-05-01 14:57:36 -07002379 host->card_workqueue = alloc_workqueue("dw-mci-card",
James Hogan1791b13e2011-06-24 13:55:55 +01002380 WQ_MEM_RECLAIM | WQ_NON_REENTRANT, 1);
Wei Yongjunef7aef92013-04-19 09:25:45 +08002381 if (!host->card_workqueue) {
2382 ret = -ENOMEM;
James Hogan1791b13e2011-06-24 13:55:55 +01002383 goto err_dmaunmap;
Wei Yongjunef7aef92013-04-19 09:25:45 +08002384 }
James Hogan1791b13e2011-06-24 13:55:55 +01002385 INIT_WORK(&host->card_work, dw_mci_work_routine_card);
Seungwon Jeon780f22a2012-11-28 19:26:03 +09002386 ret = devm_request_irq(host->dev, host->irq, dw_mci_interrupt,
2387 host->irq_flags, "dw-mci", host);
Will Newtonf95f3852011-01-02 01:11:59 -05002388 if (ret)
James Hogan1791b13e2011-06-24 13:55:55 +01002389 goto err_workqueue;
Will Newtonf95f3852011-01-02 01:11:59 -05002390
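	/* HCON[5:1] holds the number of card slots minus one */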
Will Newtonf95f3852011-01-02 01:11:59 -05002391 if (host->pdata->num_slots)
2392 host->num_slots = host->pdata->num_slots;
2393 else
2394 host->num_slots = ((mci_readl(host, HCON) >> 1) & 0x1F) + 1;
2395
Yuvaraj CD2da1d7f2012-10-08 14:29:51 +05302396 /*
2397 * Enable interrupts for command done, data over, data empty, card det,
2398 * receive ready and error such as transmit, receive timeout, crc error
2399 */
2400 mci_writel(host, RINTSTS, 0xFFFFFFFF);
2401 mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER |
2402 SDMMC_INT_TXDR | SDMMC_INT_RXDR |
2403 DW_MCI_ERROR_FLAGS | SDMMC_INT_CD);
2404 mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE); /* Enable mci interrupt */
2405
2406 dev_info(host->dev, "DW MMC controller at irq %d, "
2407 "%d bit host data width, "
2408 "%u deep fifo\n",
2409 host->irq, width, fifo_size);
2410
Will Newtonf95f3852011-01-02 01:11:59 -05002411 /* We need at least one slot to succeed */
2412 for (i = 0; i < host->num_slots; i++) {
2413 ret = dw_mci_init_slot(host, i);
Thomas Abraham1c2215b2012-09-17 18:16:37 +00002414 if (ret)
2415 dev_dbg(host->dev, "slot %d init failed\n", i);
2416 else
2417 init_slots++;
2418 }
2419
2420 if (init_slots) {
2421 dev_info(host->dev, "%d slots initialized\n", init_slots);
2422 } else {
2423 dev_dbg(host->dev, "attempted to initialize %d slots, "
2424 "but failed on all\n", host->num_slots);
Seungwon Jeon780f22a2012-11-28 19:26:03 +09002425 goto err_workqueue;
Will Newtonf95f3852011-01-02 01:11:59 -05002426 }
2427
Will Newtonf95f3852011-01-02 01:11:59 -05002428 if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO)
Thomas Abraham4a909202012-09-17 18:16:35 +00002429 dev_info(host->dev, "Internal DMAC interrupt fix enabled.\n");
Will Newtonf95f3852011-01-02 01:11:59 -05002430
2431 return 0;
2432
James Hogan1791b13e2011-06-24 13:55:55 +01002433err_workqueue:
Thomas Abraham95dcc2c2012-05-01 14:57:36 -07002434 destroy_workqueue(host->card_workqueue);
James Hogan1791b13e2011-06-24 13:55:55 +01002435
Will Newtonf95f3852011-01-02 01:11:59 -05002436err_dmaunmap:
2437 if (host->use_dma && host->dma_ops->exit)
2438 host->dma_ops->exit(host);
Will Newtonf95f3852011-01-02 01:11:59 -05002439
Doug Anderson870556a2013-06-07 10:28:29 -07002440err_regulator:
Seungwon Jeon780f22a2012-11-28 19:26:03 +09002441 if (host->vmmc)
Jaehoon Chungc07946a2011-02-25 11:08:14 +09002442 regulator_disable(host->vmmc);
Thomas Abrahamf90a0612012-09-17 18:16:38 +00002443
2444err_clk_ciu:
Seungwon Jeon780f22a2012-11-28 19:26:03 +09002445 if (!IS_ERR(host->ciu_clk))
Thomas Abrahamf90a0612012-09-17 18:16:38 +00002446 clk_disable_unprepare(host->ciu_clk);
Seungwon Jeon780f22a2012-11-28 19:26:03 +09002447
Thomas Abrahamf90a0612012-09-17 18:16:38 +00002448err_clk_biu:
Seungwon Jeon780f22a2012-11-28 19:26:03 +09002449 if (!IS_ERR(host->biu_clk))
Thomas Abrahamf90a0612012-09-17 18:16:38 +00002450 clk_disable_unprepare(host->biu_clk);
Seungwon Jeon780f22a2012-11-28 19:26:03 +09002451
Will Newtonf95f3852011-01-02 01:11:59 -05002452 return ret;
2453}
Shashidhar Hiremath62ca8032012-01-13 16:04:57 +05302454EXPORT_SYMBOL(dw_mci_probe);
Will Newtonf95f3852011-01-02 01:11:59 -05002455
Shashidhar Hiremath62ca8032012-01-13 16:04:57 +05302456void dw_mci_remove(struct dw_mci *host)
Will Newtonf95f3852011-01-02 01:11:59 -05002457{
Will Newtonf95f3852011-01-02 01:11:59 -05002458 int i;
2459
2460 mci_writel(host, RINTSTS, 0xFFFFFFFF);
2461 mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */
2462
Will Newtonf95f3852011-01-02 01:11:59 -05002463 for (i = 0; i < host->num_slots; i++) {
Thomas Abraham4a909202012-09-17 18:16:35 +00002464 dev_dbg(host->dev, "remove slot %d\n", i);
Will Newtonf95f3852011-01-02 01:11:59 -05002465 if (host->slot[i])
2466 dw_mci_cleanup_slot(host->slot[i], i);
2467 }
2468
2469 /* disable clock to CIU */
2470 mci_writel(host, CLKENA, 0);
2471 mci_writel(host, CLKSRC, 0);
2472
Thomas Abraham95dcc2c2012-05-01 14:57:36 -07002473 destroy_workqueue(host->card_workqueue);
Will Newtonf95f3852011-01-02 01:11:59 -05002474
2475 if (host->use_dma && host->dma_ops->exit)
2476 host->dma_ops->exit(host);
2477
Seungwon Jeon780f22a2012-11-28 19:26:03 +09002478 if (host->vmmc)
Jaehoon Chungc07946a2011-02-25 11:08:14 +09002479 regulator_disable(host->vmmc);
Jaehoon Chungc07946a2011-02-25 11:08:14 +09002480
Thomas Abrahamf90a0612012-09-17 18:16:38 +00002481 if (!IS_ERR(host->ciu_clk))
2482 clk_disable_unprepare(host->ciu_clk);
Seungwon Jeon780f22a2012-11-28 19:26:03 +09002483
Thomas Abrahamf90a0612012-09-17 18:16:38 +00002484 if (!IS_ERR(host->biu_clk))
2485 clk_disable_unprepare(host->biu_clk);
Will Newtonf95f3852011-01-02 01:11:59 -05002486}
Shashidhar Hiremath62ca8032012-01-13 16:04:57 +05302487EXPORT_SYMBOL(dw_mci_remove);
2488
2489
Will Newtonf95f3852011-01-02 01:11:59 -05002490
Jaehoon Chung6fe88902011-12-08 19:23:03 +09002491#ifdef CONFIG_PM_SLEEP
Will Newtonf95f3852011-01-02 01:11:59 -05002492/*
2493 * TODO: we should probably disable the clock to the card in the suspend path.
2494 */
Shashidhar Hiremath62ca8032012-01-13 16:04:57 +05302495int dw_mci_suspend(struct dw_mci *host)
Will Newtonf95f3852011-01-02 01:11:59 -05002496{
Shashidhar Hiremath62ca8032012-01-13 16:04:57 +05302497 int i, ret = 0;
Will Newtonf95f3852011-01-02 01:11:59 -05002498
2499 for (i = 0; i < host->num_slots; i++) {
2500 struct dw_mci_slot *slot = host->slot[i];
2501 if (!slot)
2502 continue;
2503 ret = mmc_suspend_host(slot->mmc);
2504 if (ret < 0) {
2505 while (--i >= 0) {
2506 slot = host->slot[i];
2507 if (slot)
2508 mmc_resume_host(host->slot[i]->mmc);
2509 }
2510 return ret;
2511 }
2512 }
2513
Jaehoon Chungc07946a2011-02-25 11:08:14 +09002514 if (host->vmmc)
2515 regulator_disable(host->vmmc);
2516
Will Newtonf95f3852011-01-02 01:11:59 -05002517 return 0;
2518}
Shashidhar Hiremath62ca8032012-01-13 16:04:57 +05302519EXPORT_SYMBOL(dw_mci_suspend);
Will Newtonf95f3852011-01-02 01:11:59 -05002520
Shashidhar Hiremath62ca8032012-01-13 16:04:57 +05302521int dw_mci_resume(struct dw_mci *host)
Will Newtonf95f3852011-01-02 01:11:59 -05002522{
2523 int i, ret;
Will Newtonf95f3852011-01-02 01:11:59 -05002524
Sachin Kamatf2f942c2013-04-04 11:25:10 +05302525 if (host->vmmc) {
2526 ret = regulator_enable(host->vmmc);
2527 if (ret) {
2528 dev_err(host->dev,
2529 "failed to enable regulator: %d\n", ret);
2530 return ret;
2531 }
2532 }
Jaehoon Chung1d6c4e02011-05-11 15:52:39 +09002533
Thomas Abraham4a909202012-09-17 18:16:35 +00002534 if (!mci_wait_reset(host->dev, host)) {
Jaehoon Chunge61cf112011-03-17 20:32:33 +09002535 ret = -ENODEV;
2536 return ret;
2537 }
2538
Jonathan Kliegman3bfe6192012-06-14 13:31:55 -04002539 if (host->use_dma && host->dma_ops->init)
Seungwon Jeon141a7122012-05-22 13:01:03 +09002540 host->dma_ops->init(host);
2541
Jaehoon Chunge61cf112011-03-17 20:32:33 +09002542 /* Restore the old value at FIFOTH register */
2543 mci_writel(host, FIFOTH, host->fifoth_val);
2544
Doug Anderson2eb29442013-08-31 00:11:49 +09002545 /* Put in max timeout */
2546 mci_writel(host, TMOUT, 0xFFFFFFFF);
2547
Jaehoon Chunge61cf112011-03-17 20:32:33 +09002548 mci_writel(host, RINTSTS, 0xFFFFFFFF);
2549 mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER |
2550 SDMMC_INT_TXDR | SDMMC_INT_RXDR |
2551 DW_MCI_ERROR_FLAGS | SDMMC_INT_CD);
2552 mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE);
2553
Will Newtonf95f3852011-01-02 01:11:59 -05002554 for (i = 0; i < host->num_slots; i++) {
2555 struct dw_mci_slot *slot = host->slot[i];
2556 if (!slot)
2557 continue;
Abhilash Kesavanab269122012-11-19 10:26:21 +05302558 if (slot->mmc->pm_flags & MMC_PM_KEEP_POWER) {
2559 dw_mci_set_ios(slot->mmc, &slot->mmc->ios);
2560 dw_mci_setup_bus(slot, true);
2561 }
2562
Will Newtonf95f3852011-01-02 01:11:59 -05002563 ret = mmc_resume_host(host->slot[i]->mmc);
2564 if (ret < 0)
2565 return ret;
2566 }
Will Newtonf95f3852011-01-02 01:11:59 -05002567 return 0;
2568}
Shashidhar Hiremath62ca8032012-01-13 16:04:57 +05302569EXPORT_SYMBOL(dw_mci_resume);
Jaehoon Chung6fe88902011-12-08 19:23:03 +09002570#endif /* CONFIG_PM_SLEEP */
2571
Will Newtonf95f3852011-01-02 01:11:59 -05002572static int __init dw_mci_init(void)
2573{
Sachin Kamat8e1c4e42013-04-04 11:25:11 +05302574	pr_info("Synopsys DesignWare Multimedia Card Interface Driver\n");
Shashidhar Hiremath62ca8032012-01-13 16:04:57 +05302575 return 0;
Will Newtonf95f3852011-01-02 01:11:59 -05002576}
2577
2578static void __exit dw_mci_exit(void)
2579{
Will Newtonf95f3852011-01-02 01:11:59 -05002580}
2581
2582module_init(dw_mci_init);
2583module_exit(dw_mci_exit);
2584
2585MODULE_DESCRIPTION("DW Multimedia Card Interface driver");
2586MODULE_AUTHOR("NXP Semiconductor VietNam");
2587MODULE_AUTHOR("Imagination Technologies Ltd");
2588MODULE_LICENSE("GPL v2");