blob: 9ef7c4ebed239482e791556d635adf37da5fa790 [file] [log] [blame]
Will Newtonf95f3852011-01-02 01:11:59 -05001/*
2 * Synopsys DesignWare Multimedia Card Interface driver
3 * (Based on NXP driver for lpc 31xx)
4 *
5 * Copyright (C) 2009 NXP Semiconductors
6 * Copyright (C) 2009, 2010 Imagination Technologies Ltd.
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 */
13
14#include <linux/blkdev.h>
15#include <linux/clk.h>
16#include <linux/debugfs.h>
17#include <linux/device.h>
18#include <linux/dma-mapping.h>
19#include <linux/err.h>
20#include <linux/init.h>
21#include <linux/interrupt.h>
22#include <linux/ioport.h>
23#include <linux/module.h>
24#include <linux/platform_device.h>
Will Newtonf95f3852011-01-02 01:11:59 -050025#include <linux/seq_file.h>
26#include <linux/slab.h>
27#include <linux/stat.h>
28#include <linux/delay.h>
29#include <linux/irq.h>
30#include <linux/mmc/host.h>
31#include <linux/mmc/mmc.h>
Seungwon Jeon90c21432013-08-31 00:14:05 +090032#include <linux/mmc/sdio.h>
Will Newtonf95f3852011-01-02 01:11:59 -050033#include <linux/mmc/dw_mmc.h>
34#include <linux/bitops.h>
Jaehoon Chungc07946a2011-02-25 11:08:14 +090035#include <linux/regulator/consumer.h>
James Hogan1791b13e2011-06-24 13:55:55 +010036#include <linux/workqueue.h>
Thomas Abrahamc91eab42012-09-17 18:16:40 +000037#include <linux/of.h>
Doug Anderson55a6ceb2013-01-11 17:03:53 +000038#include <linux/of_gpio.h>
Zhangfei Gaobf626e52014-01-09 22:35:10 +080039#include <linux/mmc/slot-gpio.h>
Will Newtonf95f3852011-01-02 01:11:59 -050040
41#include "dw_mmc.h"
42
43/* Common flag combinations */
Jaehoon Chung3f7eec62013-05-27 13:47:57 +090044#define DW_MCI_DATA_ERROR_FLAGS (SDMMC_INT_DRTO | SDMMC_INT_DCRC | \
Will Newtonf95f3852011-01-02 01:11:59 -050045 SDMMC_INT_HTO | SDMMC_INT_SBE | \
46 SDMMC_INT_EBE)
47#define DW_MCI_CMD_ERROR_FLAGS (SDMMC_INT_RTO | SDMMC_INT_RCRC | \
48 SDMMC_INT_RESP_ERR)
49#define DW_MCI_ERROR_FLAGS (DW_MCI_DATA_ERROR_FLAGS | \
50 DW_MCI_CMD_ERROR_FLAGS | SDMMC_INT_HLE)
51#define DW_MCI_SEND_STATUS 1
52#define DW_MCI_RECV_STATUS 2
53#define DW_MCI_DMA_THRESHOLD 16
54
Seungwon Jeon1f44a2a2013-08-31 00:13:31 +090055#define DW_MCI_FREQ_MAX 200000000 /* unit: HZ */
56#define DW_MCI_FREQ_MIN 400000 /* unit: HZ */
57
Will Newtonf95f3852011-01-02 01:11:59 -050058#ifdef CONFIG_MMC_DW_IDMAC
Joonyoung Shimfc79a4d2013-04-26 15:35:22 +090059#define IDMAC_INT_CLR (SDMMC_IDMAC_INT_AI | SDMMC_IDMAC_INT_NI | \
60 SDMMC_IDMAC_INT_CES | SDMMC_IDMAC_INT_DU | \
61 SDMMC_IDMAC_INT_FBE | SDMMC_IDMAC_INT_RI | \
62 SDMMC_IDMAC_INT_TI)
63
/*
 * In-memory layout of one descriptor for the controller's internal DMA
 * controller (IDMAC), 32-bit address variant.  The hardware walks a ring
 * of these (see dw_mci_idmac_init()); des3 chains to the next descriptor.
 */
struct idmac_desc {
	u32		des0;	/* Control Descriptor */
#define IDMAC_DES0_DIC	BIT(1)	/* disable interrupt on completion */
#define IDMAC_DES0_LD	BIT(2)	/* last descriptor of the transfer */
#define IDMAC_DES0_FD	BIT(3)	/* first descriptor of the transfer */
#define IDMAC_DES0_CH	BIT(4)	/* des3 points at the next descriptor */
#define IDMAC_DES0_ER	BIT(5)	/* end of descriptor ring */
#define IDMAC_DES0_CES	BIT(30)	/* card error summary (set by hardware) */
#define IDMAC_DES0_OWN	BIT(31)	/* descriptor is owned by the IDMAC */

	u32		des1;	/* Buffer sizes */
	/* Buffer-1 size lives in des1[12:0]; bits [25:13] (buffer 2) kept. */
#define IDMAC_SET_BUFFER1_SIZE(d, s) \
	((d)->des1 = ((d)->des1 & 0x03ffe000) | ((s) & 0x1fff))

	u32		des2;	/* buffer 1 physical address */

	u32		des3;	/* buffer 2 physical address */
};
82#endif /* CONFIG_MMC_DW_IDMAC */
83
/*
 * 64-byte tuning block pattern for a 4-bit bus — presumably the pattern
 * defined by the SD/eMMC specifications for CMD19/CMD21 tuning; verify
 * against the spec before reusing elsewhere.
 */
static const u8 tuning_blk_pattern_4bit[] = {
	0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc,
	0xc3, 0x3c, 0xcc, 0xff, 0xfe, 0xff, 0xfe, 0xef,
	0xff, 0xdf, 0xff, 0xdd, 0xff, 0xfb, 0xff, 0xfb,
	0xbf, 0xff, 0x7f, 0xff, 0x77, 0xf7, 0xbd, 0xef,
	0xff, 0xf0, 0xff, 0xf0, 0x0f, 0xfc, 0xcc, 0x3c,
	0xcc, 0x33, 0xcc, 0xcf, 0xff, 0xef, 0xff, 0xee,
	0xff, 0xfd, 0xff, 0xfd, 0xdf, 0xff, 0xbf, 0xff,
	0xbb, 0xff, 0xf7, 0xff, 0xf7, 0x7f, 0x7b, 0xde,
};
Will Newtonf95f3852011-01-02 01:11:59 -050094
/*
 * 128-byte tuning block pattern for an 8-bit bus — presumably the pattern
 * defined by the eMMC specification for HS200 tuning (CMD21); verify
 * against the spec before reusing elsewhere.
 */
static const u8 tuning_blk_pattern_8bit[] = {
	0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00,
	0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 0xcc,
	0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 0xff,
	0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 0xff,
	0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 0xdd,
	0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 0xbb,
	0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 0xff,
	0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 0xff,
	0xff, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00,
	0x00, 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc,
	0xcc, 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff,
	0xff, 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee,
	0xff, 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd,
	0xdd, 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff,
	0xbb, 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff,
	0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee,
};
113
Seungwon Jeon31bff452013-08-31 00:14:23 +0900114static inline bool dw_mci_fifo_reset(struct dw_mci *host);
115static inline bool dw_mci_ctrl_all_reset(struct dw_mci *host);
116
Will Newtonf95f3852011-01-02 01:11:59 -0500117#if defined(CONFIG_DEBUG_FS)
118static int dw_mci_req_show(struct seq_file *s, void *v)
119{
120 struct dw_mci_slot *slot = s->private;
121 struct mmc_request *mrq;
122 struct mmc_command *cmd;
123 struct mmc_command *stop;
124 struct mmc_data *data;
125
126 /* Make sure we get a consistent snapshot */
127 spin_lock_bh(&slot->host->lock);
128 mrq = slot->mrq;
129
130 if (mrq) {
131 cmd = mrq->cmd;
132 data = mrq->data;
133 stop = mrq->stop;
134
135 if (cmd)
136 seq_printf(s,
137 "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
138 cmd->opcode, cmd->arg, cmd->flags,
139 cmd->resp[0], cmd->resp[1], cmd->resp[2],
140 cmd->resp[2], cmd->error);
141 if (data)
142 seq_printf(s, "DATA %u / %u * %u flg %x err %d\n",
143 data->bytes_xfered, data->blocks,
144 data->blksz, data->flags, data->error);
145 if (stop)
146 seq_printf(s,
147 "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
148 stop->opcode, stop->arg, stop->flags,
149 stop->resp[0], stop->resp[1], stop->resp[2],
150 stop->resp[2], stop->error);
151 }
152
153 spin_unlock_bh(&slot->host->lock);
154
155 return 0;
156}
157
/* debugfs open hook: bind dw_mci_req_show to the slot in inode->i_private. */
static int dw_mci_req_open(struct inode *inode, struct file *file)
{
	return single_open(file, dw_mci_req_show, inode->i_private);
}
162
/* File operations for the per-slot debugfs "req" file (read-only seq_file). */
static const struct file_operations dw_mci_req_fops = {
	.owner		= THIS_MODULE,
	.open		= dw_mci_req_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
170
171static int dw_mci_regs_show(struct seq_file *s, void *v)
172{
173 seq_printf(s, "STATUS:\t0x%08x\n", SDMMC_STATUS);
174 seq_printf(s, "RINTSTS:\t0x%08x\n", SDMMC_RINTSTS);
175 seq_printf(s, "CMD:\t0x%08x\n", SDMMC_CMD);
176 seq_printf(s, "CTRL:\t0x%08x\n", SDMMC_CTRL);
177 seq_printf(s, "INTMASK:\t0x%08x\n", SDMMC_INTMASK);
178 seq_printf(s, "CLKENA:\t0x%08x\n", SDMMC_CLKENA);
179
180 return 0;
181}
182
/* debugfs open hook: bind dw_mci_regs_show to the host in inode->i_private. */
static int dw_mci_regs_open(struct inode *inode, struct file *file)
{
	return single_open(file, dw_mci_regs_show, inode->i_private);
}
187
/* File operations for the per-host debugfs "regs" file (read-only seq_file). */
static const struct file_operations dw_mci_regs_fops = {
	.owner		= THIS_MODULE,
	.open		= dw_mci_regs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
195
196static void dw_mci_init_debugfs(struct dw_mci_slot *slot)
197{
198 struct mmc_host *mmc = slot->mmc;
199 struct dw_mci *host = slot->host;
200 struct dentry *root;
201 struct dentry *node;
202
203 root = mmc->debugfs_root;
204 if (!root)
205 return;
206
207 node = debugfs_create_file("regs", S_IRUSR, root, host,
208 &dw_mci_regs_fops);
209 if (!node)
210 goto err;
211
212 node = debugfs_create_file("req", S_IRUSR, root, slot,
213 &dw_mci_req_fops);
214 if (!node)
215 goto err;
216
217 node = debugfs_create_u32("state", S_IRUSR, root, (u32 *)&host->state);
218 if (!node)
219 goto err;
220
221 node = debugfs_create_x32("pending_events", S_IRUSR, root,
222 (u32 *)&host->pending_events);
223 if (!node)
224 goto err;
225
226 node = debugfs_create_x32("completed_events", S_IRUSR, root,
227 (u32 *)&host->completed_events);
228 if (!node)
229 goto err;
230
231 return;
232
233err:
234 dev_err(&mmc->class_dev, "failed to initialize debugfs for slot\n");
235}
236#endif /* defined(CONFIG_DEBUG_FS) */
237
Will Newtonf95f3852011-01-02 01:11:59 -0500238static u32 dw_mci_prepare_command(struct mmc_host *mmc, struct mmc_command *cmd)
239{
240 struct mmc_data *data;
Thomas Abraham800d78b2012-09-17 18:16:42 +0000241 struct dw_mci_slot *slot = mmc_priv(mmc);
Arnd Bergmanne95baf12012-11-08 14:26:11 +0000242 const struct dw_mci_drv_data *drv_data = slot->host->drv_data;
Will Newtonf95f3852011-01-02 01:11:59 -0500243 u32 cmdr;
244 cmd->error = -EINPROGRESS;
245
246 cmdr = cmd->opcode;
247
Seungwon Jeon90c21432013-08-31 00:14:05 +0900248 if (cmd->opcode == MMC_STOP_TRANSMISSION ||
249 cmd->opcode == MMC_GO_IDLE_STATE ||
250 cmd->opcode == MMC_GO_INACTIVE_STATE ||
251 (cmd->opcode == SD_IO_RW_DIRECT &&
252 ((cmd->arg >> 9) & 0x1FFFF) == SDIO_CCCR_ABORT))
Will Newtonf95f3852011-01-02 01:11:59 -0500253 cmdr |= SDMMC_CMD_STOP;
Jaehoon Chung4a1b27a2014-03-03 11:36:44 +0900254 else if (cmd->opcode != MMC_SEND_STATUS && cmd->data)
255 cmdr |= SDMMC_CMD_PRV_DAT_WAIT;
Will Newtonf95f3852011-01-02 01:11:59 -0500256
257 if (cmd->flags & MMC_RSP_PRESENT) {
258 /* We expect a response, so set this bit */
259 cmdr |= SDMMC_CMD_RESP_EXP;
260 if (cmd->flags & MMC_RSP_136)
261 cmdr |= SDMMC_CMD_RESP_LONG;
262 }
263
264 if (cmd->flags & MMC_RSP_CRC)
265 cmdr |= SDMMC_CMD_RESP_CRC;
266
267 data = cmd->data;
268 if (data) {
269 cmdr |= SDMMC_CMD_DAT_EXP;
270 if (data->flags & MMC_DATA_STREAM)
271 cmdr |= SDMMC_CMD_STRM_MODE;
272 if (data->flags & MMC_DATA_WRITE)
273 cmdr |= SDMMC_CMD_DAT_WR;
274 }
275
James Hogancb27a842012-10-16 09:43:08 +0100276 if (drv_data && drv_data->prepare_command)
277 drv_data->prepare_command(slot->host, &cmdr);
Thomas Abraham800d78b2012-09-17 18:16:42 +0000278
Will Newtonf95f3852011-01-02 01:11:59 -0500279 return cmdr;
280}
281
Seungwon Jeon90c21432013-08-31 00:14:05 +0900282static u32 dw_mci_prep_stop_abort(struct dw_mci *host, struct mmc_command *cmd)
283{
284 struct mmc_command *stop;
285 u32 cmdr;
286
287 if (!cmd->data)
288 return 0;
289
290 stop = &host->stop_abort;
291 cmdr = cmd->opcode;
292 memset(stop, 0, sizeof(struct mmc_command));
293
294 if (cmdr == MMC_READ_SINGLE_BLOCK ||
295 cmdr == MMC_READ_MULTIPLE_BLOCK ||
296 cmdr == MMC_WRITE_BLOCK ||
297 cmdr == MMC_WRITE_MULTIPLE_BLOCK) {
298 stop->opcode = MMC_STOP_TRANSMISSION;
299 stop->arg = 0;
300 stop->flags = MMC_RSP_R1B | MMC_CMD_AC;
301 } else if (cmdr == SD_IO_RW_EXTENDED) {
302 stop->opcode = SD_IO_RW_DIRECT;
303 stop->arg |= (1 << 31) | (0 << 28) | (SDIO_CCCR_ABORT << 9) |
304 ((cmd->arg >> 28) & 0x7);
305 stop->flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_AC;
306 } else {
307 return 0;
308 }
309
310 cmdr = stop->opcode | SDMMC_CMD_STOP |
311 SDMMC_CMD_RESP_CRC | SDMMC_CMD_RESP_EXP;
312
313 return cmdr;
314}
315
/*
 * Issue @cmd on the controller: record it as the in-flight command, load
 * the argument register, then set the START bit.
 */
static void dw_mci_start_command(struct dw_mci *host,
				 struct mmc_command *cmd, u32 cmd_flags)
{
	host->cmd = cmd;
	dev_vdbg(host->dev,
		 "start command: ARGR=0x%08x CMDR=0x%08x\n",
		 cmd->arg, cmd_flags);

	mci_writel(host, CMDARG, cmd->arg);
	/* Argument must be visible to the controller before START is set. */
	wmb();

	mci_writel(host, CMD, cmd_flags | SDMMC_CMD_START);
}
329
Seungwon Jeon90c21432013-08-31 00:14:05 +0900330static inline void send_stop_abort(struct dw_mci *host, struct mmc_data *data)
Will Newtonf95f3852011-01-02 01:11:59 -0500331{
Seungwon Jeon90c21432013-08-31 00:14:05 +0900332 struct mmc_command *stop = data->stop ? data->stop : &host->stop_abort;
333 dw_mci_start_command(host, stop, host->stop_cmdr);
Will Newtonf95f3852011-01-02 01:11:59 -0500334}
335
336/* DMA interface functions */
/*
 * Abort any in-flight DMA and mark the transfer phase complete so the
 * state-machine tasklet can move on.
 */
static void dw_mci_stop_dma(struct dw_mci *host)
{
	if (host->using_dma) {
		host->dma_ops->stop(host);
		host->dma_ops->cleanup(host);
	}

	/* Data transfer was stopped by the interrupt handler */
	set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
}
347
Seungwon Jeon9aa51402012-02-06 16:55:07 +0900348static int dw_mci_get_dma_dir(struct mmc_data *data)
349{
350 if (data->flags & MMC_DATA_WRITE)
351 return DMA_TO_DEVICE;
352 else
353 return DMA_FROM_DEVICE;
354}
355
Jaehoon Chung9beee912012-02-16 11:19:38 +0900356#ifdef CONFIG_MMC_DW_IDMAC
Will Newtonf95f3852011-01-02 01:11:59 -0500357static void dw_mci_dma_cleanup(struct dw_mci *host)
358{
359 struct mmc_data *data = host->data;
360
361 if (data)
Seungwon Jeon9aa51402012-02-06 16:55:07 +0900362 if (!data->host_cookie)
Thomas Abraham4a909202012-09-17 18:16:35 +0000363 dma_unmap_sg(host->dev,
Seungwon Jeon9aa51402012-02-06 16:55:07 +0900364 data->sg,
365 data->sg_len,
366 dw_mci_get_dma_dir(data));
Will Newtonf95f3852011-01-02 01:11:59 -0500367}
368
Seungwon Jeon5ce9d962013-08-31 00:14:33 +0900369static void dw_mci_idmac_reset(struct dw_mci *host)
370{
371 u32 bmod = mci_readl(host, BMOD);
372 /* Software reset of DMA */
373 bmod |= SDMMC_IDMAC_SWRESET;
374 mci_writel(host, BMOD, bmod);
375}
376
Will Newtonf95f3852011-01-02 01:11:59 -0500377static void dw_mci_idmac_stop_dma(struct dw_mci *host)
378{
379 u32 temp;
380
381 /* Disable and reset the IDMAC interface */
382 temp = mci_readl(host, CTRL);
383 temp &= ~SDMMC_CTRL_USE_IDMAC;
384 temp |= SDMMC_CTRL_DMA_RESET;
385 mci_writel(host, CTRL, temp);
386
387 /* Stop the IDMAC running */
388 temp = mci_readl(host, BMOD);
Jaehoon Chunga5289a42011-02-25 11:08:13 +0900389 temp &= ~(SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB);
Seungwon Jeon5ce9d962013-08-31 00:14:33 +0900390 temp |= SDMMC_IDMAC_SWRESET;
Will Newtonf95f3852011-01-02 01:11:59 -0500391 mci_writel(host, BMOD, temp);
392}
393
394static void dw_mci_idmac_complete_dma(struct dw_mci *host)
395{
396 struct mmc_data *data = host->data;
397
Thomas Abraham4a909202012-09-17 18:16:35 +0000398 dev_vdbg(host->dev, "DMA complete\n");
Will Newtonf95f3852011-01-02 01:11:59 -0500399
400 host->dma_ops->cleanup(host);
401
402 /*
403 * If the card was removed, data will be NULL. No point in trying to
404 * send the stop command or waiting for NBUSY in this case.
405 */
406 if (data) {
407 set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
408 tasklet_schedule(&host->tasklet);
409 }
410}
411
/*
 * Fill the IDMAC descriptor ring from the (already DMA-mapped)
 * scatterlist: one chained descriptor per sg entry, OWN set and
 * completion interrupts suppressed, then mark the first/last descriptors
 * and publish the ring to the hardware with a write barrier.
 */
static void dw_mci_translate_sglist(struct dw_mci *host, struct mmc_data *data,
				    unsigned int sg_len)
{
	int i;
	struct idmac_desc *desc = host->sg_cpu;

	for (i = 0; i < sg_len; i++, desc++) {
		unsigned int length = sg_dma_len(&data->sg[i]);
		u32 mem_addr = sg_dma_address(&data->sg[i]);

		/* Set the OWN bit and disable interrupts for this descriptor */
		desc->des0 = IDMAC_DES0_OWN | IDMAC_DES0_DIC | IDMAC_DES0_CH;

		/* Buffer length */
		IDMAC_SET_BUFFER1_SIZE(desc, length);

		/* Physical address to DMA to/from */
		desc->des2 = mem_addr;
	}

	/* Set first descriptor */
	desc = host->sg_cpu;
	desc->des0 |= IDMAC_DES0_FD;

	/*
	 * Set last descriptor.  NOTE(review): sg_cpu appears to be a void *,
	 * so this is byte arithmetic (GCC extension) — the explicit
	 * sizeof() scaling is what makes the index correct; confirm against
	 * the sg_cpu declaration in dw_mmc.h before touching this line.
	 * Interrupt-on-completion is re-enabled only for this descriptor.
	 */
	desc = host->sg_cpu + (i - 1) * sizeof(struct idmac_desc);
	desc->des0 &= ~(IDMAC_DES0_CH | IDMAC_DES0_DIC);
	desc->des0 |= IDMAC_DES0_LD;

	/* Descriptor writes must land before the IDMAC is started. */
	wmb();
}
443
/*
 * Kick off an IDMAC transfer: build the descriptor ring, select the
 * internal DMAC in CTRL, enable it in BMOD, then poke the poll-demand
 * register so the hardware starts fetching descriptors.
 */
static void dw_mci_idmac_start_dma(struct dw_mci *host, unsigned int sg_len)
{
	u32 temp;

	dw_mci_translate_sglist(host, host->data, sg_len);

	/* Select IDMAC interface */
	temp = mci_readl(host, CTRL);
	temp |= SDMMC_CTRL_USE_IDMAC;
	mci_writel(host, CTRL, temp);

	/* CTRL update must be visible before the IDMAC is enabled. */
	wmb();

	/* Enable the IDMAC */
	temp = mci_readl(host, BMOD);
	temp |= SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB;
	mci_writel(host, BMOD, temp);

	/* Start it running */
	mci_writel(host, PLDMND, 1);
}
465
/*
 * One-time IDMAC setup: forward-link the descriptor ring inside the
 * page at sg_cpu/sg_dma, close the ring with the end-of-ring bit,
 * software-reset the DMAC, and unmask only the TX/RX-complete (and
 * "normal summary") interrupts.  Always returns 0.
 */
static int dw_mci_idmac_init(struct dw_mci *host)
{
	struct idmac_desc *p;
	int i;

	/* Number of descriptors in the ring buffer */
	host->ring_size = PAGE_SIZE / sizeof(struct idmac_desc);

	/* Forward link the descriptor list */
	for (i = 0, p = host->sg_cpu; i < host->ring_size - 1; i++, p++)
		p->des3 = host->sg_dma + (sizeof(struct idmac_desc) * (i + 1));

	/* Set the last descriptor as the end-of-ring descriptor */
	p->des3 = host->sg_dma;
	p->des0 = IDMAC_DES0_ER;

	dw_mci_idmac_reset(host);

	/* Mask out interrupts - get Tx & Rx complete only */
	mci_writel(host, IDSTS, IDMAC_INT_CLR);
	mci_writel(host, IDINTEN, SDMMC_IDMAC_INT_NI | SDMMC_IDMAC_INT_RI |
		   SDMMC_IDMAC_INT_TI);

	/* Set the descriptor base address */
	mci_writel(host, DBADDR, host->sg_dma);
	return 0;
}
493
/* DMA ops vtable used when the controller's internal DMAC is compiled in. */
static const struct dw_mci_dma_ops dw_mci_idmac_ops = {
	.init = dw_mci_idmac_init,
	.start = dw_mci_idmac_start_dma,
	.stop = dw_mci_idmac_stop_dma,
	.complete = dw_mci_idmac_complete_dma,
	.cleanup = dw_mci_dma_cleanup,
};
501#endif /* CONFIG_MMC_DW_IDMAC */
502
/*
 * DMA-map @data's scatterlist if it is DMA-able.
 *
 * @next: true when called from the pre_req hook (the mapping is cached in
 *        data->host_cookie for the later submit path); false when called
 *        at submit time (a non-zero cookie means pre_req already mapped
 *        it, so just return that sg count).
 *
 * Returns the mapped sg count, or -EINVAL when the transfer is too short
 * or not word-aligned (caller falls back to PIO).
 */
static int dw_mci_pre_dma_transfer(struct dw_mci *host,
				   struct mmc_data *data,
				   bool next)
{
	struct scatterlist *sg;
	unsigned int i, sg_len;

	if (!next && data->host_cookie)
		return data->host_cookie;

	/*
	 * We don't do DMA on "complex" transfers, i.e. with
	 * non-word-aligned buffers or lengths. Also, we don't bother
	 * with all the DMA setup overhead for short transfers.
	 */
	if (data->blocks * data->blksz < DW_MCI_DMA_THRESHOLD)
		return -EINVAL;

	if (data->blksz & 3)
		return -EINVAL;

	for_each_sg(data->sg, sg, data->sg_len, i) {
		if (sg->offset & 3 || sg->length & 3)
			return -EINVAL;
	}

	sg_len = dma_map_sg(host->dev,
			    data->sg,
			    data->sg_len,
			    dw_mci_get_dma_dir(data));
	if (sg_len == 0)
		return -EINVAL;

	/* Remember the mapping so submit/post_req don't re-map/unmap it. */
	if (next)
		data->host_cookie = sg_len;

	return sg_len;
}
541
Seungwon Jeon9aa51402012-02-06 16:55:07 +0900542static void dw_mci_pre_req(struct mmc_host *mmc,
543 struct mmc_request *mrq,
544 bool is_first_req)
545{
546 struct dw_mci_slot *slot = mmc_priv(mmc);
547 struct mmc_data *data = mrq->data;
548
549 if (!slot->host->use_dma || !data)
550 return;
551
552 if (data->host_cookie) {
553 data->host_cookie = 0;
554 return;
555 }
556
557 if (dw_mci_pre_dma_transfer(slot->host, mrq->data, 1) < 0)
558 data->host_cookie = 0;
559}
560
561static void dw_mci_post_req(struct mmc_host *mmc,
562 struct mmc_request *mrq,
563 int err)
564{
565 struct dw_mci_slot *slot = mmc_priv(mmc);
566 struct mmc_data *data = mrq->data;
567
568 if (!slot->host->use_dma || !data)
569 return;
570
571 if (data->host_cookie)
Thomas Abraham4a909202012-09-17 18:16:35 +0000572 dma_unmap_sg(slot->host->dev,
Seungwon Jeon9aa51402012-02-06 16:55:07 +0900573 data->sg,
574 data->sg_len,
575 dw_mci_get_dma_dir(data));
576 data->host_cookie = 0;
577}
578
/*
 * Program FIFOTH (burst size MSIZE plus RX/TX watermarks) for @data's
 * block size.  Picks the largest burst size that evenly divides both the
 * block's FIFO-word depth and the TX watermark complement; falls back to
 * single transfers (MSIZE index 0, RX watermark 1) when the block size is
 * not a multiple of the FIFO width or no burst size fits.
 *
 * Improvements: the burst table is now static const (no per-call stack
 * copy) and the open-coded sizeof/sizeof is replaced with ARRAY_SIZE.
 */
static void dw_mci_adjust_fifoth(struct dw_mci *host, struct mmc_data *data)
{
#ifdef CONFIG_MMC_DW_IDMAC
	static const u32 mszs[] = {1, 4, 8, 16, 32, 64, 128, 256};
	unsigned int blksz = data->blksz;
	u32 fifo_width = 1 << host->data_shift;
	u32 blksz_depth = blksz / fifo_width, fifoth_val;
	u32 msize = 0, rx_wmark = 1, tx_wmark, tx_wmark_invers;
	int idx = ARRAY_SIZE(mszs) - 1;

	tx_wmark = (host->fifo_depth) / 2;
	tx_wmark_invers = host->fifo_depth - tx_wmark;

	/*
	 * MSIZE is '1',
	 * if blksz is not a multiple of the FIFO width
	 */
	if (blksz % fifo_width) {
		msize = 0;
		rx_wmark = 1;
		goto done;
	}

	/* Largest burst first; both depth and watermark must divide evenly. */
	do {
		if (!((blksz_depth % mszs[idx]) ||
		      (tx_wmark_invers % mszs[idx]))) {
			msize = idx;
			rx_wmark = mszs[idx] - 1;
			break;
		}
	} while (--idx > 0);
	/*
	 * If idx is '0', it won't be tried
	 * Thus, initial values are used
	 */
done:
	fifoth_val = SDMMC_SET_FIFOTH(msize, rx_wmark, tx_wmark);
	mci_writel(host, FIFOTH, fifoth_val);
#endif
}
619
/*
 * Configure the card-read threshold (CDTHRCTL) for a read transfer.
 * Enabled only for HS200/SDR104 timing and only when a whole block fits
 * in the FIFO; otherwise the threshold is disabled.
 */
static void dw_mci_ctrl_rd_thld(struct dw_mci *host, struct mmc_data *data)
{
	unsigned int blksz = data->blksz;
	u32 blksz_depth, fifo_depth;
	u16 thld_size;

	WARN_ON(!(data->flags & MMC_DATA_READ));

	if (host->timing != MMC_TIMING_MMC_HS200 &&
	    host->timing != MMC_TIMING_UHS_SDR104)
		goto disable;

	blksz_depth = blksz / (1 << host->data_shift);
	fifo_depth = host->fifo_depth;

	/* A whole block must fit in the FIFO for the threshold to work. */
	if (blksz_depth > fifo_depth)
		goto disable;

	/*
	 * If (blksz_depth) >= (fifo_depth >> 1), should be 'thld_size <= blksz'
	 * If (blksz_depth) < (fifo_depth >> 1), should be thld_size = blksz
	 * Currently just choose blksz.
	 */
	thld_size = blksz;
	mci_writel(host, CDTHRCTL, SDMMC_SET_RD_THLD(thld_size, 1));
	return;

disable:
	mci_writel(host, CDTHRCTL, SDMMC_SET_RD_THLD(0, 0));
}
650
/*
 * Try to set up @data as a DMA transfer: map the scatterlist, retune
 * FIFOTH for the block size if it changed, enable the DMA interface,
 * mask the PIO RX/TX interrupts and start the DMA engine.
 *
 * Returns 0 on success or a negative errno when DMA cannot be used
 * (caller falls back to PIO).
 */
static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data)
{
	int sg_len;
	u32 temp;

	host->using_dma = 0;

	/* If we don't have a channel, we can't do DMA */
	if (!host->use_dma)
		return -ENODEV;

	sg_len = dw_mci_pre_dma_transfer(host, data, 0);
	if (sg_len < 0) {
		host->dma_ops->stop(host);
		return sg_len;
	}

	host->using_dma = 1;

	dev_vdbg(host->dev,
		 "sd sg_cpu: %#lx sg_dma: %#lx sg_len: %d\n",
		 (unsigned long)host->sg_cpu, (unsigned long)host->sg_dma,
		 sg_len);

	/*
	 * Decide the MSIZE and RX/TX Watermark.
	 * If current block size is same with previous size,
	 * no need to update fifoth.
	 */
	if (host->prev_blksz != data->blksz)
		dw_mci_adjust_fifoth(host, data);

	/* Enable the DMA interface */
	temp = mci_readl(host, CTRL);
	temp |= SDMMC_CTRL_DMA_ENABLE;
	mci_writel(host, CTRL, temp);

	/* Disable RX/TX IRQs, let DMA handle it */
	temp = mci_readl(host, INTMASK);
	temp &= ~(SDMMC_INT_RXDR | SDMMC_INT_TXDR);
	mci_writel(host, INTMASK, temp);

	host->dma_ops->start(host, sg_len);

	return 0;
}
697
/*
 * Set up the data phase of a request.  Tries DMA first via
 * dw_mci_submit_data_dma(); on failure falls back to PIO — starting an
 * sg_miter, unmasking the RX/TX FIFO interrupts, disabling the DMA
 * interface and restoring the initial FIFOTH value.
 */
static void dw_mci_submit_data(struct dw_mci *host, struct mmc_data *data)
{
	u32 temp;

	data->error = -EINPROGRESS;

	WARN_ON(host->data);	/* previous data phase must have finished */
	host->sg = NULL;
	host->data = data;

	if (data->flags & MMC_DATA_READ) {
		host->dir_status = DW_MCI_RECV_STATUS;
		dw_mci_ctrl_rd_thld(host, data);
	} else {
		host->dir_status = DW_MCI_SEND_STATUS;
	}

	if (dw_mci_submit_data_dma(host, data)) {
		/* PIO fallback */
		int flags = SG_MITER_ATOMIC;
		if (host->data->flags & MMC_DATA_READ)
			flags |= SG_MITER_TO_SG;
		else
			flags |= SG_MITER_FROM_SG;

		sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
		host->sg = data->sg;
		host->part_buf_start = 0;
		host->part_buf_count = 0;

		/* Clear any stale FIFO events, then unmask RX/TX IRQs. */
		mci_writel(host, RINTSTS, SDMMC_INT_TXDR | SDMMC_INT_RXDR);
		temp = mci_readl(host, INTMASK);
		temp |= SDMMC_INT_TXDR | SDMMC_INT_RXDR;
		mci_writel(host, INTMASK, temp);

		temp = mci_readl(host, CTRL);
		temp &= ~SDMMC_CTRL_DMA_ENABLE;
		mci_writel(host, CTRL, temp);

		/*
		 * Use the initial fifoth_val for PIO mode.
		 * If next issued data may be transferred by DMA mode,
		 * prev_blksz should be invalidated.
		 */
		mci_writel(host, FIFOTH, host->fifoth_val);
		host->prev_blksz = 0;
	} else {
		/*
		 * Keep the current block size.
		 * It will be used to decide whether to update
		 * fifoth register next time.
		 */
		host->prev_blksz = data->blksz;
	}
}
752
/*
 * Issue a controller-internal command (e.g. the clock-update commands in
 * dw_mci_setup_bus()) and busy-poll for up to 500 ms until the hardware
 * clears the START bit.  On timeout, just log — there is no recovery here.
 */
static void mci_send_cmd(struct dw_mci_slot *slot, u32 cmd, u32 arg)
{
	struct dw_mci *host = slot->host;
	unsigned long timeout = jiffies + msecs_to_jiffies(500);
	unsigned int cmd_status = 0;

	mci_writel(host, CMDARG, arg);
	/* Argument must be visible before the START bit is set. */
	wmb();
	mci_writel(host, CMD, SDMMC_CMD_START | cmd);

	while (time_before(jiffies, timeout)) {
		cmd_status = mci_readl(host, CMD);
		/* Hardware clears START when the command has been taken. */
		if (!(cmd_status & SDMMC_CMD_START))
			return;
	}
	dev_err(&slot->mmc->class_dev,
		"Timeout sending command (cmd %#x arg %#x status %#x)\n",
		cmd, arg, cmd_status);
}
772
/*
 * Program the card clock and bus width for @slot.
 *
 * CLKDIV semantics: the card clock is bus_hz / (2 * div), with div == 0
 * meaning bypass (full bus_hz) — hence the DIV_ROUND_UP(div, 2) below and
 * the ">> 1" in the log message.  Every clock change follows the
 * controller's protocol: disable clock, inform the CIU via an UPD_CLK
 * command, write the divider, inform again, re-enable, inform once more.
 *
 * @force_clkinit: reprogram even if the requested rate is unchanged.
 */
static void dw_mci_setup_bus(struct dw_mci_slot *slot, bool force_clkinit)
{
	struct dw_mci *host = slot->host;
	unsigned int clock = slot->clock;
	u32 div;
	u32 clk_en_a;

	if (!clock) {
		/* Requested rate 0: just gate the card clock. */
		mci_writel(host, CLKENA, 0);
		mci_send_cmd(slot,
			     SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
	} else if (clock != host->current_speed || force_clkinit) {
		div = host->bus_hz / clock;
		if (host->bus_hz % clock && host->bus_hz > clock)
			/*
			 * move the + 1 after the divide to prevent
			 * over-clocking the card.
			 */
			div += 1;

		div = (host->bus_hz != clock) ? DIV_ROUND_UP(div, 2) : 0;

		/* Only log when the effective rate actually changes. */
		if ((clock << div) != slot->__clk_old || force_clkinit)
			dev_info(&slot->mmc->class_dev,
				 "Bus speed (slot %d) = %dHz (slot req %dHz, actual %dHZ div = %d)\n",
				 slot->id, host->bus_hz, clock,
				 div ? ((host->bus_hz / div) >> 1) :
				 host->bus_hz, div);

		/* disable clock */
		mci_writel(host, CLKENA, 0);
		mci_writel(host, CLKSRC, 0);

		/* inform CIU */
		mci_send_cmd(slot,
			     SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);

		/* set clock to desired speed */
		mci_writel(host, CLKDIV, div);

		/* inform CIU */
		mci_send_cmd(slot,
			     SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);

		/* enable clock; only low power if no SDIO */
		clk_en_a = SDMMC_CLKEN_ENABLE << slot->id;
		if (!(mci_readl(host, INTMASK) & SDMMC_INT_SDIO(slot->id)))
			clk_en_a |= SDMMC_CLKEN_LOW_PWR << slot->id;
		mci_writel(host, CLKENA, clk_en_a);

		/* inform CIU */
		mci_send_cmd(slot,
			     SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);

		/* keep the clock with reflecting clock dividor */
		slot->__clk_old = clock << div;
	}

	host->current_speed = clock;

	/* Set the current slot bus width */
	mci_writel(host, CTYPE, (slot->ctype << slot->id));
}
836
/*
 * Start @cmd of the slot's current request on the controller: reset the
 * per-request event/status state, program the data registers if there is
 * a data phase, issue the command, and pre-compute the stop/abort command
 * word for the data-phase teardown.
 */
static void __dw_mci_start_request(struct dw_mci *host,
				   struct dw_mci_slot *slot,
				   struct mmc_command *cmd)
{
	struct mmc_request *mrq;
	struct mmc_data *data;
	u32 cmdflags;

	mrq = slot->mrq;

	host->cur_slot = slot;
	host->mrq = mrq;

	/* Fresh request: clear all tracked events and statuses. */
	host->pending_events = 0;
	host->completed_events = 0;
	host->cmd_status = 0;
	host->data_status = 0;
	host->dir_status = 0;

	data = cmd->data;
	if (data) {
		/* Maximum hardware timeout; byte count and block size. */
		mci_writel(host, TMOUT, 0xFFFFFFFF);
		mci_writel(host, BYTCNT, data->blksz*data->blocks);
		mci_writel(host, BLKSIZ, data->blksz);
	}

	cmdflags = dw_mci_prepare_command(slot->mmc, cmd);

	/* this is the first command, send the initialization clock */
	if (test_and_clear_bit(DW_MMC_CARD_NEED_INIT, &slot->flags))
		cmdflags |= SDMMC_CMD_INIT;

	if (data) {
		dw_mci_submit_data(host, data);
		/* Data setup must be visible before the command is issued. */
		wmb();
	}

	dw_mci_start_command(host, cmd, cmdflags);

	/* Prepare the stop command word for when the data phase ends. */
	if (mrq->stop)
		host->stop_cmdr = dw_mci_prepare_command(slot->mmc, mrq->stop);
	else
		host->stop_cmdr = dw_mci_prep_stop_abort(host, cmd);
}
881
Seungwon Jeon053b3ce2011-12-22 18:01:29 +0900882static void dw_mci_start_request(struct dw_mci *host,
883 struct dw_mci_slot *slot)
884{
885 struct mmc_request *mrq = slot->mrq;
886 struct mmc_command *cmd;
887
888 cmd = mrq->sbc ? mrq->sbc : mrq->cmd;
889 __dw_mci_start_request(host, slot, cmd);
890}
891
James Hogan7456caa2011-06-24 13:55:10 +0100892/* must be called with host->lock held */
Will Newtonf95f3852011-01-02 01:11:59 -0500893static void dw_mci_queue_request(struct dw_mci *host, struct dw_mci_slot *slot,
894 struct mmc_request *mrq)
895{
896 dev_vdbg(&slot->mmc->class_dev, "queue request: state=%d\n",
897 host->state);
898
Will Newtonf95f3852011-01-02 01:11:59 -0500899 slot->mrq = mrq;
900
901 if (host->state == STATE_IDLE) {
902 host->state = STATE_SENDING_CMD;
903 dw_mci_start_request(host, slot);
904 } else {
905 list_add_tail(&slot->queue_node, &host->queue);
906 }
Will Newtonf95f3852011-01-02 01:11:59 -0500907}
908
909static void dw_mci_request(struct mmc_host *mmc, struct mmc_request *mrq)
910{
911 struct dw_mci_slot *slot = mmc_priv(mmc);
912 struct dw_mci *host = slot->host;
913
914 WARN_ON(slot->mrq);
915
James Hogan7456caa2011-06-24 13:55:10 +0100916 /*
917 * The check for card presence and queueing of the request must be
918 * atomic, otherwise the card could be removed in between and the
919 * request wouldn't fail until another card was inserted.
920 */
921 spin_lock_bh(&host->lock);
922
Will Newtonf95f3852011-01-02 01:11:59 -0500923 if (!test_bit(DW_MMC_CARD_PRESENT, &slot->flags)) {
James Hogan7456caa2011-06-24 13:55:10 +0100924 spin_unlock_bh(&host->lock);
Will Newtonf95f3852011-01-02 01:11:59 -0500925 mrq->cmd->error = -ENOMEDIUM;
926 mmc_request_done(mmc, mrq);
927 return;
928 }
929
Will Newtonf95f3852011-01-02 01:11:59 -0500930 dw_mci_queue_request(host, slot, mrq);
James Hogan7456caa2011-06-24 13:55:10 +0100931
932 spin_unlock_bh(&host->lock);
Will Newtonf95f3852011-01-02 01:11:59 -0500933}
934
/*
 * mmc_host_ops.set_ios: apply the bus width, timing, clock and power
 * state requested by the mmc core to this slot's registers.
 */
static void dw_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	const struct dw_mci_drv_data *drv_data = slot->host->drv_data;
	u32 regs;

	/*
	 * Record the requested bus width in slot->ctype; presumably
	 * programmed into the controller by dw_mci_setup_bus() below —
	 * confirm against that helper.
	 */
	switch (ios->bus_width) {
	case MMC_BUS_WIDTH_4:
		slot->ctype = SDMMC_CTYPE_4BIT;
		break;
	case MMC_BUS_WIDTH_8:
		slot->ctype = SDMMC_CTYPE_8BIT;
		break;
	default:
		/* set default 1 bit mode */
		slot->ctype = SDMMC_CTYPE_1BIT;
	}

	regs = mci_readl(slot->host, UHS_REG);

	/* DDR mode set: per-slot DDR bit lives in UHS_REG bits [16+id] */
	if (ios->timing == MMC_TIMING_MMC_DDR52)
		regs |= ((0x1 << slot->id) << 16);
	else
		regs &= ~((0x1 << slot->id) << 16);

	mci_writel(slot->host, UHS_REG, regs);
	slot->host->timing = ios->timing;

	/*
	 * Use mirror of ios->clock to prevent race with mmc
	 * core ios update when finding the minimum.
	 */
	slot->clock = ios->clock;

	/* Give the SoC-specific glue a chance to tweak timings/clocks. */
	if (drv_data && drv_data->set_ios)
		drv_data->set_ios(slot->host, ios);

	/* Slot specific timing and width adjustment */
	dw_mci_setup_bus(slot, false);

	switch (ios->power_mode) {
	case MMC_POWER_UP:
		/* First command after power-up must carry SDMMC_CMD_INIT. */
		set_bit(DW_MMC_CARD_NEED_INIT, &slot->flags);
		regs = mci_readl(slot->host, PWREN);
		regs |= (1 << slot->id);
		mci_writel(slot->host, PWREN, regs);
		break;
	case MMC_POWER_OFF:
		regs = mci_readl(slot->host, PWREN);
		regs &= ~(1 << slot->id);
		mci_writel(slot->host, PWREN, regs);
		break;
	default:
		break;
	}
}
992
993static int dw_mci_get_ro(struct mmc_host *mmc)
994{
995 int read_only;
996 struct dw_mci_slot *slot = mmc_priv(mmc);
Will Newtonf95f3852011-01-02 01:11:59 -0500997
998 /* Use platform get_ro function, else try on board write protect */
Doug Anderson96406392013-01-11 17:03:54 +0000999 if (slot->quirks & DW_MCI_SLOT_QUIRK_NO_WRITE_PROTECT)
Thomas Abrahamb4967aa2012-09-17 18:16:39 +00001000 read_only = 0;
Doug Anderson55a6ceb2013-01-11 17:03:53 +00001001 else if (gpio_is_valid(slot->wp_gpio))
1002 read_only = gpio_get_value(slot->wp_gpio);
Will Newtonf95f3852011-01-02 01:11:59 -05001003 else
1004 read_only =
1005 mci_readl(slot->host, WRTPRT) & (1 << slot->id) ? 1 : 0;
1006
1007 dev_dbg(&mmc->class_dev, "card is %s\n",
1008 read_only ? "read-only" : "read-write");
1009
1010 return read_only;
1011}
1012
/*
 * mmc_host_ops.get_cd: report card presence for the slot and keep
 * DW_MMC_CARD_PRESENT in slot->flags in sync (under host->lock, since
 * dw_mci_request() tests that bit under the same lock).
 */
static int dw_mci_get_cd(struct mmc_host *mmc)
{
	int present;
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci_board *brd = slot->host->pdata;
	struct dw_mci *host = slot->host;
	int gpio_cd = mmc_gpio_get_cd(mmc);

	/* Use platform get_cd function, else try onboard card detect */
	if (brd->quirks & DW_MCI_QUIRK_BROKEN_CARD_DETECTION)
		present = 1;
	else if (!IS_ERR_VALUE(gpio_cd))
		present = gpio_cd;
	else
		/* CDETECT bit is active-low: 0 means a card is inserted. */
		present = (mci_readl(slot->host, CDETECT) & (1 << slot->id))
			== 0 ? 1 : 0;

	spin_lock_bh(&host->lock);
	if (present) {
		set_bit(DW_MMC_CARD_PRESENT, &slot->flags);
		dev_dbg(&mmc->class_dev, "card is present\n");
	} else {
		clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);
		dev_dbg(&mmc->class_dev, "card is not present\n");
	}
	spin_unlock_bh(&host->lock);

	return present;
}
1042
/*
 * Disable lower power mode.
 *
 * Low power mode will stop the card clock when idle. According to the
 * description of the CLKENA register we should disable low power mode
 * for SDIO cards if we need SDIO interrupts to work.
 *
 * This function is fast if low power mode is already disabled.
 */
static void dw_mci_disable_low_power(struct dw_mci_slot *slot)
{
	struct dw_mci *host = slot->host;
	u32 clk_en_a;
	const u32 clken_low_pwr = SDMMC_CLKEN_LOW_PWR << slot->id;

	clk_en_a = mci_readl(host, CLKENA);

	if (clk_en_a & clken_low_pwr) {
		mci_writel(host, CLKENA, clk_en_a & ~clken_low_pwr);
		/*
		 * A CLKENA change only takes effect after an update-clock
		 * command is issued to the controller.
		 */
		mci_send_cmd(slot, SDMMC_CMD_UPD_CLK |
			     SDMMC_CMD_PRV_DAT_WAIT, 0);
	}
}
1066
Shashidhar Hiremath1a5c8e12011-08-29 13:11:46 +05301067static void dw_mci_enable_sdio_irq(struct mmc_host *mmc, int enb)
1068{
1069 struct dw_mci_slot *slot = mmc_priv(mmc);
1070 struct dw_mci *host = slot->host;
1071 u32 int_mask;
1072
1073 /* Enable/disable Slot Specific SDIO interrupt */
1074 int_mask = mci_readl(host, INTMASK);
1075 if (enb) {
Doug Anderson9623b5b2012-07-25 08:33:17 -07001076 /*
1077 * Turn off low power mode if it was enabled. This is a bit of
1078 * a heavy operation and we disable / enable IRQs a lot, so
1079 * we'll leave low power mode disabled and it will get
1080 * re-enabled again in dw_mci_setup_bus().
1081 */
1082 dw_mci_disable_low_power(slot);
1083
Shashidhar Hiremath1a5c8e12011-08-29 13:11:46 +05301084 mci_writel(host, INTMASK,
Kyoungil Kim705ad042012-05-14 17:38:48 +09001085 (int_mask | SDMMC_INT_SDIO(slot->id)));
Shashidhar Hiremath1a5c8e12011-08-29 13:11:46 +05301086 } else {
1087 mci_writel(host, INTMASK,
Kyoungil Kim705ad042012-05-14 17:38:48 +09001088 (int_mask & ~SDMMC_INT_SDIO(slot->id)));
Shashidhar Hiremath1a5c8e12011-08-29 13:11:46 +05301089 }
1090}
1091
Seungwon Jeon0976f162013-08-31 00:12:42 +09001092static int dw_mci_execute_tuning(struct mmc_host *mmc, u32 opcode)
1093{
1094 struct dw_mci_slot *slot = mmc_priv(mmc);
1095 struct dw_mci *host = slot->host;
1096 const struct dw_mci_drv_data *drv_data = host->drv_data;
1097 struct dw_mci_tuning_data tuning_data;
1098 int err = -ENOSYS;
1099
1100 if (opcode == MMC_SEND_TUNING_BLOCK_HS200) {
1101 if (mmc->ios.bus_width == MMC_BUS_WIDTH_8) {
1102 tuning_data.blk_pattern = tuning_blk_pattern_8bit;
1103 tuning_data.blksz = sizeof(tuning_blk_pattern_8bit);
1104 } else if (mmc->ios.bus_width == MMC_BUS_WIDTH_4) {
1105 tuning_data.blk_pattern = tuning_blk_pattern_4bit;
1106 tuning_data.blksz = sizeof(tuning_blk_pattern_4bit);
1107 } else {
1108 return -EINVAL;
1109 }
1110 } else if (opcode == MMC_SEND_TUNING_BLOCK) {
1111 tuning_data.blk_pattern = tuning_blk_pattern_4bit;
1112 tuning_data.blksz = sizeof(tuning_blk_pattern_4bit);
1113 } else {
1114 dev_err(host->dev,
1115 "Undefined command(%d) for tuning\n", opcode);
1116 return -EINVAL;
1117 }
1118
1119 if (drv_data && drv_data->execute_tuning)
1120 err = drv_data->execute_tuning(slot, opcode, &tuning_data);
1121 return err;
1122}
1123
/* Host controller operations handed to the mmc core for each slot. */
static const struct mmc_host_ops dw_mci_ops = {
	.request		= dw_mci_request,
	.pre_req		= dw_mci_pre_req,
	.post_req		= dw_mci_post_req,
	.set_ios		= dw_mci_set_ios,
	.get_ro			= dw_mci_get_ro,
	.get_cd			= dw_mci_get_cd,
	.enable_sdio_irq	= dw_mci_enable_sdio_irq,
	.execute_tuning		= dw_mci_execute_tuning,
};
1134
/*
 * Finish the current request and, if other slots are queued, start the
 * next one.  Temporarily drops host->lock around mmc_request_done()
 * because the completion callback may re-enter the driver.
 */
static void dw_mci_request_end(struct dw_mci *host, struct mmc_request *mrq)
	__releases(&host->lock)
	__acquires(&host->lock)
{
	struct dw_mci_slot *slot;
	struct mmc_host *prev_mmc = host->cur_slot->mmc;

	/* By now the command/data phases must both have been retired. */
	WARN_ON(host->cmd || host->data);

	host->cur_slot->mrq = NULL;
	host->mrq = NULL;
	if (!list_empty(&host->queue)) {
		/* Another slot is waiting: dequeue it and start it now. */
		slot = list_entry(host->queue.next,
				  struct dw_mci_slot, queue_node);
		list_del(&slot->queue_node);
		dev_vdbg(host->dev, "list not empty: %s is next\n",
			 mmc_hostname(slot->mmc));
		host->state = STATE_SENDING_CMD;
		dw_mci_start_request(host, slot);
	} else {
		dev_vdbg(host->dev, "list empty\n");
		host->state = STATE_IDLE;
	}

	spin_unlock(&host->lock);
	mmc_request_done(prev_mmc, mrq);
	spin_lock(&host->lock);
}
1163
/*
 * Retire a completed command: copy the card's response out of the
 * RESP0..3 registers and translate the latched interrupt status into
 * cmd->error.  Returns the resulting error code (0 on success).
 */
static int dw_mci_command_complete(struct dw_mci *host, struct mmc_command *cmd)
{
	u32 status = host->cmd_status;

	host->cmd_status = 0;

	/* Read the response from the card (up to 16 bytes) */
	if (cmd->flags & MMC_RSP_PRESENT) {
		if (cmd->flags & MMC_RSP_136) {
			/* Long (R2) response: resp[0] holds the MSW. */
			cmd->resp[3] = mci_readl(host, RESP0);
			cmd->resp[2] = mci_readl(host, RESP1);
			cmd->resp[1] = mci_readl(host, RESP2);
			cmd->resp[0] = mci_readl(host, RESP3);
		} else {
			cmd->resp[0] = mci_readl(host, RESP0);
			cmd->resp[1] = 0;
			cmd->resp[2] = 0;
			cmd->resp[3] = 0;
		}
	}

	/* Map interrupt status bits to errno, most specific first. */
	if (status & SDMMC_INT_RTO)
		cmd->error = -ETIMEDOUT;
	else if ((cmd->flags & MMC_RSP_CRC) && (status & SDMMC_INT_RCRC))
		cmd->error = -EILSEQ;
	else if (status & SDMMC_INT_RESP_ERR)
		cmd->error = -EIO;
	else
		cmd->error = 0;

	if (cmd->error) {
		/* newer ip versions need a delay between retries */
		if (host->quirks & DW_MCI_QUIRK_RETRY_DELAY)
			mdelay(20);
	}

	return cmd->error;
}
1202
/*
 * Retire a completed data phase: translate the latched data interrupt
 * status into data->error and set data->bytes_xfered.  On error the
 * FIFO is reset to flush any stale bytes.  Returns data->error.
 */
static int dw_mci_data_complete(struct dw_mci *host, struct mmc_data *data)
{
	u32 status = host->data_status;

	if (status & DW_MCI_DATA_ERROR_FLAGS) {
		if (status & SDMMC_INT_DRTO) {
			data->error = -ETIMEDOUT;
		} else if (status & SDMMC_INT_DCRC) {
			data->error = -EILSEQ;
		} else if (status & SDMMC_INT_EBE) {
			/* End-bit error: meaning depends on direction. */
			if (host->dir_status ==
				DW_MCI_SEND_STATUS) {
				/*
				 * No data CRC status was returned.
				 * The number of bytes transferred
				 * will be exaggerated in PIO mode.
				 */
				data->bytes_xfered = 0;
				data->error = -ETIMEDOUT;
			} else if (host->dir_status ==
					DW_MCI_RECV_STATUS) {
				data->error = -EIO;
			}
		} else {
			/* SDMMC_INT_SBE is included */
			data->error = -EIO;
		}

		dev_err(host->dev, "data error, status 0x%08x\n", status);

		/*
		 * After an error, there may be data lingering
		 * in the FIFO
		 */
		dw_mci_fifo_reset(host);
	} else {
		data->bytes_xfered = data->blocks * data->blksz;
		data->error = 0;
	}

	return data->error;
}
1245
/*
 * Bottom-half state machine driving a request through its phases:
 *
 *   SENDING_CMD -> SENDING_DATA -> DATA_BUSY -> SENDING_STOP -> done
 *                       \-> DATA_ERROR -> DATA_BUSY (after xfer drains)
 *
 * Events are posted by the interrupt handler into host->pending_events;
 * this tasklet consumes them under host->lock.  The switch deliberately
 * falls through between consecutive states, and the outer loop re-runs
 * whenever a state transition was made so already-pending events for
 * the new state are handled immediately.
 */
static void dw_mci_tasklet_func(unsigned long priv)
{
	struct dw_mci *host = (struct dw_mci *)priv;
	struct mmc_data *data;
	struct mmc_command *cmd;
	struct mmc_request *mrq;
	enum dw_mci_state state;
	enum dw_mci_state prev_state;
	unsigned int err;

	spin_lock(&host->lock);

	state = host->state;
	data = host->data;
	mrq = host->mrq;

	do {
		prev_state = state;

		switch (state) {
		case STATE_IDLE:
			break;

		case STATE_SENDING_CMD:
			if (!test_and_clear_bit(EVENT_CMD_COMPLETE,
						&host->pending_events))
				break;

			cmd = host->cmd;
			host->cmd = NULL;
			set_bit(EVENT_CMD_COMPLETE, &host->completed_events);
			err = dw_mci_command_complete(host, cmd);
			if (cmd == mrq->sbc && !err) {
				/* CMD23 done: now launch the real command. */
				prev_state = state = STATE_SENDING_CMD;
				__dw_mci_start_request(host, host->cur_slot,
						       mrq->cmd);
				goto unlock;
			}

			if (cmd->data && err) {
				/*
				 * Command failed but a data phase was armed:
				 * abort it and wait for the stop to finish.
				 */
				dw_mci_stop_dma(host);
				send_stop_abort(host, data);
				state = STATE_SENDING_STOP;
				break;
			}

			if (!cmd->data || err) {
				dw_mci_request_end(host, mrq);
				goto unlock;
			}

			prev_state = state = STATE_SENDING_DATA;
			/* fall through */

		case STATE_SENDING_DATA:
			if (test_and_clear_bit(EVENT_DATA_ERROR,
					       &host->pending_events)) {
				dw_mci_stop_dma(host);
				send_stop_abort(host, data);
				state = STATE_DATA_ERROR;
				break;
			}

			if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
						&host->pending_events))
				break;

			set_bit(EVENT_XFER_COMPLETE, &host->completed_events);
			prev_state = state = STATE_DATA_BUSY;
			/* fall through */

		case STATE_DATA_BUSY:
			if (!test_and_clear_bit(EVENT_DATA_COMPLETE,
						&host->pending_events))
				break;

			host->data = NULL;
			set_bit(EVENT_DATA_COMPLETE, &host->completed_events);
			err = dw_mci_data_complete(host, data);

			if (!err) {
				if (!data->stop || mrq->sbc) {
					/* Pre-defined transfer: no CMD12 needed. */
					if (mrq->sbc && data->stop)
						data->stop->error = 0;
					dw_mci_request_end(host, mrq);
					goto unlock;
				}

				/* stop command for open-ended transfer*/
				if (data->stop)
					send_stop_abort(host, data);
			}

			/*
			 * If err has non-zero,
			 * stop-abort command has been already issued.
			 */
			prev_state = state = STATE_SENDING_STOP;

			/* fall through */

		case STATE_SENDING_STOP:
			if (!test_and_clear_bit(EVENT_CMD_COMPLETE,
						&host->pending_events))
				break;

			/* CMD error in data command */
			if (mrq->cmd->error && mrq->data)
				dw_mci_fifo_reset(host);

			host->cmd = NULL;
			host->data = NULL;

			if (mrq->stop)
				dw_mci_command_complete(host, mrq->stop);
			else
				host->cmd_status = 0;

			dw_mci_request_end(host, mrq);
			goto unlock;

		case STATE_DATA_ERROR:
			/* Wait for the aborted transfer itself to drain. */
			if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
						&host->pending_events))
				break;

			state = STATE_DATA_BUSY;
			break;
		}
	} while (state != prev_state);

	host->state = state;
unlock:
	spin_unlock(&host->lock);

}
1382
James Hogan34b664a2011-06-24 13:57:56 +01001383/* push final bytes to part_buf, only use during push */
1384static void dw_mci_set_part_bytes(struct dw_mci *host, void *buf, int cnt)
1385{
1386 memcpy((void *)&host->part_buf, buf, cnt);
1387 host->part_buf_count = cnt;
1388}
1389
1390/* append bytes to part_buf, only use during push */
1391static int dw_mci_push_part_bytes(struct dw_mci *host, void *buf, int cnt)
1392{
1393 cnt = min(cnt, (1 << host->data_shift) - host->part_buf_count);
1394 memcpy((void *)&host->part_buf + host->part_buf_count, buf, cnt);
1395 host->part_buf_count += cnt;
1396 return cnt;
1397}
1398
1399/* pull first bytes from part_buf, only use during pull */
1400static int dw_mci_pull_part_bytes(struct dw_mci *host, void *buf, int cnt)
1401{
1402 cnt = min(cnt, (int)host->part_buf_count);
1403 if (cnt) {
1404 memcpy(buf, (void *)&host->part_buf + host->part_buf_start,
1405 cnt);
1406 host->part_buf_count -= cnt;
1407 host->part_buf_start += cnt;
1408 }
1409 return cnt;
1410}
1411
/* pull final bytes from the part_buf, assuming it's just been filled */
static void dw_mci_pull_final_bytes(struct dw_mci *host, void *buf, int cnt)
{
	memcpy(buf, &host->part_buf, cnt);
	/* Leftover bytes of the FIFO word stay buffered for later pulls. */
	host->part_buf_start = cnt;
	host->part_buf_count = (1 << host->data_shift) - cnt;
}
1419
/*
 * PIO push for a 16-bit wide data FIFO: write cnt bytes from buf to the
 * FIFO two at a time.  An odd trailing byte is parked in host->part_buf
 * until the next call, or flushed immediately when this call completes
 * the transfer's full expected length.
 */
static void dw_mci_push_data16(struct dw_mci *host, void *buf, int cnt)
{
	struct mmc_data *data = host->data;
	int init_cnt = cnt;

	/* try and push anything in the part_buf */
	if (unlikely(host->part_buf_count)) {
		int len = dw_mci_push_part_bytes(host, buf, cnt);
		buf += len;
		cnt -= len;
		if (host->part_buf_count == 2) {
			mci_writew(host, DATA(host->data_offset),
					host->part_buf16);
			host->part_buf_count = 0;
		}
	}
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	/* Bounce through a stack buffer when buf is not 2-byte aligned. */
	if (unlikely((unsigned long)buf & 0x1)) {
		while (cnt >= 2) {
			u16 aligned_buf[64];
			int len = min(cnt & -2, (int)sizeof(aligned_buf));
			int items = len >> 1;
			int i;
			/* memcpy from input buffer into aligned buffer */
			memcpy(aligned_buf, buf, len);
			buf += len;
			cnt -= len;
			/* push data from aligned buffer into fifo */
			for (i = 0; i < items; ++i)
				mci_writew(host, DATA(host->data_offset),
						aligned_buf[i]);
		}
	} else
#endif
	{
		u16 *pdata = buf;
		for (; cnt >= 2; cnt -= 2)
			mci_writew(host, DATA(host->data_offset), *pdata++);
		buf = pdata;
	}
	/* put anything remaining in the part_buf */
	if (cnt) {
		dw_mci_set_part_bytes(host, buf, cnt);
		/* Push data if we have reached the expected data length */
		if ((data->bytes_xfered + init_cnt) ==
		    (data->blksz * data->blocks))
			mci_writew(host, DATA(host->data_offset),
					host->part_buf16);
	}
}
1470
/*
 * PIO pull for a 16-bit wide data FIFO: read cnt bytes from the FIFO
 * into buf two at a time.  A trailing odd byte is read as a full FIFO
 * word; the leftover byte is kept in host->part_buf for the next pull.
 */
static void dw_mci_pull_data16(struct dw_mci *host, void *buf, int cnt)
{
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	/* Bounce through a stack buffer when buf is not 2-byte aligned. */
	if (unlikely((unsigned long)buf & 0x1)) {
		while (cnt >= 2) {
			/* pull data from fifo into aligned buffer */
			u16 aligned_buf[64];
			int len = min(cnt & -2, (int)sizeof(aligned_buf));
			int items = len >> 1;
			int i;
			for (i = 0; i < items; ++i)
				aligned_buf[i] = mci_readw(host,
						DATA(host->data_offset));
			/* memcpy from aligned buffer into output buffer */
			memcpy(buf, aligned_buf, len);
			buf += len;
			cnt -= len;
		}
	} else
#endif
	{
		u16 *pdata = buf;
		for (; cnt >= 2; cnt -= 2)
			*pdata++ = mci_readw(host, DATA(host->data_offset));
		buf = pdata;
	}
	if (cnt) {
		host->part_buf16 = mci_readw(host, DATA(host->data_offset));
		dw_mci_pull_final_bytes(host, buf, cnt);
	}
}
1502
/*
 * PIO push for a 32-bit wide data FIFO: write cnt bytes from buf to the
 * FIFO four at a time.  Up to three trailing bytes are parked in
 * host->part_buf until the next call, or flushed immediately when this
 * call completes the transfer's full expected length.
 */
static void dw_mci_push_data32(struct dw_mci *host, void *buf, int cnt)
{
	struct mmc_data *data = host->data;
	int init_cnt = cnt;

	/* try and push anything in the part_buf */
	if (unlikely(host->part_buf_count)) {
		int len = dw_mci_push_part_bytes(host, buf, cnt);
		buf += len;
		cnt -= len;
		if (host->part_buf_count == 4) {
			mci_writel(host, DATA(host->data_offset),
					host->part_buf32);
			host->part_buf_count = 0;
		}
	}
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	/* Bounce through a stack buffer when buf is not 4-byte aligned. */
	if (unlikely((unsigned long)buf & 0x3)) {
		while (cnt >= 4) {
			u32 aligned_buf[32];
			int len = min(cnt & -4, (int)sizeof(aligned_buf));
			int items = len >> 2;
			int i;
			/* memcpy from input buffer into aligned buffer */
			memcpy(aligned_buf, buf, len);
			buf += len;
			cnt -= len;
			/* push data from aligned buffer into fifo */
			for (i = 0; i < items; ++i)
				mci_writel(host, DATA(host->data_offset),
						aligned_buf[i]);
		}
	} else
#endif
	{
		u32 *pdata = buf;
		for (; cnt >= 4; cnt -= 4)
			mci_writel(host, DATA(host->data_offset), *pdata++);
		buf = pdata;
	}
	/* put anything remaining in the part_buf */
	if (cnt) {
		dw_mci_set_part_bytes(host, buf, cnt);
		/* Push data if we have reached the expected data length */
		if ((data->bytes_xfered + init_cnt) ==
		    (data->blksz * data->blocks))
			mci_writel(host, DATA(host->data_offset),
					host->part_buf32);
	}
}
1553
/*
 * PIO pull for a 32-bit wide data FIFO: read cnt bytes from the FIFO
 * into buf four at a time.  A partial trailing word is read whole; the
 * leftover bytes are kept in host->part_buf for the next pull.
 */
static void dw_mci_pull_data32(struct dw_mci *host, void *buf, int cnt)
{
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	/* Bounce through a stack buffer when buf is not 4-byte aligned. */
	if (unlikely((unsigned long)buf & 0x3)) {
		while (cnt >= 4) {
			/* pull data from fifo into aligned buffer */
			u32 aligned_buf[32];
			int len = min(cnt & -4, (int)sizeof(aligned_buf));
			int items = len >> 2;
			int i;
			for (i = 0; i < items; ++i)
				aligned_buf[i] = mci_readl(host,
						DATA(host->data_offset));
			/* memcpy from aligned buffer into output buffer */
			memcpy(buf, aligned_buf, len);
			buf += len;
			cnt -= len;
		}
	} else
#endif
	{
		u32 *pdata = buf;
		for (; cnt >= 4; cnt -= 4)
			*pdata++ = mci_readl(host, DATA(host->data_offset));
		buf = pdata;
	}
	if (cnt) {
		host->part_buf32 = mci_readl(host, DATA(host->data_offset));
		dw_mci_pull_final_bytes(host, buf, cnt);
	}
}
1585
/*
 * PIO push for a 64-bit wide data FIFO: write cnt bytes from buf to the
 * FIFO eight at a time.  Up to seven trailing bytes are parked in
 * host->part_buf until the next call, or flushed immediately when this
 * call completes the transfer's full expected length.
 */
static void dw_mci_push_data64(struct dw_mci *host, void *buf, int cnt)
{
	struct mmc_data *data = host->data;
	int init_cnt = cnt;

	/* try and push anything in the part_buf */
	if (unlikely(host->part_buf_count)) {
		int len = dw_mci_push_part_bytes(host, buf, cnt);
		buf += len;
		cnt -= len;

		if (host->part_buf_count == 8) {
			mci_writeq(host, DATA(host->data_offset),
					host->part_buf);
			host->part_buf_count = 0;
		}
	}
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	/* Bounce through a stack buffer when buf is not 8-byte aligned. */
	if (unlikely((unsigned long)buf & 0x7)) {
		while (cnt >= 8) {
			u64 aligned_buf[16];
			int len = min(cnt & -8, (int)sizeof(aligned_buf));
			int items = len >> 3;
			int i;
			/* memcpy from input buffer into aligned buffer */
			memcpy(aligned_buf, buf, len);
			buf += len;
			cnt -= len;
			/* push data from aligned buffer into fifo */
			for (i = 0; i < items; ++i)
				mci_writeq(host, DATA(host->data_offset),
						aligned_buf[i]);
		}
	} else
#endif
	{
		u64 *pdata = buf;
		for (; cnt >= 8; cnt -= 8)
			mci_writeq(host, DATA(host->data_offset), *pdata++);
		buf = pdata;
	}
	/* put anything remaining in the part_buf */
	if (cnt) {
		dw_mci_set_part_bytes(host, buf, cnt);
		/* Push data if we have reached the expected data length */
		if ((data->bytes_xfered + init_cnt) ==
		    (data->blksz * data->blocks))
			mci_writeq(host, DATA(host->data_offset),
					host->part_buf);
	}
}
1637
/*
 * PIO pull for a 64-bit wide data FIFO: read cnt bytes from the FIFO
 * into buf eight at a time.  A partial trailing word is read whole; the
 * leftover bytes are kept in host->part_buf for the next pull.
 */
static void dw_mci_pull_data64(struct dw_mci *host, void *buf, int cnt)
{
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	/* Bounce through a stack buffer when buf is not 8-byte aligned. */
	if (unlikely((unsigned long)buf & 0x7)) {
		while (cnt >= 8) {
			/* pull data from fifo into aligned buffer */
			u64 aligned_buf[16];
			int len = min(cnt & -8, (int)sizeof(aligned_buf));
			int items = len >> 3;
			int i;
			for (i = 0; i < items; ++i)
				aligned_buf[i] = mci_readq(host,
						DATA(host->data_offset));
			/* memcpy from aligned buffer into output buffer */
			memcpy(buf, aligned_buf, len);
			buf += len;
			cnt -= len;
		}
	} else
#endif
	{
		u64 *pdata = buf;
		for (; cnt >= 8; cnt -= 8)
			*pdata++ = mci_readq(host, DATA(host->data_offset));
		buf = pdata;
	}
	if (cnt) {
		host->part_buf = mci_readq(host, DATA(host->data_offset));
		dw_mci_pull_final_bytes(host, buf, cnt);
	}
}
1669
1670static void dw_mci_pull_data(struct dw_mci *host, void *buf, int cnt)
1671{
1672 int len;
1673
1674 /* get remaining partial bytes */
1675 len = dw_mci_pull_part_bytes(host, buf, cnt);
1676 if (unlikely(len == cnt))
1677 return;
1678 buf += len;
1679 cnt -= len;
1680
1681 /* get the rest of the data */
1682 host->pull_data(host, buf, cnt);
Will Newtonf95f3852011-01-02 01:11:59 -05001683}
1684
/*
 * PIO receive path: drain the data FIFO into the request's scatterlist
 * via the sg_miter iterator.  Loops while the RXDR (receive ready)
 * status keeps re-asserting, and — when @dto signals data-transfer-over
 * — until the FIFO count reads zero.  When the scatterlist is exhausted
 * the transfer-complete event is posted for the tasklet.
 */
static void dw_mci_read_data_pio(struct dw_mci *host, bool dto)
{
	struct sg_mapping_iter *sg_miter = &host->sg_miter;
	void *buf;
	unsigned int offset;
	struct mmc_data *data = host->data;
	int shift = host->data_shift;
	u32 status;
	unsigned int len;
	unsigned int remain, fcnt;

	do {
		if (!sg_miter_next(sg_miter))
			goto done;

		host->sg = sg_miter->piter.sg;
		buf = sg_miter->addr;
		remain = sg_miter->length;
		offset = 0;

		do {
			/*
			 * Bytes available = FIFO words scaled by the FIFO
			 * width, plus any partial word already buffered.
			 */
			fcnt = (SDMMC_GET_FCNT(mci_readl(host, STATUS))
					<< shift) + host->part_buf_count;
			len = min(remain, fcnt);
			if (!len)
				break;
			dw_mci_pull_data(host, (void *)(buf + offset), len);
			data->bytes_xfered += len;
			offset += len;
			remain -= len;
		} while (remain);

		sg_miter->consumed = offset;
		status = mci_readl(host, MINTSTS);
		mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
	/* if the RXDR is ready read again */
	} while ((status & SDMMC_INT_RXDR) ||
		 (dto && SDMMC_GET_FCNT(mci_readl(host, STATUS))));

	if (!remain) {
		/* Current segment fully consumed: peek at the next one. */
		if (!sg_miter_next(sg_miter))
			goto done;
		sg_miter->consumed = 0;
	}
	sg_miter_stop(sg_miter);
	return;

done:
	/* Scatterlist exhausted: transfer is complete from PIO's side. */
	sg_miter_stop(sg_miter);
	host->sg = NULL;
	smp_wmb();
	set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
}
1738
/*
 * Fill the TX FIFO from the current request's scatterlist by PIO.
 *
 * Mirror image of dw_mci_read_data_pio(): for each mapped scatterlist
 * region, pushes as many bytes as the FIFO has room for (free words =
 * fifo_depth minus current fill level, scaled by host->data_shift,
 * minus any bytes already buffered in the partial word).  Repeats while
 * the TXDR interrupt remains asserted.
 *
 * On scatterlist exhaustion, stops the iterator, clears host->sg and
 * signals EVENT_XFER_COMPLETE to the tasklet.
 */
static void dw_mci_write_data_pio(struct dw_mci *host)
{
	struct sg_mapping_iter *sg_miter = &host->sg_miter;
	void *buf;
	unsigned int offset;
	struct mmc_data *data = host->data;
	int shift = host->data_shift;	/* log2 of FIFO word width in bytes */
	u32 status;
	unsigned int len;
	unsigned int fifo_depth = host->fifo_depth;
	unsigned int remain, fcnt;

	do {
		if (!sg_miter_next(sg_miter))
			goto done;

		host->sg = sg_miter->piter.sg;
		buf = sg_miter->addr;
		remain = sg_miter->length;
		offset = 0;

		do {
			/* free space: empty FIFO words minus buffered partial bytes */
			fcnt = ((fifo_depth -
				 SDMMC_GET_FCNT(mci_readl(host, STATUS)))
					<< shift) - host->part_buf_count;
			len = min(remain, fcnt);
			if (!len)
				break;
			host->push_data(host, (void *)(buf + offset), len);
			data->bytes_xfered += len;
			offset += len;
			remain -= len;
		} while (remain);

		sg_miter->consumed = offset;
		/* sample MINTSTS before acking TXDR to decide on another pass */
		status = mci_readl(host, MINTSTS);
		mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
	} while (status & SDMMC_INT_TXDR); /* if TXDR write again */

	if (!remain) {
		/* current region fully consumed; probe for the next one */
		if (!sg_miter_next(sg_miter))
			goto done;
		sg_miter->consumed = 0;
	}
	sg_miter_stop(sg_miter);
	return;

done:
	sg_miter_stop(sg_miter);
	host->sg = NULL;
	/* publish state changes before the event bit is observed */
	smp_wmb();
	set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
}
1792
/*
 * Record a command-phase interrupt status and kick the state-machine
 * tasklet.  Only the first status is kept (an earlier error must not be
 * overwritten by the later CMD_DONE status).
 */
static void dw_mci_cmd_interrupt(struct dw_mci *host, u32 status)
{
	if (!host->cmd_status)
		host->cmd_status = status;

	/* make cmd_status visible before EVENT_CMD_COMPLETE is observed */
	smp_wmb();

	set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
	tasklet_schedule(&host->tasklet);
}
1803
1804static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
1805{
1806 struct dw_mci *host = dev_id;
Seungwon Jeon182c9082012-08-01 09:30:30 +09001807 u32 pending;
Shashidhar Hiremath1a5c8e12011-08-29 13:11:46 +05301808 int i;
Will Newtonf95f3852011-01-02 01:11:59 -05001809
Markos Chandras1fb5f682013-03-12 10:53:11 +00001810 pending = mci_readl(host, MINTSTS); /* read-only mask reg */
1811
Doug Anderson476d79f2013-07-09 13:04:40 -07001812 /*
1813 * DTO fix - version 2.10a and below, and only if internal DMA
1814 * is configured.
1815 */
1816 if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO) {
1817 if (!pending &&
1818 ((mci_readl(host, STATUS) >> 17) & 0x1fff))
1819 pending |= SDMMC_INT_DATA_OVER;
1820 }
1821
Markos Chandras1fb5f682013-03-12 10:53:11 +00001822 if (pending) {
Will Newtonf95f3852011-01-02 01:11:59 -05001823 if (pending & DW_MCI_CMD_ERROR_FLAGS) {
1824 mci_writel(host, RINTSTS, DW_MCI_CMD_ERROR_FLAGS);
Seungwon Jeon182c9082012-08-01 09:30:30 +09001825 host->cmd_status = pending;
Will Newtonf95f3852011-01-02 01:11:59 -05001826 smp_wmb();
1827 set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
Will Newtonf95f3852011-01-02 01:11:59 -05001828 }
1829
1830 if (pending & DW_MCI_DATA_ERROR_FLAGS) {
1831 /* if there is an error report DATA_ERROR */
1832 mci_writel(host, RINTSTS, DW_MCI_DATA_ERROR_FLAGS);
Seungwon Jeon182c9082012-08-01 09:30:30 +09001833 host->data_status = pending;
Will Newtonf95f3852011-01-02 01:11:59 -05001834 smp_wmb();
1835 set_bit(EVENT_DATA_ERROR, &host->pending_events);
Seungwon Jeon9b2026a2012-08-01 09:30:40 +09001836 tasklet_schedule(&host->tasklet);
Will Newtonf95f3852011-01-02 01:11:59 -05001837 }
1838
1839 if (pending & SDMMC_INT_DATA_OVER) {
1840 mci_writel(host, RINTSTS, SDMMC_INT_DATA_OVER);
1841 if (!host->data_status)
Seungwon Jeon182c9082012-08-01 09:30:30 +09001842 host->data_status = pending;
Will Newtonf95f3852011-01-02 01:11:59 -05001843 smp_wmb();
1844 if (host->dir_status == DW_MCI_RECV_STATUS) {
1845 if (host->sg != NULL)
Kyoungil Kim87a74d32013-01-22 16:46:30 +09001846 dw_mci_read_data_pio(host, true);
Will Newtonf95f3852011-01-02 01:11:59 -05001847 }
1848 set_bit(EVENT_DATA_COMPLETE, &host->pending_events);
1849 tasklet_schedule(&host->tasklet);
1850 }
1851
1852 if (pending & SDMMC_INT_RXDR) {
1853 mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
James Hoganb40af3a2011-06-24 13:54:06 +01001854 if (host->dir_status == DW_MCI_RECV_STATUS && host->sg)
Kyoungil Kim87a74d32013-01-22 16:46:30 +09001855 dw_mci_read_data_pio(host, false);
Will Newtonf95f3852011-01-02 01:11:59 -05001856 }
1857
1858 if (pending & SDMMC_INT_TXDR) {
1859 mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
James Hoganb40af3a2011-06-24 13:54:06 +01001860 if (host->dir_status == DW_MCI_SEND_STATUS && host->sg)
Will Newtonf95f3852011-01-02 01:11:59 -05001861 dw_mci_write_data_pio(host);
1862 }
1863
1864 if (pending & SDMMC_INT_CMD_DONE) {
1865 mci_writel(host, RINTSTS, SDMMC_INT_CMD_DONE);
Seungwon Jeon182c9082012-08-01 09:30:30 +09001866 dw_mci_cmd_interrupt(host, pending);
Will Newtonf95f3852011-01-02 01:11:59 -05001867 }
1868
1869 if (pending & SDMMC_INT_CD) {
1870 mci_writel(host, RINTSTS, SDMMC_INT_CD);
Thomas Abraham95dcc2c2012-05-01 14:57:36 -07001871 queue_work(host->card_workqueue, &host->card_work);
Will Newtonf95f3852011-01-02 01:11:59 -05001872 }
1873
Shashidhar Hiremath1a5c8e12011-08-29 13:11:46 +05301874 /* Handle SDIO Interrupts */
1875 for (i = 0; i < host->num_slots; i++) {
1876 struct dw_mci_slot *slot = host->slot[i];
1877 if (pending & SDMMC_INT_SDIO(i)) {
1878 mci_writel(host, RINTSTS, SDMMC_INT_SDIO(i));
1879 mmc_signal_sdio_irq(slot->mmc);
1880 }
1881 }
1882
Markos Chandras1fb5f682013-03-12 10:53:11 +00001883 }
Will Newtonf95f3852011-01-02 01:11:59 -05001884
1885#ifdef CONFIG_MMC_DW_IDMAC
1886 /* Handle DMA interrupts */
1887 pending = mci_readl(host, IDSTS);
1888 if (pending & (SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI)) {
1889 mci_writel(host, IDSTS, SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI);
1890 mci_writel(host, IDSTS, SDMMC_IDMAC_INT_NI);
Will Newtonf95f3852011-01-02 01:11:59 -05001891 host->dma_ops->complete(host);
1892 }
1893#endif
1894
1895 return IRQ_HANDLED;
1896}
1897
/*
 * Card-detect workqueue handler (queued from the SDMMC_INT_CD interrupt).
 *
 * For every slot, compares the current card-detect state with the last
 * recorded one and, for each transition, aborts any request in flight
 * with -ENOMEDIUM, resets the FIFO (and IDMAC, when configured) on
 * removal, and finally tells the MMC core to rescan the slot.
 *
 * NOTE(review): host->slot[i] is dereferenced without a NULL check; this
 * assumes every slot up to num_slots initialized successfully -- verify
 * against the probe error path.
 */
static void dw_mci_work_routine_card(struct work_struct *work)
{
	struct dw_mci *host = container_of(work, struct dw_mci, card_work);
	int i;

	for (i = 0; i < host->num_slots; i++) {
		struct dw_mci_slot *slot = host->slot[i];
		struct mmc_host *mmc = slot->mmc;
		struct mmc_request *mrq;
		int present;

		present = dw_mci_get_cd(mmc);
		/* loop: state may change again while we are processing */
		while (present != slot->last_detect_state) {
			dev_dbg(&slot->mmc->class_dev, "card %s\n",
				present ? "inserted" : "removed");

			spin_lock_bh(&host->lock);

			/* Card change detected */
			slot->last_detect_state = present;

			/* Clean up queue if present */
			mrq = slot->mrq;
			if (mrq) {
				if (mrq == host->mrq) {
					/* request is active on the controller */
					host->data = NULL;
					host->cmd = NULL;

					switch (host->state) {
					case STATE_IDLE:
						break;
					case STATE_SENDING_CMD:
						mrq->cmd->error = -ENOMEDIUM;
						if (!mrq->data)
							break;
						/* fall through */
					case STATE_SENDING_DATA:
						mrq->data->error = -ENOMEDIUM;
						dw_mci_stop_dma(host);
						break;
					case STATE_DATA_BUSY:
					case STATE_DATA_ERROR:
						if (mrq->data->error == -EINPROGRESS)
							mrq->data->error = -ENOMEDIUM;
						/* fall through */
					case STATE_SENDING_STOP:
						if (mrq->stop)
							mrq->stop->error = -ENOMEDIUM;
						break;
					}

					dw_mci_request_end(host, mrq);
				} else {
					/* request was still queued; fail it directly */
					list_del(&slot->queue_node);
					mrq->cmd->error = -ENOMEDIUM;
					if (mrq->data)
						mrq->data->error = -ENOMEDIUM;
					if (mrq->stop)
						mrq->stop->error = -ENOMEDIUM;

					/* drop the lock for the core callback */
					spin_unlock(&host->lock);
					mmc_request_done(slot->mmc, mrq);
					spin_lock(&host->lock);
				}
			}

			/* Power down slot */
			if (present == 0) {
				/* Clear down the FIFO */
				dw_mci_fifo_reset(host);
#ifdef CONFIG_MMC_DW_IDMAC
				dw_mci_idmac_reset(host);
#endif

			}

			spin_unlock_bh(&host->lock);

			/* re-sample in case the card moved again meanwhile */
			present = dw_mci_get_cd(mmc);
		}

		mmc_detect_change(slot->mmc,
			msecs_to_jiffies(host->pdata->detect_delay_ms));
	}
}
1983
Thomas Abrahamc91eab42012-09-17 18:16:40 +00001984#ifdef CONFIG_OF
1985/* given a slot id, find out the device node representing that slot */
1986static struct device_node *dw_mci_of_find_slot_node(struct device *dev, u8 slot)
1987{
1988 struct device_node *np;
1989 const __be32 *addr;
1990 int len;
1991
1992 if (!dev || !dev->of_node)
1993 return NULL;
1994
1995 for_each_child_of_node(dev->of_node, np) {
1996 addr = of_get_property(np, "reg", &len);
1997 if (!addr || (len < sizeof(int)))
1998 continue;
1999 if (be32_to_cpup(addr) == slot)
2000 return np;
2001 }
2002 return NULL;
2003}
2004
/* Map of per-slot DT quirk property names to DW_MCI_SLOT_QUIRK_* flags. */
static struct dw_mci_of_slot_quirks {
	char *quirk;	/* device-tree property name */
	int id;		/* quirk flag OR'd into slot->quirks */
} of_slot_quirks[] = {
	{
		.quirk	= "disable-wp",
		.id	= DW_MCI_SLOT_QUIRK_NO_WRITE_PROTECT,
	},
};
2014
2015static int dw_mci_of_get_slot_quirks(struct device *dev, u8 slot)
2016{
2017 struct device_node *np = dw_mci_of_find_slot_node(dev, slot);
2018 int quirks = 0;
2019 int idx;
2020
2021 /* get quirks */
2022 for (idx = 0; idx < ARRAY_SIZE(of_slot_quirks); idx++)
2023 if (of_get_property(np, of_slot_quirks[idx].quirk, NULL))
2024 quirks |= of_slot_quirks[idx].id;
2025
2026 return quirks;
2027}
2028
Doug Anderson55a6ceb2013-01-11 17:03:53 +00002029/* find the write protect gpio for a given slot; or -1 if none specified */
2030static int dw_mci_of_get_wp_gpio(struct device *dev, u8 slot)
2031{
2032 struct device_node *np = dw_mci_of_find_slot_node(dev, slot);
2033 int gpio;
2034
2035 if (!np)
2036 return -EINVAL;
2037
2038 gpio = of_get_named_gpio(np, "wp-gpios", 0);
2039
2040 /* Having a missing entry is valid; return silently */
2041 if (!gpio_is_valid(gpio))
2042 return -EINVAL;
2043
2044 if (devm_gpio_request(dev, gpio, "dw-mci-wp")) {
2045 dev_warn(dev, "gpio [%d] request failed\n", gpio);
2046 return -EINVAL;
2047 }
2048
2049 return gpio;
2050}
Zhangfei Gaobf626e52014-01-09 22:35:10 +08002051
Zhangfei Gao7cf347b2014-01-16 20:48:47 +08002052/* find the cd gpio for a given slot */
Zhangfei Gaobf626e52014-01-09 22:35:10 +08002053static void dw_mci_of_get_cd_gpio(struct device *dev, u8 slot,
2054 struct mmc_host *mmc)
2055{
2056 struct device_node *np = dw_mci_of_find_slot_node(dev, slot);
2057 int gpio;
2058
2059 if (!np)
2060 return;
2061
2062 gpio = of_get_named_gpio(np, "cd-gpios", 0);
2063
2064 /* Having a missing entry is valid; return silently */
2065 if (!gpio_is_valid(gpio))
2066 return;
2067
2068 if (mmc_gpio_request_cd(mmc, gpio, 0))
2069 dev_warn(dev, "gpio [%d] request failed\n", gpio);
2070}
Thomas Abrahamc91eab42012-09-17 18:16:40 +00002071#else /* CONFIG_OF */
/* !CONFIG_OF stubs: no device tree, so no quirks ... */
static int dw_mci_of_get_slot_quirks(struct device *dev, u8 slot)
{
	return 0;
}
/* ... no slot node ... */
static struct device_node *dw_mci_of_find_slot_node(struct device *dev, u8 slot)
{
	return NULL;
}
/* ... no write-protect GPIO ... */
static int dw_mci_of_get_wp_gpio(struct device *dev, u8 slot)
{
	return -EINVAL;
}
/* ... and no card-detect GPIO. */
static void dw_mci_of_get_cd_gpio(struct device *dev, u8 slot,
					struct mmc_host *mmc)
{
	return;
}
Thomas Abrahamc91eab42012-09-17 18:16:40 +00002089#endif /* CONFIG_OF */
2090
Jaehoon Chung36c179a2012-08-23 20:31:48 +09002091static int dw_mci_init_slot(struct dw_mci *host, unsigned int id)
Will Newtonf95f3852011-01-02 01:11:59 -05002092{
2093 struct mmc_host *mmc;
2094 struct dw_mci_slot *slot;
Arnd Bergmanne95baf12012-11-08 14:26:11 +00002095 const struct dw_mci_drv_data *drv_data = host->drv_data;
Thomas Abraham800d78b2012-09-17 18:16:42 +00002096 int ctrl_id, ret;
Seungwon Jeon1f44a2a2013-08-31 00:13:31 +09002097 u32 freq[2];
Will Newtonf95f3852011-01-02 01:11:59 -05002098
Thomas Abraham4a909202012-09-17 18:16:35 +00002099 mmc = mmc_alloc_host(sizeof(struct dw_mci_slot), host->dev);
Will Newtonf95f3852011-01-02 01:11:59 -05002100 if (!mmc)
2101 return -ENOMEM;
2102
2103 slot = mmc_priv(mmc);
2104 slot->id = id;
2105 slot->mmc = mmc;
2106 slot->host = host;
Thomas Abrahamc91eab42012-09-17 18:16:40 +00002107 host->slot[id] = slot;
Will Newtonf95f3852011-01-02 01:11:59 -05002108
Doug Andersona70aaa62013-01-11 17:03:50 +00002109 slot->quirks = dw_mci_of_get_slot_quirks(host->dev, slot->id);
2110
Will Newtonf95f3852011-01-02 01:11:59 -05002111 mmc->ops = &dw_mci_ops;
Seungwon Jeon1f44a2a2013-08-31 00:13:31 +09002112 if (of_property_read_u32_array(host->dev->of_node,
2113 "clock-freq-min-max", freq, 2)) {
2114 mmc->f_min = DW_MCI_FREQ_MIN;
2115 mmc->f_max = DW_MCI_FREQ_MAX;
2116 } else {
2117 mmc->f_min = freq[0];
2118 mmc->f_max = freq[1];
2119 }
Will Newtonf95f3852011-01-02 01:11:59 -05002120
Jaehoon Chung907abd52014-03-03 11:36:43 +09002121 mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
Will Newtonf95f3852011-01-02 01:11:59 -05002122
Jaehoon Chungfc3d7722011-02-25 11:08:15 +09002123 if (host->pdata->caps)
2124 mmc->caps = host->pdata->caps;
Jaehoon Chungfc3d7722011-02-25 11:08:15 +09002125
Abhilash Kesavanab269122012-11-19 10:26:21 +05302126 if (host->pdata->pm_caps)
2127 mmc->pm_caps = host->pdata->pm_caps;
2128
Thomas Abraham800d78b2012-09-17 18:16:42 +00002129 if (host->dev->of_node) {
2130 ctrl_id = of_alias_get_id(host->dev->of_node, "mshc");
2131 if (ctrl_id < 0)
2132 ctrl_id = 0;
2133 } else {
2134 ctrl_id = to_platform_device(host->dev)->id;
2135 }
James Hogancb27a842012-10-16 09:43:08 +01002136 if (drv_data && drv_data->caps)
2137 mmc->caps |= drv_data->caps[ctrl_id];
Thomas Abraham800d78b2012-09-17 18:16:42 +00002138
Seungwon Jeon4f408cc2011-12-09 14:55:52 +09002139 if (host->pdata->caps2)
2140 mmc->caps2 = host->pdata->caps2;
Seungwon Jeon4f408cc2011-12-09 14:55:52 +09002141
Jaehoon Chungd8a4fb02014-03-03 11:36:41 +09002142 mmc_of_parse(mmc);
Will Newtonf95f3852011-01-02 01:11:59 -05002143
Will Newtonf95f3852011-01-02 01:11:59 -05002144 if (host->pdata->blk_settings) {
2145 mmc->max_segs = host->pdata->blk_settings->max_segs;
2146 mmc->max_blk_size = host->pdata->blk_settings->max_blk_size;
2147 mmc->max_blk_count = host->pdata->blk_settings->max_blk_count;
2148 mmc->max_req_size = host->pdata->blk_settings->max_req_size;
2149 mmc->max_seg_size = host->pdata->blk_settings->max_seg_size;
2150 } else {
2151 /* Useful defaults if platform data is unset. */
Jaehoon Chunga39e5742012-02-04 17:00:27 -05002152#ifdef CONFIG_MMC_DW_IDMAC
2153 mmc->max_segs = host->ring_size;
2154 mmc->max_blk_size = 65536;
2155 mmc->max_blk_count = host->ring_size;
2156 mmc->max_seg_size = 0x1000;
2157 mmc->max_req_size = mmc->max_seg_size * mmc->max_blk_count;
2158#else
Will Newtonf95f3852011-01-02 01:11:59 -05002159 mmc->max_segs = 64;
2160 mmc->max_blk_size = 65536; /* BLKSIZ is 16 bits */
2161 mmc->max_blk_count = 512;
2162 mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
2163 mmc->max_seg_size = mmc->max_req_size;
Will Newtonf95f3852011-01-02 01:11:59 -05002164#endif /* CONFIG_MMC_DW_IDMAC */
Jaehoon Chunga39e5742012-02-04 17:00:27 -05002165 }
Will Newtonf95f3852011-01-02 01:11:59 -05002166
Doug Anderson55a6ceb2013-01-11 17:03:53 +00002167 slot->wp_gpio = dw_mci_of_get_wp_gpio(host->dev, slot->id);
Zhangfei Gaobf626e52014-01-09 22:35:10 +08002168 dw_mci_of_get_cd_gpio(host->dev, slot->id, mmc);
Doug Anderson55a6ceb2013-01-11 17:03:53 +00002169
Jaehoon Chung0cea5292013-02-15 23:45:45 +09002170 ret = mmc_add_host(mmc);
2171 if (ret)
2172 goto err_setup_bus;
Will Newtonf95f3852011-01-02 01:11:59 -05002173
2174#if defined(CONFIG_DEBUG_FS)
2175 dw_mci_init_debugfs(slot);
2176#endif
2177
2178 /* Card initially undetected */
2179 slot->last_detect_state = 0;
2180
Will Newtonf95f3852011-01-02 01:11:59 -05002181 return 0;
Thomas Abraham800d78b2012-09-17 18:16:42 +00002182
2183err_setup_bus:
2184 mmc_free_host(mmc);
2185 return -EINVAL;
Will Newtonf95f3852011-01-02 01:11:59 -05002186}
2187
/*
 * Tear down one slot: unregister from the MMC core, drop the host's
 * reference, then free the mmc_host (which owns the slot's memory).
 */
static void dw_mci_cleanup_slot(struct dw_mci_slot *slot, unsigned int id)
{
	/* Debugfs stuff is cleaned up by mmc core */
	mmc_remove_host(slot->mmc);
	slot->host->slot[id] = NULL;
	mmc_free_host(slot->mmc);
}
2195
/*
 * Set up DMA support: allocate the descriptor page and initialize the
 * selected DMA backend (only the internal IDMAC is wired up here, when
 * CONFIG_MMC_DW_IDMAC is set).  On any failure the driver falls back to
 * PIO by leaving host->use_dma clear.
 */
static void dw_mci_init_dma(struct dw_mci *host)
{
	/* Alloc memory for sg translation */
	host->sg_cpu = dmam_alloc_coherent(host->dev, PAGE_SIZE,
					  &host->sg_dma, GFP_KERNEL);
	if (!host->sg_cpu) {
		dev_err(host->dev, "%s: could not alloc DMA memory\n",
			__func__);
		goto no_dma;
	}

	/* Determine which DMA interface to use */
#ifdef CONFIG_MMC_DW_IDMAC
	host->dma_ops = &dw_mci_idmac_ops;
	dev_info(host->dev, "Using internal DMA controller.\n");
#endif

	if (!host->dma_ops)
		goto no_dma;

	/* all four callbacks are required before DMA can be trusted */
	if (host->dma_ops->init && host->dma_ops->start &&
	    host->dma_ops->stop && host->dma_ops->cleanup) {
		if (host->dma_ops->init(host)) {
			dev_err(host->dev, "%s: Unable to initialize "
				"DMA Controller.\n", __func__);
			goto no_dma;
		}
	} else {
		dev_err(host->dev, "DMA initialization not found.\n");
		goto no_dma;
	}

	host->use_dma = 1;
	return;

no_dma:
	dev_info(host->dev, "Using PIO mode.\n");
	host->use_dma = 0;
	return;
}
2236
Seungwon Jeon31bff452013-08-31 00:14:23 +09002237static bool dw_mci_ctrl_reset(struct dw_mci *host, u32 reset)
Will Newtonf95f3852011-01-02 01:11:59 -05002238{
2239 unsigned long timeout = jiffies + msecs_to_jiffies(500);
Seungwon Jeon31bff452013-08-31 00:14:23 +09002240 u32 ctrl;
Will Newtonf95f3852011-01-02 01:11:59 -05002241
Seungwon Jeon31bff452013-08-31 00:14:23 +09002242 ctrl = mci_readl(host, CTRL);
2243 ctrl |= reset;
2244 mci_writel(host, CTRL, ctrl);
Will Newtonf95f3852011-01-02 01:11:59 -05002245
2246 /* wait till resets clear */
2247 do {
2248 ctrl = mci_readl(host, CTRL);
Seungwon Jeon31bff452013-08-31 00:14:23 +09002249 if (!(ctrl & reset))
Will Newtonf95f3852011-01-02 01:11:59 -05002250 return true;
2251 } while (time_before(jiffies, timeout));
2252
Seungwon Jeon31bff452013-08-31 00:14:23 +09002253 dev_err(host->dev,
2254 "Timeout resetting block (ctrl reset %#x)\n",
2255 ctrl & reset);
Will Newtonf95f3852011-01-02 01:11:59 -05002256
2257 return false;
2258}
2259
Seungwon Jeon31bff452013-08-31 00:14:23 +09002260static inline bool dw_mci_fifo_reset(struct dw_mci *host)
2261{
2262 /*
2263 * Reseting generates a block interrupt, hence setting
2264 * the scatter-gather pointer to NULL.
2265 */
2266 if (host->sg) {
2267 sg_miter_stop(&host->sg_miter);
2268 host->sg = NULL;
2269 }
2270
2271 return dw_mci_ctrl_reset(host, SDMMC_CTRL_FIFO_RESET);
2272}
2273
2274static inline bool dw_mci_ctrl_all_reset(struct dw_mci *host)
2275{
2276 return dw_mci_ctrl_reset(host,
2277 SDMMC_CTRL_FIFO_RESET |
2278 SDMMC_CTRL_RESET |
2279 SDMMC_CTRL_DMA_RESET);
2280}
2281
Thomas Abrahamc91eab42012-09-17 18:16:40 +00002282#ifdef CONFIG_OF
/* Map of host-level DT quirk property names to DW_MCI_QUIRK_* flags. */
static struct dw_mci_of_quirks {
	char *quirk;	/* device-tree property name */
	int id;		/* quirk flag OR'd into pdata->quirks */
} of_quirks[] = {
	{
		.quirk	= "broken-cd",
		.id	= DW_MCI_QUIRK_BROKEN_CARD_DETECTION,
	},
};
2292
/*
 * Build a dw_mci_board platform-data structure from the device tree:
 * slot count, quirks, FIFO depth, card-detect delay, bus clock, plus
 * any variant-specific parsing supplied via drv_data->parse_dt.
 *
 * Returns the allocated pdata (devm-managed) or an ERR_PTR on
 * allocation failure or variant parse failure.
 */
static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
{
	struct dw_mci_board *pdata;
	struct device *dev = host->dev;
	struct device_node *np = dev->of_node;
	const struct dw_mci_drv_data *drv_data = host->drv_data;
	int idx, ret;
	u32 clock_frequency;

	pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
	if (!pdata) {
		dev_err(dev, "could not allocate memory for pdata\n");
		return ERR_PTR(-ENOMEM);
	}

	/* find out number of slots supported */
	if (of_property_read_u32(dev->of_node, "num-slots",
				 &pdata->num_slots)) {
		dev_info(dev, "num-slots property not found, "
			 "assuming 1 slot is available\n");
		pdata->num_slots = 1;
	}

	/* get quirks */
	for (idx = 0; idx < ARRAY_SIZE(of_quirks); idx++)
		if (of_get_property(np, of_quirks[idx].quirk, NULL))
			pdata->quirks |= of_quirks[idx].id;

	/* fifo_depth == 0 means "read it from FIFOTH later in probe" */
	if (of_property_read_u32(np, "fifo-depth", &pdata->fifo_depth))
		dev_info(dev, "fifo-depth property not found, using "
			 "value of FIFOTH register as default\n");

	of_property_read_u32(np, "card-detect-delay", &pdata->detect_delay_ms);

	if (!of_property_read_u32(np, "clock-frequency", &clock_frequency))
		pdata->bus_hz = clock_frequency;

	/* give the SoC-specific variant a chance to parse its own props */
	if (drv_data && drv_data->parse_dt) {
		ret = drv_data->parse_dt(host);
		if (ret)
			return ERR_PTR(ret);
	}

	if (of_find_property(np, "supports-highspeed", NULL))
		pdata->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED;

	return pdata;
}
2341
2342#else /* CONFIG_OF */
/* !CONFIG_OF stub: no device tree, so platform data cannot come from DT. */
static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
{
	return ERR_PTR(-EINVAL);
}
2347#endif /* CONFIG_OF */
2348
Shashidhar Hiremath62ca8032012-01-13 16:04:57 +05302349int dw_mci_probe(struct dw_mci *host)
Will Newtonf95f3852011-01-02 01:11:59 -05002350{
Arnd Bergmanne95baf12012-11-08 14:26:11 +00002351 const struct dw_mci_drv_data *drv_data = host->drv_data;
Shashidhar Hiremath62ca8032012-01-13 16:04:57 +05302352 int width, i, ret = 0;
Will Newtonf95f3852011-01-02 01:11:59 -05002353 u32 fifo_size;
Thomas Abraham1c2215b2012-09-17 18:16:37 +00002354 int init_slots = 0;
Will Newtonf95f3852011-01-02 01:11:59 -05002355
Thomas Abrahamc91eab42012-09-17 18:16:40 +00002356 if (!host->pdata) {
2357 host->pdata = dw_mci_parse_dt(host);
2358 if (IS_ERR(host->pdata)) {
2359 dev_err(host->dev, "platform data not available\n");
2360 return -EINVAL;
2361 }
Will Newtonf95f3852011-01-02 01:11:59 -05002362 }
2363
Jaehoon Chung907abd52014-03-03 11:36:43 +09002364 if (host->pdata->num_slots > 1) {
Thomas Abraham4a909202012-09-17 18:16:35 +00002365 dev_err(host->dev,
Jaehoon Chung907abd52014-03-03 11:36:43 +09002366 "Platform data must supply num_slots.\n");
Shashidhar Hiremath62ca8032012-01-13 16:04:57 +05302367 return -ENODEV;
Will Newtonf95f3852011-01-02 01:11:59 -05002368 }
2369
Seungwon Jeon780f22a2012-11-28 19:26:03 +09002370 host->biu_clk = devm_clk_get(host->dev, "biu");
Thomas Abrahamf90a0612012-09-17 18:16:38 +00002371 if (IS_ERR(host->biu_clk)) {
2372 dev_dbg(host->dev, "biu clock not available\n");
2373 } else {
2374 ret = clk_prepare_enable(host->biu_clk);
2375 if (ret) {
2376 dev_err(host->dev, "failed to enable biu clock\n");
Thomas Abrahamf90a0612012-09-17 18:16:38 +00002377 return ret;
2378 }
Will Newtonf95f3852011-01-02 01:11:59 -05002379 }
2380
Seungwon Jeon780f22a2012-11-28 19:26:03 +09002381 host->ciu_clk = devm_clk_get(host->dev, "ciu");
Thomas Abrahamf90a0612012-09-17 18:16:38 +00002382 if (IS_ERR(host->ciu_clk)) {
2383 dev_dbg(host->dev, "ciu clock not available\n");
Doug Anderson3c6d89e2013-06-07 10:28:30 -07002384 host->bus_hz = host->pdata->bus_hz;
Thomas Abrahamf90a0612012-09-17 18:16:38 +00002385 } else {
2386 ret = clk_prepare_enable(host->ciu_clk);
2387 if (ret) {
2388 dev_err(host->dev, "failed to enable ciu clock\n");
Thomas Abrahamf90a0612012-09-17 18:16:38 +00002389 goto err_clk_biu;
2390 }
Thomas Abrahamf90a0612012-09-17 18:16:38 +00002391
Doug Anderson3c6d89e2013-06-07 10:28:30 -07002392 if (host->pdata->bus_hz) {
2393 ret = clk_set_rate(host->ciu_clk, host->pdata->bus_hz);
2394 if (ret)
2395 dev_warn(host->dev,
Jaehoon Chung612de4c2014-03-03 11:36:42 +09002396 "Unable to set bus rate to %uHz\n",
Doug Anderson3c6d89e2013-06-07 10:28:30 -07002397 host->pdata->bus_hz);
2398 }
Thomas Abrahamf90a0612012-09-17 18:16:38 +00002399 host->bus_hz = clk_get_rate(host->ciu_clk);
Doug Anderson3c6d89e2013-06-07 10:28:30 -07002400 }
Thomas Abrahamf90a0612012-09-17 18:16:38 +00002401
Jaehoon Chung612de4c2014-03-03 11:36:42 +09002402 if (!host->bus_hz) {
2403 dev_err(host->dev,
2404 "Platform data must supply bus speed\n");
2405 ret = -ENODEV;
2406 goto err_clk_ciu;
2407 }
2408
Yuvaraj Kumar C D002f0d52013-08-31 00:12:19 +09002409 if (drv_data && drv_data->init) {
2410 ret = drv_data->init(host);
2411 if (ret) {
2412 dev_err(host->dev,
2413 "implementation specific init failed\n");
2414 goto err_clk_ciu;
2415 }
2416 }
2417
James Hogancb27a842012-10-16 09:43:08 +01002418 if (drv_data && drv_data->setup_clock) {
2419 ret = drv_data->setup_clock(host);
Thomas Abraham800d78b2012-09-17 18:16:42 +00002420 if (ret) {
2421 dev_err(host->dev,
2422 "implementation specific clock setup failed\n");
2423 goto err_clk_ciu;
2424 }
2425 }
2426
Mark Browna55d6ff2013-07-29 21:55:27 +01002427 host->vmmc = devm_regulator_get_optional(host->dev, "vmmc");
Doug Anderson870556a2013-06-07 10:28:29 -07002428 if (IS_ERR(host->vmmc)) {
2429 ret = PTR_ERR(host->vmmc);
2430 if (ret == -EPROBE_DEFER)
2431 goto err_clk_ciu;
2432
2433 dev_info(host->dev, "no vmmc regulator found: %d\n", ret);
2434 host->vmmc = NULL;
2435 } else {
2436 ret = regulator_enable(host->vmmc);
2437 if (ret) {
2438 if (ret != -EPROBE_DEFER)
2439 dev_err(host->dev,
2440 "regulator_enable fail: %d\n", ret);
2441 goto err_clk_ciu;
2442 }
2443 }
2444
Shashidhar Hiremath62ca8032012-01-13 16:04:57 +05302445 host->quirks = host->pdata->quirks;
Will Newtonf95f3852011-01-02 01:11:59 -05002446
2447 spin_lock_init(&host->lock);
2448 INIT_LIST_HEAD(&host->queue);
2449
Will Newtonf95f3852011-01-02 01:11:59 -05002450 /*
2451 * Get the host data width - this assumes that HCON has been set with
2452 * the correct values.
2453 */
2454 i = (mci_readl(host, HCON) >> 7) & 0x7;
2455 if (!i) {
2456 host->push_data = dw_mci_push_data16;
2457 host->pull_data = dw_mci_pull_data16;
2458 width = 16;
2459 host->data_shift = 1;
2460 } else if (i == 2) {
2461 host->push_data = dw_mci_push_data64;
2462 host->pull_data = dw_mci_pull_data64;
2463 width = 64;
2464 host->data_shift = 3;
2465 } else {
2466 /* Check for a reserved value, and warn if it is */
2467 WARN((i != 1),
2468 "HCON reports a reserved host data width!\n"
2469 "Defaulting to 32-bit access.\n");
2470 host->push_data = dw_mci_push_data32;
2471 host->pull_data = dw_mci_pull_data32;
2472 width = 32;
2473 host->data_shift = 2;
2474 }
2475
2476 /* Reset all blocks */
Seungwon Jeon31bff452013-08-31 00:14:23 +09002477 if (!dw_mci_ctrl_all_reset(host))
Seungwon Jeon141a7122012-05-22 13:01:03 +09002478 return -ENODEV;
2479
2480 host->dma_ops = host->pdata->dma_ops;
2481 dw_mci_init_dma(host);
Will Newtonf95f3852011-01-02 01:11:59 -05002482
2483 /* Clear the interrupts for the host controller */
2484 mci_writel(host, RINTSTS, 0xFFFFFFFF);
2485 mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */
2486
2487 /* Put in max timeout */
2488 mci_writel(host, TMOUT, 0xFFFFFFFF);
2489
2490 /*
2491 * FIFO threshold settings RxMark = fifo_size / 2 - 1,
2492 * Tx Mark = fifo_size / 2 DMA Size = 8
2493 */
James Hoganb86d8252011-06-24 13:57:18 +01002494 if (!host->pdata->fifo_depth) {
2495 /*
2496 * Power-on value of RX_WMark is FIFO_DEPTH-1, but this may
2497 * have been overwritten by the bootloader, just like we're
2498 * about to do, so if you know the value for your hardware, you
2499 * should put it in the platform data.
2500 */
2501 fifo_size = mci_readl(host, FIFOTH);
Jaehoon Chung8234e862012-01-11 09:28:21 +00002502 fifo_size = 1 + ((fifo_size >> 16) & 0xfff);
James Hoganb86d8252011-06-24 13:57:18 +01002503 } else {
2504 fifo_size = host->pdata->fifo_depth;
2505 }
2506 host->fifo_depth = fifo_size;
Seungwon Jeon52426892013-08-31 00:13:42 +09002507 host->fifoth_val =
2508 SDMMC_SET_FIFOTH(0x2, fifo_size / 2 - 1, fifo_size / 2);
Jaehoon Chunge61cf112011-03-17 20:32:33 +09002509 mci_writel(host, FIFOTH, host->fifoth_val);
Will Newtonf95f3852011-01-02 01:11:59 -05002510
2511 /* disable clock to CIU */
2512 mci_writel(host, CLKENA, 0);
2513 mci_writel(host, CLKSRC, 0);
2514
James Hogan63008762013-03-12 10:43:54 +00002515 /*
2516 * In 2.40a spec, Data offset is changed.
2517 * Need to check the version-id and set data-offset for DATA register.
2518 */
2519 host->verid = SDMMC_GET_VERID(mci_readl(host, VERID));
2520 dev_info(host->dev, "Version ID is %04x\n", host->verid);
2521
2522 if (host->verid < DW_MMC_240A)
2523 host->data_offset = DATA_OFFSET;
2524 else
2525 host->data_offset = DATA_240A_OFFSET;
2526
Will Newtonf95f3852011-01-02 01:11:59 -05002527 tasklet_init(&host->tasklet, dw_mci_tasklet_func, (unsigned long)host);
Thomas Abraham95dcc2c2012-05-01 14:57:36 -07002528 host->card_workqueue = alloc_workqueue("dw-mci-card",
ZhangZhen59ff3eb2014-03-27 09:41:47 +08002529 WQ_MEM_RECLAIM, 1);
Wei Yongjunef7aef92013-04-19 09:25:45 +08002530 if (!host->card_workqueue) {
2531 ret = -ENOMEM;
James Hogan1791b13e2011-06-24 13:55:55 +01002532 goto err_dmaunmap;
Wei Yongjunef7aef92013-04-19 09:25:45 +08002533 }
James Hogan1791b13e2011-06-24 13:55:55 +01002534 INIT_WORK(&host->card_work, dw_mci_work_routine_card);
Seungwon Jeon780f22a2012-11-28 19:26:03 +09002535 ret = devm_request_irq(host->dev, host->irq, dw_mci_interrupt,
2536 host->irq_flags, "dw-mci", host);
Will Newtonf95f3852011-01-02 01:11:59 -05002537 if (ret)
James Hogan1791b13e2011-06-24 13:55:55 +01002538 goto err_workqueue;
Will Newtonf95f3852011-01-02 01:11:59 -05002539
Will Newtonf95f3852011-01-02 01:11:59 -05002540 if (host->pdata->num_slots)
2541 host->num_slots = host->pdata->num_slots;
2542 else
2543 host->num_slots = ((mci_readl(host, HCON) >> 1) & 0x1F) + 1;
2544
Yuvaraj CD2da1d7f2012-10-08 14:29:51 +05302545 /*
2546 * Enable interrupts for command done, data over, data empty, card det,
2547 * receive ready and error such as transmit, receive timeout, crc error
2548 */
2549 mci_writel(host, RINTSTS, 0xFFFFFFFF);
2550 mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER |
2551 SDMMC_INT_TXDR | SDMMC_INT_RXDR |
2552 DW_MCI_ERROR_FLAGS | SDMMC_INT_CD);
2553 mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE); /* Enable mci interrupt */
2554
2555 dev_info(host->dev, "DW MMC controller at irq %d, "
2556 "%d bit host data width, "
2557 "%u deep fifo\n",
2558 host->irq, width, fifo_size);
2559
Will Newtonf95f3852011-01-02 01:11:59 -05002560 /* We need at least one slot to succeed */
2561 for (i = 0; i < host->num_slots; i++) {
2562 ret = dw_mci_init_slot(host, i);
Thomas Abraham1c2215b2012-09-17 18:16:37 +00002563 if (ret)
2564 dev_dbg(host->dev, "slot %d init failed\n", i);
2565 else
2566 init_slots++;
2567 }
2568
2569 if (init_slots) {
2570 dev_info(host->dev, "%d slots initialized\n", init_slots);
2571 } else {
2572 dev_dbg(host->dev, "attempted to initialize %d slots, "
2573 "but failed on all\n", host->num_slots);
Seungwon Jeon780f22a2012-11-28 19:26:03 +09002574 goto err_workqueue;
Will Newtonf95f3852011-01-02 01:11:59 -05002575 }
2576
Will Newtonf95f3852011-01-02 01:11:59 -05002577 if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO)
Thomas Abraham4a909202012-09-17 18:16:35 +00002578 dev_info(host->dev, "Internal DMAC interrupt fix enabled.\n");
Will Newtonf95f3852011-01-02 01:11:59 -05002579
2580 return 0;
2581
James Hogan1791b13e2011-06-24 13:55:55 +01002582err_workqueue:
Thomas Abraham95dcc2c2012-05-01 14:57:36 -07002583 destroy_workqueue(host->card_workqueue);
James Hogan1791b13e2011-06-24 13:55:55 +01002584
Will Newtonf95f3852011-01-02 01:11:59 -05002585err_dmaunmap:
2586 if (host->use_dma && host->dma_ops->exit)
2587 host->dma_ops->exit(host);
Seungwon Jeon780f22a2012-11-28 19:26:03 +09002588 if (host->vmmc)
Jaehoon Chungc07946a2011-02-25 11:08:14 +09002589 regulator_disable(host->vmmc);
Thomas Abrahamf90a0612012-09-17 18:16:38 +00002590
2591err_clk_ciu:
Seungwon Jeon780f22a2012-11-28 19:26:03 +09002592 if (!IS_ERR(host->ciu_clk))
Thomas Abrahamf90a0612012-09-17 18:16:38 +00002593 clk_disable_unprepare(host->ciu_clk);
Seungwon Jeon780f22a2012-11-28 19:26:03 +09002594
Thomas Abrahamf90a0612012-09-17 18:16:38 +00002595err_clk_biu:
Seungwon Jeon780f22a2012-11-28 19:26:03 +09002596 if (!IS_ERR(host->biu_clk))
Thomas Abrahamf90a0612012-09-17 18:16:38 +00002597 clk_disable_unprepare(host->biu_clk);
Seungwon Jeon780f22a2012-11-28 19:26:03 +09002598
Will Newtonf95f3852011-01-02 01:11:59 -05002599 return ret;
2600}
Shashidhar Hiremath62ca8032012-01-13 16:04:57 +05302601EXPORT_SYMBOL(dw_mci_probe);
Will Newtonf95f3852011-01-02 01:11:59 -05002602
/*
 * dw_mci_remove - tear down a DesignWare MMC host.
 * @host: host instance previously set up by dw_mci_probe().
 *
 * Undoes probe in reverse order: interrupts are masked first so no ISR
 * runs against half-torn-down state, then slots, clocks, workqueue, DMA,
 * regulator, and finally the bus/controller clocks are released.
 */
void dw_mci_remove(struct dw_mci *host)
{
	int i;

	/* Ack any pending interrupt status, then mask everything. */
	mci_writel(host, RINTSTS, 0xFFFFFFFF);
	mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */

	/* Unregister every populated slot from the MMC core. */
	for (i = 0; i < host->num_slots; i++) {
		dev_dbg(host->dev, "remove slot %d\n", i);
		if (host->slot[i])
			dw_mci_cleanup_slot(host->slot[i], i);
	}

	/* disable clock to CIU */
	mci_writel(host, CLKENA, 0);
	mci_writel(host, CLKSRC, 0);

	/* Flush and free the card-detect workqueue created in probe. */
	destroy_workqueue(host->card_workqueue);

	/* Release DMA resources if a DMA backend was initialized. */
	if (host->use_dma && host->dma_ops->exit)
		host->dma_ops->exit(host);

	/* Balance the regulator_enable() done during probe, if present. */
	if (host->vmmc)
		regulator_disable(host->vmmc);

	/* Clocks may legitimately be error pointers on platforms without them. */
	if (!IS_ERR(host->ciu_clk))
		clk_disable_unprepare(host->ciu_clk);

	if (!IS_ERR(host->biu_clk))
		clk_disable_unprepare(host->biu_clk);
}
EXPORT_SYMBOL(dw_mci_remove);
2635
2636
Will Newtonf95f3852011-01-02 01:11:59 -05002637
Jaehoon Chung6fe88902011-12-08 19:23:03 +09002638#ifdef CONFIG_PM_SLEEP
/*
 * TODO: we should probably disable the clock to the card in the suspend path.
 */
/*
 * dw_mci_suspend - system-sleep suspend hook for platform glue drivers.
 * @host: host instance to quiesce.
 *
 * Only drops the card regulator; controller register state is rebuilt from
 * scratch in dw_mci_resume(). Always returns 0.
 */
int dw_mci_suspend(struct dw_mci *host)
{
	if (host->vmmc)
		regulator_disable(host->vmmc);

	return 0;
}
EXPORT_SYMBOL(dw_mci_suspend);
Will Newtonf95f3852011-01-02 01:11:59 -05002650
/*
 * dw_mci_resume - system-sleep resume hook for platform glue drivers.
 * @host: host instance to reinitialize.
 *
 * Re-enables the card regulator, resets the controller, and restores the
 * register state programmed at probe time (FIFOTH, timeout, interrupt
 * mask). Slots flagged MMC_PM_KEEP_POWER get their bus re-programmed so
 * powered cards (e.g. SDIO WiFi) keep working across the sleep.
 *
 * Returns 0 on success, a negative errno if the regulator cannot be
 * enabled or the controller fails to come out of reset.
 */
int dw_mci_resume(struct dw_mci *host)
{
	int i, ret;

	if (host->vmmc) {
		ret = regulator_enable(host->vmmc);
		if (ret) {
			dev_err(host->dev,
				"failed to enable regulator: %d\n", ret);
			return ret;
		}
	}

	/* Full controller/FIFO/DMA reset; failure means dead hardware. */
	if (!dw_mci_ctrl_all_reset(host)) {
		ret = -ENODEV;
		return ret;
	}

	/* Re-init the DMA backend before any transfer can be issued. */
	if (host->use_dma && host->dma_ops->init)
		host->dma_ops->init(host);

	/*
	 * Restore the initial value at FIFOTH register
	 * And Invalidate the prev_blksz with zero
	 */
	mci_writel(host, FIFOTH, host->fifoth_val);
	host->prev_blksz = 0;

	/* Put in max timeout */
	mci_writel(host, TMOUT, 0xFFFFFFFF);

	/* Clear stale status, then re-enable the same IRQ set as probe. */
	mci_writel(host, RINTSTS, 0xFFFFFFFF);
	mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER |
		   SDMMC_INT_TXDR | SDMMC_INT_RXDR |
		   DW_MCI_ERROR_FLAGS | SDMMC_INT_CD);
	mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE);

	for (i = 0; i < host->num_slots; i++) {
		struct dw_mci_slot *slot = host->slot[i];
		if (!slot)
			continue;
		/* Cards kept powered over suspend need ios/bus re-applied. */
		if (slot->mmc->pm_flags & MMC_PM_KEEP_POWER) {
			dw_mci_set_ios(slot->mmc, &slot->mmc->ios);
			dw_mci_setup_bus(slot, true);
		}
	}
	return 0;
}
EXPORT_SYMBOL(dw_mci_resume);
Jaehoon Chung6fe88902011-12-08 19:23:03 +09002700#endif /* CONFIG_PM_SLEEP */
2701
Will Newtonf95f3852011-01-02 01:11:59 -05002702static int __init dw_mci_init(void)
2703{
Sachin Kamat8e1c4e42013-04-04 11:25:11 +05302704 pr_info("Synopsys Designware Multimedia Card Interface Driver\n");
Shashidhar Hiremath62ca8032012-01-13 16:04:57 +05302705 return 0;
Will Newtonf95f3852011-01-02 01:11:59 -05002706}
2707
/*
 * Module exit hook: intentionally empty — all per-device teardown happens
 * in dw_mci_remove(), invoked by the glue drivers that own the devices.
 */
static void __exit dw_mci_exit(void)
{
}
2711
/* Standard module entry/exit registration and metadata. */
module_init(dw_mci_init);
module_exit(dw_mci_exit);

MODULE_DESCRIPTION("DW Multimedia Card Interface driver");
MODULE_AUTHOR("NXP Semiconductor VietNam");
MODULE_AUTHOR("Imagination Technologies Ltd");
MODULE_LICENSE("GPL v2");