blob: 441ca447c4ba5832c5d69f4666e7ec8d491d9388 [file] [log] [blame]
Will Newtonf95f3852011-01-02 01:11:59 -05001/*
2 * Synopsys DesignWare Multimedia Card Interface driver
3 * (Based on NXP driver for lpc 31xx)
4 *
5 * Copyright (C) 2009 NXP Semiconductors
6 * Copyright (C) 2009, 2010 Imagination Technologies Ltd.
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 */
13
14#include <linux/blkdev.h>
15#include <linux/clk.h>
16#include <linux/debugfs.h>
17#include <linux/device.h>
18#include <linux/dma-mapping.h>
19#include <linux/err.h>
20#include <linux/init.h>
21#include <linux/interrupt.h>
22#include <linux/ioport.h>
23#include <linux/module.h>
24#include <linux/platform_device.h>
Will Newtonf95f3852011-01-02 01:11:59 -050025#include <linux/seq_file.h>
26#include <linux/slab.h>
27#include <linux/stat.h>
28#include <linux/delay.h>
29#include <linux/irq.h>
Doug Andersonb24c8b22014-12-02 15:42:46 -080030#include <linux/mmc/card.h>
Will Newtonf95f3852011-01-02 01:11:59 -050031#include <linux/mmc/host.h>
32#include <linux/mmc/mmc.h>
Doug Anderson01730552014-08-22 19:17:51 +053033#include <linux/mmc/sd.h>
Seungwon Jeon90c21432013-08-31 00:14:05 +090034#include <linux/mmc/sdio.h>
Will Newtonf95f3852011-01-02 01:11:59 -050035#include <linux/mmc/dw_mmc.h>
36#include <linux/bitops.h>
Jaehoon Chungc07946a2011-02-25 11:08:14 +090037#include <linux/regulator/consumer.h>
Thomas Abrahamc91eab42012-09-17 18:16:40 +000038#include <linux/of.h>
Doug Anderson55a6ceb2013-01-11 17:03:53 +000039#include <linux/of_gpio.h>
Zhangfei Gaobf626e52014-01-09 22:35:10 +080040#include <linux/mmc/slot-gpio.h>
Will Newtonf95f3852011-01-02 01:11:59 -050041
42#include "dw_mmc.h"
43
44/* Common flag combinations */
Jaehoon Chung3f7eec62013-05-27 13:47:57 +090045#define DW_MCI_DATA_ERROR_FLAGS (SDMMC_INT_DRTO | SDMMC_INT_DCRC | \
Will Newtonf95f3852011-01-02 01:11:59 -050046 SDMMC_INT_HTO | SDMMC_INT_SBE | \
Doug Anderson7a3c5672015-03-10 08:48:10 -070047 SDMMC_INT_EBE | SDMMC_INT_HLE)
Will Newtonf95f3852011-01-02 01:11:59 -050048#define DW_MCI_CMD_ERROR_FLAGS (SDMMC_INT_RTO | SDMMC_INT_RCRC | \
Doug Anderson7a3c5672015-03-10 08:48:10 -070049 SDMMC_INT_RESP_ERR | SDMMC_INT_HLE)
Will Newtonf95f3852011-01-02 01:11:59 -050050#define DW_MCI_ERROR_FLAGS (DW_MCI_DATA_ERROR_FLAGS | \
Doug Anderson7a3c5672015-03-10 08:48:10 -070051 DW_MCI_CMD_ERROR_FLAGS)
Will Newtonf95f3852011-01-02 01:11:59 -050052#define DW_MCI_SEND_STATUS 1
53#define DW_MCI_RECV_STATUS 2
54#define DW_MCI_DMA_THRESHOLD 16
55
Seungwon Jeon1f44a2a2013-08-31 00:13:31 +090056#define DW_MCI_FREQ_MAX 200000000 /* unit: HZ */
57#define DW_MCI_FREQ_MIN 400000 /* unit: HZ */
58
Joonyoung Shimfc79a4d2013-04-26 15:35:22 +090059#define IDMAC_INT_CLR (SDMMC_IDMAC_INT_AI | SDMMC_IDMAC_INT_NI | \
60 SDMMC_IDMAC_INT_CES | SDMMC_IDMAC_INT_DU | \
61 SDMMC_IDMAC_INT_FBE | SDMMC_IDMAC_INT_RI | \
62 SDMMC_IDMAC_INT_TI)
63
Shawn Lincc190d42016-09-02 12:14:39 +080064#define DESC_RING_BUF_SZ PAGE_SIZE
65
/*
 * Internal DMAC (IDMAC) descriptor for controllers configured with 64-bit
 * addressing: 32-byte layout with split lo/hi buffer and next pointers.
 * The ring is chained via des6/des7; see dw_mci_idmac_init().
 */
struct idmac_desc_64addr {
	u32		des0;	/* Control Descriptor */

	u32		des1;	/* Reserved */

	u32		des2;	/* Buffer sizes */
/* Buffer 1 size lives in the low 13 bits of des2 (max 8 KiB - 1). */
#define IDMAC_64ADDR_SET_BUFFER1_SIZE(d, s) \
	((d)->des2 = ((d)->des2 & cpu_to_le32(0x03ffe000)) | \
	 ((cpu_to_le32(s)) & cpu_to_le32(0x1fff)))

	u32		des3;	/* Reserved */

	u32		des4;	/* Lower 32-bits of Buffer Address Pointer 1*/
	u32		des5;	/* Upper 32-bits of Buffer Address Pointer 1*/

	u32		des6;	/* Lower 32-bits of Next Descriptor Address */
	u32		des7;	/* Upper 32-bits of Next Descriptor Address */
};
84
/*
 * Internal DMAC (IDMAC) descriptor, 32-bit addressing variant.
 * Fields are little-endian as seen by the controller (__le32).
 * des3 holds the next-descriptor address when IDMAC_DES0_CH is set.
 */
struct idmac_desc {
	__le32		des0;	/* Control Descriptor */
#define IDMAC_DES0_DIC	BIT(1)	/* disable interrupt on completion */
#define IDMAC_DES0_LD	BIT(2)	/* last descriptor of a transfer */
#define IDMAC_DES0_FD	BIT(3)	/* first descriptor of a transfer */
#define IDMAC_DES0_CH	BIT(4)	/* second address is next-descriptor chain */
#define IDMAC_DES0_ER	BIT(5)	/* end of descriptor ring */
#define IDMAC_DES0_CES	BIT(30)	/* card error summary */
#define IDMAC_DES0_OWN	BIT(31)	/* descriptor owned by the IDMAC */

	__le32		des1;	/* Buffer sizes */
/* Buffer 1 size lives in the low 13 bits of des1 (max 8 KiB - 1). */
#define IDMAC_SET_BUFFER1_SIZE(d, s) \
	((d)->des1 = ((d)->des1 & cpu_to_le32(0x03ffe000)) | (cpu_to_le32((s) & 0x1fff)))

	__le32		des2;	/* buffer 1 physical address */

	__le32		des3;	/* buffer 2 physical address */
};
Alexey Brodkin5959b322015-06-25 11:25:07 +0300103
104/* Each descriptor can transfer up to 4KB of data in chained mode */
105#define DW_MCI_DESC_DATA_LENGTH 0x1000
Will Newtonf95f3852011-01-02 01:11:59 -0500106
Sonny Rao3a33a942014-08-04 18:19:50 -0700107static bool dw_mci_reset(struct dw_mci *host);
Sonny Rao536f6b92014-10-16 09:58:05 -0700108static bool dw_mci_ctrl_reset(struct dw_mci *host, u32 reset);
Doug Anderson0bdbd0e2015-02-20 12:31:56 -0800109static int dw_mci_card_busy(struct mmc_host *mmc);
Shawn Lin56f69112016-05-27 14:37:05 +0800110static int dw_mci_get_cd(struct mmc_host *mmc);
Seungwon Jeon31bff452013-08-31 00:14:23 +0900111
Will Newtonf95f3852011-01-02 01:11:59 -0500112#if defined(CONFIG_DEBUG_FS)
113static int dw_mci_req_show(struct seq_file *s, void *v)
114{
115 struct dw_mci_slot *slot = s->private;
116 struct mmc_request *mrq;
117 struct mmc_command *cmd;
118 struct mmc_command *stop;
119 struct mmc_data *data;
120
121 /* Make sure we get a consistent snapshot */
122 spin_lock_bh(&slot->host->lock);
123 mrq = slot->mrq;
124
125 if (mrq) {
126 cmd = mrq->cmd;
127 data = mrq->data;
128 stop = mrq->stop;
129
130 if (cmd)
131 seq_printf(s,
132 "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
133 cmd->opcode, cmd->arg, cmd->flags,
134 cmd->resp[0], cmd->resp[1], cmd->resp[2],
135 cmd->resp[2], cmd->error);
136 if (data)
137 seq_printf(s, "DATA %u / %u * %u flg %x err %d\n",
138 data->bytes_xfered, data->blocks,
139 data->blksz, data->flags, data->error);
140 if (stop)
141 seq_printf(s,
142 "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
143 stop->opcode, stop->arg, stop->flags,
144 stop->resp[0], stop->resp[1], stop->resp[2],
145 stop->resp[2], stop->error);
146 }
147
148 spin_unlock_bh(&slot->host->lock);
149
150 return 0;
151}
152
153static int dw_mci_req_open(struct inode *inode, struct file *file)
154{
155 return single_open(file, dw_mci_req_show, inode->i_private);
156}
157
/* File operations for the read-only, seq_file-backed debugfs "req" node. */
static const struct file_operations dw_mci_req_fops = {
	.owner		= THIS_MODULE,
	.open		= dw_mci_req_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
165
/*
 * debugfs "regs" show: dump a selection of live controller registers.
 * Reads are not serialized against the IRQ handler; this is a
 * best-effort snapshot for debugging only.
 */
static int dw_mci_regs_show(struct seq_file *s, void *v)
{
	struct dw_mci *host = s->private;

	seq_printf(s, "STATUS:\t0x%08x\n", mci_readl(host, STATUS));
	seq_printf(s, "RINTSTS:\t0x%08x\n", mci_readl(host, RINTSTS));
	seq_printf(s, "CMD:\t0x%08x\n", mci_readl(host, CMD));
	seq_printf(s, "CTRL:\t0x%08x\n", mci_readl(host, CTRL));
	seq_printf(s, "INTMASK:\t0x%08x\n", mci_readl(host, INTMASK));
	seq_printf(s, "CLKENA:\t0x%08x\n", mci_readl(host, CLKENA));

	return 0;
}
179
180static int dw_mci_regs_open(struct inode *inode, struct file *file)
181{
182 return single_open(file, dw_mci_regs_show, inode->i_private);
183}
184
/* File operations for the read-only, seq_file-backed debugfs "regs" node. */
static const struct file_operations dw_mci_regs_fops = {
	.owner		= THIS_MODULE,
	.open		= dw_mci_regs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
192
/*
 * Create the per-slot debugfs entries (regs, req, state, event masks)
 * under the mmc host's debugfs root. Failure is non-fatal: we just log
 * and carry on without debugfs.
 */
static void dw_mci_init_debugfs(struct dw_mci_slot *slot)
{
	struct mmc_host *mmc = slot->mmc;
	struct dw_mci *host = slot->host;
	struct dentry *root;
	struct dentry *node;

	root = mmc->debugfs_root;
	if (!root)
		return;

	node = debugfs_create_file("regs", S_IRUSR, root, host,
				   &dw_mci_regs_fops);
	if (!node)
		goto err;

	node = debugfs_create_file("req", S_IRUSR, root, slot,
				   &dw_mci_req_fops);
	if (!node)
		goto err;

	/* NOTE(review): casting &host->state to u32 * assumes the enum is
	 * 32 bits wide on all supported arches — true in practice here. */
	node = debugfs_create_u32("state", S_IRUSR, root, (u32 *)&host->state);
	if (!node)
		goto err;

	node = debugfs_create_x32("pending_events", S_IRUSR, root,
				  (u32 *)&host->pending_events);
	if (!node)
		goto err;

	node = debugfs_create_x32("completed_events", S_IRUSR, root,
				  (u32 *)&host->completed_events);
	if (!node)
		goto err;

	return;

err:
	dev_err(&mmc->class_dev, "failed to initialize debugfs for slot\n");
}
233#endif /* defined(CONFIG_DEBUG_FS) */
234
Doug Anderson01730552014-08-22 19:17:51 +0530235static void mci_send_cmd(struct dw_mci_slot *slot, u32 cmd, u32 arg);
236
/*
 * Translate an mmc_command into the SDMMC CMD register value.
 * Sets stop/abort, busy-wait, response and data-direction bits, and
 * performs the CMD11 (voltage switch) clock preparation as a side
 * effect. The CLKENA write ordering in the CMD11 path is deliberate —
 * do not reorder.
 */
static u32 dw_mci_prepare_command(struct mmc_host *mmc, struct mmc_command *cmd)
{
	struct mmc_data *data;
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;
	u32 cmdr;

	cmd->error = -EINPROGRESS;
	cmdr = cmd->opcode;

	/* Abort-class commands (and CCCR_ABORT writes) get the STOP bit;
	 * everything else with data waits for the previous data phase. */
	if (cmd->opcode == MMC_STOP_TRANSMISSION ||
	    cmd->opcode == MMC_GO_IDLE_STATE ||
	    cmd->opcode == MMC_GO_INACTIVE_STATE ||
	    (cmd->opcode == SD_IO_RW_DIRECT &&
	     ((cmd->arg >> 9) & 0x1FFFF) == SDIO_CCCR_ABORT))
		cmdr |= SDMMC_CMD_STOP;
	else if (cmd->opcode != MMC_SEND_STATUS && cmd->data)
		cmdr |= SDMMC_CMD_PRV_DAT_WAIT;

	if (cmd->opcode == SD_SWITCH_VOLTAGE) {
		u32 clk_en_a;

		/* Special bit makes CMD11 not die */
		cmdr |= SDMMC_CMD_VOLT_SWITCH;

		/* Change state to continue to handle CMD11 weirdness */
		WARN_ON(slot->host->state != STATE_SENDING_CMD);
		slot->host->state = STATE_SENDING_CMD11;

		/*
		 * We need to disable low power mode (automatic clock stop)
		 * while doing voltage switch so we don't confuse the card,
		 * since stopping the clock is a specific part of the UHS
		 * voltage change dance.
		 *
		 * Note that low power mode (SDMMC_CLKEN_LOW_PWR) will be
		 * unconditionally turned back on in dw_mci_setup_bus() if it's
		 * ever called with a non-zero clock. That shouldn't happen
		 * until the voltage change is all done.
		 */
		clk_en_a = mci_readl(host, CLKENA);
		clk_en_a &= ~(SDMMC_CLKEN_LOW_PWR << slot->id);
		mci_writel(host, CLKENA, clk_en_a);
		mci_send_cmd(slot, SDMMC_CMD_UPD_CLK |
			     SDMMC_CMD_PRV_DAT_WAIT, 0);
	}

	if (cmd->flags & MMC_RSP_PRESENT) {
		/* We expect a response, so set this bit */
		cmdr |= SDMMC_CMD_RESP_EXP;
		if (cmd->flags & MMC_RSP_136)
			cmdr |= SDMMC_CMD_RESP_LONG;
	}

	if (cmd->flags & MMC_RSP_CRC)
		cmdr |= SDMMC_CMD_RESP_CRC;

	data = cmd->data;
	if (data) {
		cmdr |= SDMMC_CMD_DAT_EXP;
		if (data->flags & MMC_DATA_WRITE)
			cmdr |= SDMMC_CMD_DAT_WR;
	}

	/* USE_HOLD_REG is the default unless the slot opted out */
	if (!test_bit(DW_MMC_CARD_NO_USE_HOLD, &slot->flags))
		cmdr |= SDMMC_CMD_USE_HOLD_REG;

	return cmdr;
}
306
Seungwon Jeon90c21432013-08-31 00:14:05 +0900307static u32 dw_mci_prep_stop_abort(struct dw_mci *host, struct mmc_command *cmd)
308{
309 struct mmc_command *stop;
310 u32 cmdr;
311
312 if (!cmd->data)
313 return 0;
314
315 stop = &host->stop_abort;
316 cmdr = cmd->opcode;
317 memset(stop, 0, sizeof(struct mmc_command));
318
319 if (cmdr == MMC_READ_SINGLE_BLOCK ||
320 cmdr == MMC_READ_MULTIPLE_BLOCK ||
321 cmdr == MMC_WRITE_BLOCK ||
Ulf Hansson6c2c6502014-12-01 16:13:39 +0100322 cmdr == MMC_WRITE_MULTIPLE_BLOCK ||
323 cmdr == MMC_SEND_TUNING_BLOCK ||
324 cmdr == MMC_SEND_TUNING_BLOCK_HS200) {
Seungwon Jeon90c21432013-08-31 00:14:05 +0900325 stop->opcode = MMC_STOP_TRANSMISSION;
326 stop->arg = 0;
327 stop->flags = MMC_RSP_R1B | MMC_CMD_AC;
328 } else if (cmdr == SD_IO_RW_EXTENDED) {
329 stop->opcode = SD_IO_RW_DIRECT;
330 stop->arg |= (1 << 31) | (0 << 28) | (SDIO_CCCR_ABORT << 9) |
331 ((cmd->arg >> 28) & 0x7);
332 stop->flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_AC;
333 } else {
334 return 0;
335 }
336
337 cmdr = stop->opcode | SDMMC_CMD_STOP |
338 SDMMC_CMD_RESP_CRC | SDMMC_CMD_RESP_EXP;
339
340 return cmdr;
341}
342
/*
 * Poll the STATUS register until the card stops signalling busy, with a
 * 500 ms cap. On timeout we only log and proceed — the subsequent
 * command will fail and report the error through the normal path.
 */
static void dw_mci_wait_while_busy(struct dw_mci *host, u32 cmd_flags)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(500);

	/*
	 * Databook says that before issuing a new data transfer command
	 * we need to check to see if the card is busy. Data transfer commands
	 * all have SDMMC_CMD_PRV_DAT_WAIT set, so we'll key off that.
	 *
	 * ...also allow sending for SDMMC_CMD_VOLT_SWITCH where busy is
	 * expected.
	 */
	if ((cmd_flags & SDMMC_CMD_PRV_DAT_WAIT) &&
	    !(cmd_flags & SDMMC_CMD_VOLT_SWITCH)) {
		while (mci_readl(host, STATUS) & SDMMC_STATUS_BUSY) {
			if (time_after(jiffies, timeout)) {
				/* Command will fail; we'll pass error then */
				dev_err(host->dev, "Busy; trying anyway\n");
				break;
			}
			udelay(10);
		}
	}
}
367
/*
 * Latch @cmd as the in-flight command and write ARG then CMD to the
 * controller. The wmb() ensures CMDARG is posted before the busy-wait
 * and the CMD start bit; do not reorder the register writes.
 */
static void dw_mci_start_command(struct dw_mci *host,
				 struct mmc_command *cmd, u32 cmd_flags)
{
	host->cmd = cmd;
	dev_vdbg(host->dev,
		 "start command: ARGR=0x%08x CMDR=0x%08x\n",
		 cmd->arg, cmd_flags);

	mci_writel(host, CMDARG, cmd->arg);
	wmb(); /* drain writebuffer */
	dw_mci_wait_while_busy(host, cmd_flags);

	mci_writel(host, CMD, cmd_flags | SDMMC_CMD_START);
}
382
Seungwon Jeon90c21432013-08-31 00:14:05 +0900383static inline void send_stop_abort(struct dw_mci *host, struct mmc_data *data)
Will Newtonf95f3852011-01-02 01:11:59 -0500384{
Seungwon Jeon90c21432013-08-31 00:14:05 +0900385 struct mmc_command *stop = data->stop ? data->stop : &host->stop_abort;
Shawn Lin0e3a22c2015-08-03 15:07:21 +0800386
Seungwon Jeon90c21432013-08-31 00:14:05 +0900387 dw_mci_start_command(host, stop, host->stop_cmdr);
Will Newtonf95f3852011-01-02 01:11:59 -0500388}
389
390/* DMA interface functions */
/*
 * Halt any in-progress DMA and mark the transfer phase complete so the
 * state machine can move on (used on error/abort paths).
 */
static void dw_mci_stop_dma(struct dw_mci *host)
{
	if (host->using_dma) {
		host->dma_ops->stop(host);
		host->dma_ops->cleanup(host);
	}

	/* Data transfer was stopped by the interrupt handler */
	set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
}
401
Seungwon Jeon9aa51402012-02-06 16:55:07 +0900402static int dw_mci_get_dma_dir(struct mmc_data *data)
403{
404 if (data->flags & MMC_DATA_WRITE)
405 return DMA_TO_DEVICE;
406 else
407 return DMA_FROM_DEVICE;
408}
409
Will Newtonf95f3852011-01-02 01:11:59 -0500410static void dw_mci_dma_cleanup(struct dw_mci *host)
411{
412 struct mmc_data *data = host->data;
413
414 if (data)
Seungwon Jeon9aa51402012-02-06 16:55:07 +0900415 if (!data->host_cookie)
Thomas Abraham4a909202012-09-17 18:16:35 +0000416 dma_unmap_sg(host->dev,
Seungwon Jeon9aa51402012-02-06 16:55:07 +0900417 data->sg,
418 data->sg_len,
419 dw_mci_get_dma_dir(data));
Will Newtonf95f3852011-01-02 01:11:59 -0500420}
421
/* Soft-reset the internal DMAC via the BMOD register (self-clearing bit). */
static void dw_mci_idmac_reset(struct dw_mci *host)
{
	u32 bmod = mci_readl(host, BMOD);
	/* Software reset of DMA */
	bmod |= SDMMC_IDMAC_SWRESET;
	mci_writel(host, BMOD, bmod);
}
429
/*
 * Stop the internal DMAC: deselect it in CTRL (with a DMA reset), then
 * disable and soft-reset the IDMAC itself through BMOD.
 */
static void dw_mci_idmac_stop_dma(struct dw_mci *host)
{
	u32 temp;

	/* Disable and reset the IDMAC interface */
	temp = mci_readl(host, CTRL);
	temp &= ~SDMMC_CTRL_USE_IDMAC;
	temp |= SDMMC_CTRL_DMA_RESET;
	mci_writel(host, CTRL, temp);

	/* Stop the IDMAC running */
	temp = mci_readl(host, BMOD);
	temp &= ~(SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB);
	temp |= SDMMC_IDMAC_SWRESET;
	mci_writel(host, BMOD, temp);
}
446
/*
 * DMA completion callback shared by the internal DMAC and the external
 * dmaengine path (@arg is the dw_mci host). Syncs the buffers for the
 * CPU after an edmac read, cleans up the mapping and kicks the tasklet.
 */
static void dw_mci_dmac_complete_dma(void *arg)
{
	struct dw_mci *host = arg;
	struct mmc_data *data = host->data;

	dev_vdbg(host->dev, "DMA complete\n");

	if ((host->use_dma == TRANS_MODE_EDMAC) &&
	    data && (data->flags & MMC_DATA_READ))
		/* Invalidate cache after read */
		dma_sync_sg_for_cpu(mmc_dev(host->cur_slot->mmc),
				    data->sg,
				    data->sg_len,
				    DMA_FROM_DEVICE);

	host->dma_ops->cleanup(host);

	/*
	 * If the card was removed, data will be NULL. No point in trying to
	 * send the stop command or waiting for NBUSY in this case.
	 */
	if (data) {
		set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
		tasklet_schedule(&host->tasklet);
	}
}
473
/*
 * Initialize the IDMAC descriptor ring (32- or 64-bit layout depending
 * on host->dma_64bit_address): forward-link all descriptors in the
 * coherent buffer, mark the last one end-of-ring, then soft-reset the
 * IDMAC and program its interrupt mask and descriptor base address.
 */
static int dw_mci_idmac_init(struct dw_mci *host)
{
	int i;

	if (host->dma_64bit_address == 1) {
		struct idmac_desc_64addr *p;
		/* Number of descriptors in the ring buffer */
		host->ring_size =
			DESC_RING_BUF_SZ / sizeof(struct idmac_desc_64addr);

		/* Forward link the descriptor list */
		for (i = 0, p = host->sg_cpu; i < host->ring_size - 1;
		     i++, p++) {
			p->des6 = (host->sg_dma +
					(sizeof(struct idmac_desc_64addr) *
							(i + 1))) & 0xffffffff;

			p->des7 = (u64)(host->sg_dma +
					(sizeof(struct idmac_desc_64addr) *
							(i + 1))) >> 32;
			/* Initialize reserved and buffer size fields to "0" */
			p->des1 = 0;
			p->des2 = 0;
			p->des3 = 0;
		}

		/* Set the last descriptor as the end-of-ring descriptor */
		p->des6 = host->sg_dma & 0xffffffff;
		p->des7 = (u64)host->sg_dma >> 32;
		p->des0 = IDMAC_DES0_ER;

	} else {
		struct idmac_desc *p;
		/* Number of descriptors in the ring buffer */
		host->ring_size =
			DESC_RING_BUF_SZ / sizeof(struct idmac_desc);

		/* Forward link the descriptor list */
		for (i = 0, p = host->sg_cpu;
		     i < host->ring_size - 1;
		     i++, p++) {
			p->des3 = cpu_to_le32(host->sg_dma +
					(sizeof(struct idmac_desc) * (i + 1)));
			p->des1 = 0;
		}

		/* Set the last descriptor as the end-of-ring descriptor */
		p->des3 = cpu_to_le32(host->sg_dma);
		p->des0 = cpu_to_le32(IDMAC_DES0_ER);
	}

	dw_mci_idmac_reset(host);

	if (host->dma_64bit_address == 1) {
		/* Mask out interrupts - get Tx & Rx complete only */
		mci_writel(host, IDSTS64, IDMAC_INT_CLR);
		mci_writel(host, IDINTEN64, SDMMC_IDMAC_INT_NI |
				SDMMC_IDMAC_INT_RI | SDMMC_IDMAC_INT_TI);

		/* Set the descriptor base address */
		mci_writel(host, DBADDRL, host->sg_dma & 0xffffffff);
		mci_writel(host, DBADDRU, (u64)host->sg_dma >> 32);

	} else {
		/* Mask out interrupts - get Tx & Rx complete only */
		mci_writel(host, IDSTS, IDMAC_INT_CLR);
		mci_writel(host, IDINTEN, SDMMC_IDMAC_INT_NI |
				SDMMC_IDMAC_INT_RI | SDMMC_IDMAC_INT_TI);

		/* Set the descriptor base address */
		mci_writel(host, DBADDR, host->sg_dma);
	}

	return 0;
}
549
/*
 * Fill the 64-bit IDMAC descriptor ring for @data: each sg entry is
 * split into DW_MCI_DESC_DATA_LENGTH-sized chunks, each chunk taking
 * one descriptor (OWN set, chained, interrupt-on-completion disabled
 * except implicitly on the last). First/last descriptors get FD/LD.
 * Returns -EINVAL (after re-initializing the ring) if a descriptor is
 * still owned by the IDMAC after a 100 ms poll.
 */
static inline int dw_mci_prepare_desc64(struct dw_mci *host,
					 struct mmc_data *data,
					 unsigned int sg_len)
{
	unsigned int desc_len;
	struct idmac_desc_64addr *desc_first, *desc_last, *desc;
	unsigned long timeout;
	int i;

	desc_first = desc_last = desc = host->sg_cpu;

	for (i = 0; i < sg_len; i++) {
		unsigned int length = sg_dma_len(&data->sg[i]);

		u64 mem_addr = sg_dma_address(&data->sg[i]);

		for ( ; length ; desc++) {
			desc_len = (length <= DW_MCI_DESC_DATA_LENGTH) ?
				   length : DW_MCI_DESC_DATA_LENGTH;

			length -= desc_len;

			/*
			 * Wait for the former clear OWN bit operation
			 * of IDMAC to make sure that this descriptor
			 * isn't still owned by IDMAC as IDMAC's write
			 * ops and CPU's read ops are asynchronous.
			 */
			timeout = jiffies + msecs_to_jiffies(100);
			while (readl(&desc->des0) & IDMAC_DES0_OWN) {
				if (time_after(jiffies, timeout))
					goto err_own_bit;
				udelay(10);
			}

			/*
			 * Set the OWN bit and disable interrupts
			 * for this descriptor
			 */
			desc->des0 = IDMAC_DES0_OWN | IDMAC_DES0_DIC |
						IDMAC_DES0_CH;

			/* Buffer length */
			IDMAC_64ADDR_SET_BUFFER1_SIZE(desc, desc_len);

			/* Physical address to DMA to/from */
			desc->des4 = mem_addr & 0xffffffff;
			desc->des5 = mem_addr >> 32;

			/* Update physical address for the next desc */
			mem_addr += desc_len;

			/* Save pointer to the last descriptor */
			desc_last = desc;
		}
	}

	/* Set first descriptor */
	desc_first->des0 |= IDMAC_DES0_FD;

	/* Set last descriptor */
	desc_last->des0 &= ~(IDMAC_DES0_CH | IDMAC_DES0_DIC);
	desc_last->des0 |= IDMAC_DES0_LD;

	return 0;
err_own_bit:
	/* restore the descriptor chain as it's polluted */
	dev_dbg(host->dev, "descriptor is still owned by IDMAC.\n");
	memset(host->sg_cpu, 0, DESC_RING_BUF_SZ);
	dw_mci_idmac_init(host);
	return -EINVAL;
}
622
623
/*
 * 32-bit counterpart of dw_mci_prepare_desc64(): fill the IDMAC
 * descriptor ring for @data in DW_MCI_DESC_DATA_LENGTH chunks.
 * Descriptor fields are little-endian (cpu_to_le32). Returns -EINVAL
 * (after re-initializing the ring) if the OWN bit does not clear
 * within 100 ms.
 */
static inline int dw_mci_prepare_desc32(struct dw_mci *host,
					 struct mmc_data *data,
					 unsigned int sg_len)
{
	unsigned int desc_len;
	struct idmac_desc *desc_first, *desc_last, *desc;
	unsigned long timeout;
	int i;

	desc_first = desc_last = desc = host->sg_cpu;

	for (i = 0; i < sg_len; i++) {
		unsigned int length = sg_dma_len(&data->sg[i]);

		u32 mem_addr = sg_dma_address(&data->sg[i]);

		for ( ; length ; desc++) {
			desc_len = (length <= DW_MCI_DESC_DATA_LENGTH) ?
				   length : DW_MCI_DESC_DATA_LENGTH;

			length -= desc_len;

			/*
			 * Wait for the former clear OWN bit operation
			 * of IDMAC to make sure that this descriptor
			 * isn't still owned by IDMAC as IDMAC's write
			 * ops and CPU's read ops are asynchronous.
			 */
			timeout = jiffies + msecs_to_jiffies(100);
			while (readl(&desc->des0) &
			       cpu_to_le32(IDMAC_DES0_OWN)) {
				if (time_after(jiffies, timeout))
					goto err_own_bit;
				udelay(10);
			}

			/*
			 * Set the OWN bit and disable interrupts
			 * for this descriptor
			 */
			desc->des0 = cpu_to_le32(IDMAC_DES0_OWN |
						 IDMAC_DES0_DIC |
						 IDMAC_DES0_CH);

			/* Buffer length */
			IDMAC_SET_BUFFER1_SIZE(desc, desc_len);

			/* Physical address to DMA to/from */
			desc->des2 = cpu_to_le32(mem_addr);

			/* Update physical address for the next desc */
			mem_addr += desc_len;

			/* Save pointer to the last descriptor */
			desc_last = desc;
		}
	}

	/* Set first descriptor */
	desc_first->des0 |= cpu_to_le32(IDMAC_DES0_FD);

	/* Set last descriptor */
	desc_last->des0 &= cpu_to_le32(~(IDMAC_DES0_CH |
					 IDMAC_DES0_DIC));
	desc_last->des0 |= cpu_to_le32(IDMAC_DES0_LD);

	return 0;
err_own_bit:
	/* restore the descriptor chain as it's polluted */
	dev_dbg(host->dev, "descriptor is still owned by IDMAC.\n");
	memset(host->sg_cpu, 0, DESC_RING_BUF_SZ);
	dw_mci_idmac_init(host);
	return -EINVAL;
}
698
/*
 * Program the descriptor ring for the current transfer and start the
 * IDMAC. The wmb()s order the descriptor/control writes against the
 * enable and poll-demand writes; the DMA reset guards against a prior
 * PIO transfer having left the controller in a stale state.
 */
static int dw_mci_idmac_start_dma(struct dw_mci *host, unsigned int sg_len)
{
	u32 temp;
	int ret;

	if (host->dma_64bit_address == 1)
		ret = dw_mci_prepare_desc64(host, host->data, sg_len);
	else
		ret = dw_mci_prepare_desc32(host, host->data, sg_len);

	if (ret)
		goto out;

	/* drain writebuffer */
	wmb();

	/* Make sure to reset DMA in case we did PIO before this */
	dw_mci_ctrl_reset(host, SDMMC_CTRL_DMA_RESET);
	dw_mci_idmac_reset(host);

	/* Select IDMAC interface */
	temp = mci_readl(host, CTRL);
	temp |= SDMMC_CTRL_USE_IDMAC;
	mci_writel(host, CTRL, temp);

	/* drain writebuffer */
	wmb();

	/* Enable the IDMAC */
	temp = mci_readl(host, BMOD);
	temp |= SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB;
	mci_writel(host, BMOD, temp);

	/* Start it running */
	mci_writel(host, PLDMND, 1);

out:
	return ret;
}
738
/* DMA operations for the internal DMAC (IDMAC) transfer mode. */
static const struct dw_mci_dma_ops dw_mci_idmac_ops = {
	.init = dw_mci_idmac_init,
	.start = dw_mci_idmac_start_dma,
	.stop = dw_mci_idmac_stop_dma,
	.complete = dw_mci_dmac_complete_dma,
	.cleanup = dw_mci_dma_cleanup,
};
Shawn Lin3fc7eae2015-09-16 14:41:23 +0800746
/* Abort the external dmaengine transfer without waiting for completion. */
static void dw_mci_edmac_stop_dma(struct dw_mci *host)
{
	dmaengine_terminate_async(host->dms->ch);
}
751
752static int dw_mci_edmac_start_dma(struct dw_mci *host,
753 unsigned int sg_len)
754{
755 struct dma_slave_config cfg;
756 struct dma_async_tx_descriptor *desc = NULL;
757 struct scatterlist *sgl = host->data->sg;
758 const u32 mszs[] = {1, 4, 8, 16, 32, 64, 128, 256};
759 u32 sg_elems = host->data->sg_len;
760 u32 fifoth_val;
761 u32 fifo_offset = host->fifo_reg - host->regs;
762 int ret = 0;
763
764 /* Set external dma config: burst size, burst width */
Arnd Bergmann260b3162015-11-12 15:14:23 +0100765 cfg.dst_addr = host->phy_regs + fifo_offset;
Shawn Lin3fc7eae2015-09-16 14:41:23 +0800766 cfg.src_addr = cfg.dst_addr;
767 cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
768 cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
769
770 /* Match burst msize with external dma config */
771 fifoth_val = mci_readl(host, FIFOTH);
772 cfg.dst_maxburst = mszs[(fifoth_val >> 28) & 0x7];
773 cfg.src_maxburst = cfg.dst_maxburst;
774
775 if (host->data->flags & MMC_DATA_WRITE)
776 cfg.direction = DMA_MEM_TO_DEV;
777 else
778 cfg.direction = DMA_DEV_TO_MEM;
779
780 ret = dmaengine_slave_config(host->dms->ch, &cfg);
781 if (ret) {
782 dev_err(host->dev, "Failed to config edmac.\n");
783 return -EBUSY;
784 }
785
786 desc = dmaengine_prep_slave_sg(host->dms->ch, sgl,
787 sg_len, cfg.direction,
788 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
789 if (!desc) {
790 dev_err(host->dev, "Can't prepare slave sg.\n");
791 return -EBUSY;
792 }
793
794 /* Set dw_mci_dmac_complete_dma as callback */
795 desc->callback = dw_mci_dmac_complete_dma;
796 desc->callback_param = (void *)host;
797 dmaengine_submit(desc);
798
799 /* Flush cache before write */
800 if (host->data->flags & MMC_DATA_WRITE)
801 dma_sync_sg_for_device(mmc_dev(host->cur_slot->mmc), sgl,
802 sg_elems, DMA_TO_DEVICE);
803
804 dma_async_issue_pending(host->dms->ch);
805
806 return 0;
807}
808
/*
 * Acquire the external dmaengine channel named "rx-tx" for this host.
 * Returns 0 on success, -ENOMEM or -ENXIO on failure (host->dms is
 * left NULL on any error).
 */
static int dw_mci_edmac_init(struct dw_mci *host)
{
	/* Request external dma channel */
	host->dms = kzalloc(sizeof(struct dw_mci_dma_slave), GFP_KERNEL);
	if (!host->dms)
		return -ENOMEM;

	host->dms->ch = dma_request_slave_channel(host->dev, "rx-tx");
	if (!host->dms->ch) {
		dev_err(host->dev, "Failed to get external DMA channel.\n");
		kfree(host->dms);
		host->dms = NULL;
		return -ENXIO;
	}

	return 0;
}
826
827static void dw_mci_edmac_exit(struct dw_mci *host)
828{
829 if (host->dms) {
830 if (host->dms->ch) {
831 dma_release_channel(host->dms->ch);
832 host->dms->ch = NULL;
833 }
834 kfree(host->dms);
835 host->dms = NULL;
836 }
837}
838
/* DMA operations for the external dmaengine (edmac) transfer mode. */
static const struct dw_mci_dma_ops dw_mci_edmac_ops = {
	.init = dw_mci_edmac_init,
	.exit = dw_mci_edmac_exit,
	.start = dw_mci_edmac_start_dma,
	.stop = dw_mci_edmac_stop_dma,
	.complete = dw_mci_dmac_complete_dma,
	.cleanup = dw_mci_dma_cleanup,
};
Seungwon Jeon885c3e82012-02-20 11:01:43 +0900847
/*
 * Validate @data for DMA and map its scatterlist. Returns the mapped
 * sg count, the cached cookie when already mapped (!@next), or -EINVAL
 * for transfers DMA can't handle (too short, or not word-aligned).
 * When @next is set the sg count is stashed in host_cookie so post_req
 * knows to unmap.
 */
static int dw_mci_pre_dma_transfer(struct dw_mci *host,
				   struct mmc_data *data,
				   bool next)
{
	struct scatterlist *sg;
	unsigned int i, sg_len;

	if (!next && data->host_cookie)
		return data->host_cookie;

	/*
	 * We don't do DMA on "complex" transfers, i.e. with
	 * non-word-aligned buffers or lengths. Also, we don't bother
	 * with all the DMA setup overhead for short transfers.
	 */
	if (data->blocks * data->blksz < DW_MCI_DMA_THRESHOLD)
		return -EINVAL;

	if (data->blksz & 3)
		return -EINVAL;

	for_each_sg(data->sg, sg, data->sg_len, i) {
		if (sg->offset & 3 || sg->length & 3)
			return -EINVAL;
	}

	sg_len = dma_map_sg(host->dev,
			    data->sg,
			    data->sg_len,
			    dw_mci_get_dma_dir(data));
	if (sg_len == 0)
		return -EINVAL;

	if (next)
		data->host_cookie = sg_len;

	return sg_len;
}
886
Seungwon Jeon9aa51402012-02-06 16:55:07 +0900887static void dw_mci_pre_req(struct mmc_host *mmc,
888 struct mmc_request *mrq,
889 bool is_first_req)
890{
891 struct dw_mci_slot *slot = mmc_priv(mmc);
892 struct mmc_data *data = mrq->data;
893
894 if (!slot->host->use_dma || !data)
895 return;
896
897 if (data->host_cookie) {
898 data->host_cookie = 0;
899 return;
900 }
901
902 if (dw_mci_pre_dma_transfer(slot->host, mrq->data, 1) < 0)
903 data->host_cookie = 0;
904}
905
906static void dw_mci_post_req(struct mmc_host *mmc,
907 struct mmc_request *mrq,
908 int err)
909{
910 struct dw_mci_slot *slot = mmc_priv(mmc);
911 struct mmc_data *data = mrq->data;
912
913 if (!slot->host->use_dma || !data)
914 return;
915
916 if (data->host_cookie)
Thomas Abraham4a909202012-09-17 18:16:35 +0000917 dma_unmap_sg(slot->host->dev,
Seungwon Jeon9aa51402012-02-06 16:55:07 +0900918 data->sg,
919 data->sg_len,
920 dw_mci_get_dma_dir(data));
921 data->host_cookie = 0;
922}
923
/*
 * Program FIFOTH (DMA burst size MSIZE plus RX/TX watermarks) to suit the
 * transfer's block size, picking the largest burst that evenly divides both
 * the block depth and the TX watermark remainder.
 */
static void dw_mci_adjust_fifoth(struct dw_mci *host, struct mmc_data *data)
{
	unsigned int blksz = data->blksz;
	const u32 mszs[] = {1, 4, 8, 16, 32, 64, 128, 256};
	u32 fifo_width = 1 << host->data_shift;
	u32 blksz_depth = blksz / fifo_width, fifoth_val;
	u32 msize = 0, rx_wmark = 1, tx_wmark, tx_wmark_invers;
	int idx = ARRAY_SIZE(mszs) - 1;

	/* PIO should skip this scenario: FIFOTH tuning only matters for DMA */
	if (!host->use_dma)
		return;

	tx_wmark = (host->fifo_depth) / 2;
	tx_wmark_invers = host->fifo_depth - tx_wmark;

	/*
	 * MSIZE is '1',
	 * if blksz is not a multiple of the FIFO width
	 */
	if (blksz % fifo_width)
		goto done;

	/* Walk the burst sizes from largest to smallest (idx 7..1). */
	do {
		if (!((blksz_depth % mszs[idx]) ||
		     (tx_wmark_invers % mszs[idx]))) {
			msize = idx;
			rx_wmark = mszs[idx] - 1;
			break;
		}
	} while (--idx > 0);
	/*
	 * If idx is '0', it won't be tried
	 * Thus, initial values are used
	 */
done:
	fifoth_val = SDMMC_SET_FIFOTH(msize, rx_wmark, tx_wmark);
	mci_writel(host, FIFOTH, fifoth_val);
}
963
Jaehoon Chung7e4bf1b2016-06-21 14:35:38 +0900964static void dw_mci_ctrl_thld(struct dw_mci *host, struct mmc_data *data)
Seungwon Jeonf1d27362013-08-31 00:13:55 +0900965{
966 unsigned int blksz = data->blksz;
967 u32 blksz_depth, fifo_depth;
968 u16 thld_size;
Jaehoon Chung7e4bf1b2016-06-21 14:35:38 +0900969 u8 enable;
Seungwon Jeonf1d27362013-08-31 00:13:55 +0900970
James Hogan66dfd102014-11-17 17:49:05 +0000971 /*
972 * CDTHRCTL doesn't exist prior to 240A (in fact that register offset is
973 * in the FIFO region, so we really shouldn't access it).
974 */
Jaehoon Chung7e4bf1b2016-06-21 14:35:38 +0900975 if (host->verid < DW_MMC_240A ||
976 (host->verid < DW_MMC_280A && data->flags & MMC_DATA_WRITE))
James Hogan66dfd102014-11-17 17:49:05 +0000977 return;
978
Jaehoon Chung7e4bf1b2016-06-21 14:35:38 +0900979 /*
980 * Card write Threshold is introduced since 2.80a
981 * It's used when HS400 mode is enabled.
982 */
983 if (data->flags & MMC_DATA_WRITE &&
984 !(host->timing != MMC_TIMING_MMC_HS400))
985 return;
986
987 if (data->flags & MMC_DATA_WRITE)
988 enable = SDMMC_CARD_WR_THR_EN;
989 else
990 enable = SDMMC_CARD_RD_THR_EN;
991
Seungwon Jeonf1d27362013-08-31 00:13:55 +0900992 if (host->timing != MMC_TIMING_MMC_HS200 &&
993 host->timing != MMC_TIMING_UHS_SDR104)
994 goto disable;
995
996 blksz_depth = blksz / (1 << host->data_shift);
997 fifo_depth = host->fifo_depth;
998
999 if (blksz_depth > fifo_depth)
1000 goto disable;
1001
1002 /*
1003 * If (blksz_depth) >= (fifo_depth >> 1), should be 'thld_size <= blksz'
1004 * If (blksz_depth) < (fifo_depth >> 1), should be thld_size = blksz
1005 * Currently just choose blksz.
1006 */
1007 thld_size = blksz;
Jaehoon Chung7e4bf1b2016-06-21 14:35:38 +09001008 mci_writel(host, CDTHRCTL, SDMMC_SET_THLD(thld_size, enable));
Seungwon Jeonf1d27362013-08-31 00:13:55 +09001009 return;
1010
1011disable:
Jaehoon Chung7e4bf1b2016-06-21 14:35:38 +09001012 mci_writel(host, CDTHRCTL, 0);
Seungwon Jeonf1d27362013-08-31 00:13:55 +09001013}
1014
/*
 * Try to set up @data as a DMA transfer.
 *
 * Returns 0 on success; a negative errno means the caller must fall back
 * to PIO for this transfer.
 */
static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data)
{
	unsigned long irqflags;
	int sg_len;
	u32 temp;

	host->using_dma = 0;

	/* If we don't have a channel, we can't do DMA */
	if (!host->use_dma)
		return -ENODEV;

	sg_len = dw_mci_pre_dma_transfer(host, data, 0);
	if (sg_len < 0) {
		/* Mapping refused (unaligned/short transfer): stop the DMA. */
		host->dma_ops->stop(host);
		return sg_len;
	}

	host->using_dma = 1;

	if (host->use_dma == TRANS_MODE_IDMAC)
		dev_vdbg(host->dev,
			 "sd sg_cpu: %#lx sg_dma: %#lx sg_len: %d\n",
			 (unsigned long)host->sg_cpu,
			 (unsigned long)host->sg_dma,
			 sg_len);

	/*
	 * Decide the MSIZE and RX/TX Watermark.
	 * If current block size is same with previous size,
	 * no need to update fifoth.
	 */
	if (host->prev_blksz != data->blksz)
		dw_mci_adjust_fifoth(host, data);

	/* Enable the DMA interface */
	temp = mci_readl(host, CTRL);
	temp |= SDMMC_CTRL_DMA_ENABLE;
	mci_writel(host, CTRL, temp);

	/* Disable RX/TX IRQs, let DMA handle it */
	spin_lock_irqsave(&host->irq_lock, irqflags);
	temp = mci_readl(host, INTMASK);
	temp &= ~(SDMMC_INT_RXDR | SDMMC_INT_TXDR);
	mci_writel(host, INTMASK, temp);
	spin_unlock_irqrestore(&host->irq_lock, irqflags);

	if (host->dma_ops->start(host, sg_len)) {
		host->dma_ops->stop(host);
		/* We can't do DMA, try PIO for this one */
		dev_dbg(host->dev,
			"%s: fall back to PIO mode for current transfer\n",
			__func__);
		return -ENODEV;
	}

	return 0;
}
1073
/*
 * Prepare the controller for @data's data stage: program direction and
 * thresholds, then try DMA and fall back to interrupt-driven PIO (with
 * sg_miter) when DMA is unavailable or refuses the transfer.
 */
static void dw_mci_submit_data(struct dw_mci *host, struct mmc_data *data)
{
	unsigned long irqflags;
	int flags = SG_MITER_ATOMIC;
	u32 temp;

	data->error = -EINPROGRESS;

	WARN_ON(host->data);
	host->sg = NULL;
	host->data = data;

	if (data->flags & MMC_DATA_READ)
		host->dir_status = DW_MCI_RECV_STATUS;
	else
		host->dir_status = DW_MCI_SEND_STATUS;

	dw_mci_ctrl_thld(host, data);

	if (dw_mci_submit_data_dma(host, data)) {
		/* DMA not possible: run this transfer in PIO mode. */
		if (host->data->flags & MMC_DATA_READ)
			flags |= SG_MITER_TO_SG;
		else
			flags |= SG_MITER_FROM_SG;

		sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
		host->sg = data->sg;
		host->part_buf_start = 0;
		host->part_buf_count = 0;

		/* Clear stale data-request interrupts, then unmask them. */
		mci_writel(host, RINTSTS, SDMMC_INT_TXDR | SDMMC_INT_RXDR);

		spin_lock_irqsave(&host->irq_lock, irqflags);
		temp = mci_readl(host, INTMASK);
		temp |= SDMMC_INT_TXDR | SDMMC_INT_RXDR;
		mci_writel(host, INTMASK, temp);
		spin_unlock_irqrestore(&host->irq_lock, irqflags);

		temp = mci_readl(host, CTRL);
		temp &= ~SDMMC_CTRL_DMA_ENABLE;
		mci_writel(host, CTRL, temp);

		/*
		 * Use the initial fifoth_val for PIO mode.
		 * If next issued data may be transferred by DMA mode,
		 * prev_blksz should be invalidated.
		 */
		mci_writel(host, FIFOTH, host->fifoth_val);
		host->prev_blksz = 0;
	} else {
		/*
		 * Keep the current block size.
		 * It will be used to decide whether to update
		 * fifoth register next time.
		 */
		host->prev_blksz = data->blksz;
	}
}
1132
1133static void mci_send_cmd(struct dw_mci_slot *slot, u32 cmd, u32 arg)
1134{
1135 struct dw_mci *host = slot->host;
1136 unsigned long timeout = jiffies + msecs_to_jiffies(500);
1137 unsigned int cmd_status = 0;
1138
1139 mci_writel(host, CMDARG, arg);
Shawn Lin0e3a22c2015-08-03 15:07:21 +08001140 wmb(); /* drain writebuffer */
Doug Anderson0bdbd0e2015-02-20 12:31:56 -08001141 dw_mci_wait_while_busy(host, cmd);
Will Newtonf95f3852011-01-02 01:11:59 -05001142 mci_writel(host, CMD, SDMMC_CMD_START | cmd);
1143
1144 while (time_before(jiffies, timeout)) {
1145 cmd_status = mci_readl(host, CMD);
1146 if (!(cmd_status & SDMMC_CMD_START))
1147 return;
1148 }
1149 dev_err(&slot->mmc->class_dev,
1150 "Timeout sending command (cmd %#x arg %#x status %#x)\n",
1151 cmd, arg, cmd_status);
1152}
1153
/*
 * Program the card clock (divider, enable, low-power gating) and bus width
 * for @slot. Each clock register change must be followed by an update-clock
 * command so the CIU picks it up; @force_clkinit forces reprogramming even
 * when the requested clock matches the current one.
 */
static void dw_mci_setup_bus(struct dw_mci_slot *slot, bool force_clkinit)
{
	struct dw_mci *host = slot->host;
	unsigned int clock = slot->clock;
	u32 div;
	u32 clk_en_a;
	u32 sdmmc_cmd_bits = SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT;

	/* We must continue to set bit 28 in CMD until the change is complete */
	if (host->state == STATE_WAITING_CMD11_DONE)
		sdmmc_cmd_bits |= SDMMC_CMD_VOLT_SWITCH;

	if (!clock) {
		/* Clock off: just gate it and inform the CIU. */
		mci_writel(host, CLKENA, 0);
		mci_send_cmd(slot, sdmmc_cmd_bits, 0);
	} else if (clock != host->current_speed || force_clkinit) {
		div = host->bus_hz / clock;
		if (host->bus_hz % clock && host->bus_hz > clock)
			/*
			 * move the + 1 after the divide to prevent
			 * over-clocking the card.
			 */
			div += 1;

		/* CLKDIV divides by 2*div; 0 means bypass (full bus_hz). */
		div = (host->bus_hz != clock) ? DIV_ROUND_UP(div, 2) : 0;

		/* Only log when the core actually requested a new rate. */
		if (clock != slot->__clk_old || force_clkinit)
			dev_info(&slot->mmc->class_dev,
				 "Bus speed (slot %d) = %dHz (slot req %dHz, actual %dHZ div = %d)\n",
				 slot->id, host->bus_hz, clock,
				 div ? ((host->bus_hz / div) >> 1) :
				 host->bus_hz, div);

		/* disable clock */
		mci_writel(host, CLKENA, 0);
		mci_writel(host, CLKSRC, 0);

		/* inform CIU */
		mci_send_cmd(slot, sdmmc_cmd_bits, 0);

		/* set clock to desired speed */
		mci_writel(host, CLKDIV, div);

		/* inform CIU */
		mci_send_cmd(slot, sdmmc_cmd_bits, 0);

		/* enable clock; only low power if no SDIO */
		clk_en_a = SDMMC_CLKEN_ENABLE << slot->id;
		if (!test_bit(DW_MMC_CARD_NO_LOW_PWR, &slot->flags))
			clk_en_a |= SDMMC_CLKEN_LOW_PWR << slot->id;
		mci_writel(host, CLKENA, clk_en_a);

		/* inform CIU */
		mci_send_cmd(slot, sdmmc_cmd_bits, 0);

		/* keep the last clock value that was requested from core */
		slot->__clk_old = clock;
	}

	host->current_speed = clock;

	/* Set the current slot bus width */
	mci_writel(host, CTYPE, (slot->ctype << slot->id));
}
1218
/*
 * Start @cmd (and its data stage, if any) for @slot: reset the per-request
 * host bookkeeping, program block size/count registers, submit data, issue
 * the command, arm the CMD11 voltage-switch timeout when needed, and
 * precompute the stop/abort command descriptor.
 */
static void __dw_mci_start_request(struct dw_mci *host,
				   struct dw_mci_slot *slot,
				   struct mmc_command *cmd)
{
	struct mmc_request *mrq;
	struct mmc_data *data;
	u32 cmdflags;

	mrq = slot->mrq;

	host->cur_slot = slot;
	host->mrq = mrq;

	/* Fresh request: clear all per-request state. */
	host->pending_events = 0;
	host->completed_events = 0;
	host->cmd_status = 0;
	host->data_status = 0;
	host->dir_status = 0;

	data = cmd->data;
	if (data) {
		mci_writel(host, TMOUT, 0xFFFFFFFF);
		mci_writel(host, BYTCNT, data->blksz*data->blocks);
		mci_writel(host, BLKSIZ, data->blksz);
	}

	cmdflags = dw_mci_prepare_command(slot->mmc, cmd);

	/* this is the first command, send the initialization clock */
	if (test_and_clear_bit(DW_MMC_CARD_NEED_INIT, &slot->flags))
		cmdflags |= SDMMC_CMD_INIT;

	if (data) {
		dw_mci_submit_data(host, data);
		wmb(); /* drain writebuffer */
	}

	dw_mci_start_command(host, cmd, cmdflags);

	if (cmd->opcode == SD_SWITCH_VOLTAGE) {
		unsigned long irqflags;

		/*
		 * Databook says to fail after 2ms w/ no response, but evidence
		 * shows that sometimes the cmd11 interrupt takes over 130ms.
		 * We'll set to 500ms, plus an extra jiffy just in case jiffies
		 * is just about to roll over.
		 *
		 * We do this whole thing under spinlock and only if the
		 * command hasn't already completed (indicating the irq
		 * already ran so we don't want the timeout).
		 */
		spin_lock_irqsave(&host->irq_lock, irqflags);
		if (!test_bit(EVENT_CMD_COMPLETE, &host->pending_events))
			mod_timer(&host->cmd11_timer,
				  jiffies + msecs_to_jiffies(500) + 1);
		spin_unlock_irqrestore(&host->irq_lock, irqflags);
	}

	/* Precompute the stop/abort command for the tasklet's error paths. */
	if (mrq->stop)
		host->stop_cmdr = dw_mci_prepare_command(slot->mmc, mrq->stop);
	else
		host->stop_cmdr = dw_mci_prep_stop_abort(host, cmd);
}
1283
Seungwon Jeon053b3ce2011-12-22 18:01:29 +09001284static void dw_mci_start_request(struct dw_mci *host,
1285 struct dw_mci_slot *slot)
1286{
1287 struct mmc_request *mrq = slot->mrq;
1288 struct mmc_command *cmd;
1289
1290 cmd = mrq->sbc ? mrq->sbc : mrq->cmd;
1291 __dw_mci_start_request(host, slot, cmd);
1292}
1293
James Hogan7456caa2011-06-24 13:55:10 +01001294/* must be called with host->lock held */
Will Newtonf95f3852011-01-02 01:11:59 -05001295static void dw_mci_queue_request(struct dw_mci *host, struct dw_mci_slot *slot,
1296 struct mmc_request *mrq)
1297{
1298 dev_vdbg(&slot->mmc->class_dev, "queue request: state=%d\n",
1299 host->state);
1300
Will Newtonf95f3852011-01-02 01:11:59 -05001301 slot->mrq = mrq;
1302
Doug Anderson01730552014-08-22 19:17:51 +05301303 if (host->state == STATE_WAITING_CMD11_DONE) {
1304 dev_warn(&slot->mmc->class_dev,
1305 "Voltage change didn't complete\n");
1306 /*
1307 * this case isn't expected to happen, so we can
1308 * either crash here or just try to continue on
1309 * in the closest possible state
1310 */
1311 host->state = STATE_IDLE;
1312 }
1313
Will Newtonf95f3852011-01-02 01:11:59 -05001314 if (host->state == STATE_IDLE) {
1315 host->state = STATE_SENDING_CMD;
1316 dw_mci_start_request(host, slot);
1317 } else {
1318 list_add_tail(&slot->queue_node, &host->queue);
1319 }
Will Newtonf95f3852011-01-02 01:11:59 -05001320}
1321
1322static void dw_mci_request(struct mmc_host *mmc, struct mmc_request *mrq)
1323{
1324 struct dw_mci_slot *slot = mmc_priv(mmc);
1325 struct dw_mci *host = slot->host;
1326
1327 WARN_ON(slot->mrq);
1328
James Hogan7456caa2011-06-24 13:55:10 +01001329 /*
1330 * The check for card presence and queueing of the request must be
1331 * atomic, otherwise the card could be removed in between and the
1332 * request wouldn't fail until another card was inserted.
1333 */
James Hogan7456caa2011-06-24 13:55:10 +01001334
Shawn Lin56f69112016-05-27 14:37:05 +08001335 if (!dw_mci_get_cd(mmc)) {
Will Newtonf95f3852011-01-02 01:11:59 -05001336 mrq->cmd->error = -ENOMEDIUM;
1337 mmc_request_done(mmc, mrq);
1338 return;
1339 }
1340
Shawn Lin56f69112016-05-27 14:37:05 +08001341 spin_lock_bh(&host->lock);
1342
Will Newtonf95f3852011-01-02 01:11:59 -05001343 dw_mci_queue_request(host, slot, mrq);
James Hogan7456caa2011-06-24 13:55:10 +01001344
1345 spin_unlock_bh(&host->lock);
Will Newtonf95f3852011-01-02 01:11:59 -05001346}
1347
/*
 * mmc_host_ops.set_ios: apply the core's requested bus settings — width,
 * DDR/HS400 timing bit, clock, and power/regulator state transitions.
 */
static void dw_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	const struct dw_mci_drv_data *drv_data = slot->host->drv_data;
	u32 regs;
	int ret;

	switch (ios->bus_width) {
	case MMC_BUS_WIDTH_4:
		slot->ctype = SDMMC_CTYPE_4BIT;
		break;
	case MMC_BUS_WIDTH_8:
		slot->ctype = SDMMC_CTYPE_8BIT;
		break;
	default:
		/* set default 1 bit mode */
		slot->ctype = SDMMC_CTYPE_1BIT;
	}

	regs = mci_readl(slot->host, UHS_REG);

	/* DDR mode set (bit 16+id covers DDR52, DDR50 and HS400 timings) */
	if (ios->timing == MMC_TIMING_MMC_DDR52 ||
	    ios->timing == MMC_TIMING_UHS_DDR50 ||
	    ios->timing == MMC_TIMING_MMC_HS400)
		regs |= ((0x1 << slot->id) << 16);
	else
		regs &= ~((0x1 << slot->id) << 16);

	mci_writel(slot->host, UHS_REG, regs);
	slot->host->timing = ios->timing;

	/*
	 * Use mirror of ios->clock to prevent race with mmc
	 * core ios update when finding the minimum.
	 */
	slot->clock = ios->clock;

	/* Give the variant driver a chance to apply its own settings. */
	if (drv_data && drv_data->set_ios)
		drv_data->set_ios(slot->host, ios);

	switch (ios->power_mode) {
	case MMC_POWER_UP:
		if (!IS_ERR(mmc->supply.vmmc)) {
			ret = mmc_regulator_set_ocr(mmc, mmc->supply.vmmc,
					ios->vdd);
			if (ret) {
				dev_err(slot->host->dev,
					"failed to enable vmmc regulator\n");
				/* Bail out: without vmmc nothing else is useful. */
				return;
			}
		}
		set_bit(DW_MMC_CARD_NEED_INIT, &slot->flags);
		regs = mci_readl(slot->host, PWREN);
		regs |= (1 << slot->id);
		mci_writel(slot->host, PWREN, regs);
		break;
	case MMC_POWER_ON:
		if (!slot->host->vqmmc_enabled) {
			if (!IS_ERR(mmc->supply.vqmmc)) {
				ret = regulator_enable(mmc->supply.vqmmc);
				if (ret < 0)
					dev_err(slot->host->dev,
						"failed to enable vqmmc\n");
				else
					slot->host->vqmmc_enabled = true;

			} else {
				/* Keep track so we don't reset again */
				slot->host->vqmmc_enabled = true;
			}

			/* Reset our state machine after powering on */
			dw_mci_ctrl_reset(slot->host,
					  SDMMC_CTRL_ALL_RESET_FLAGS);
		}

		/* Adjust clock / bus width after power is up */
		dw_mci_setup_bus(slot, false);

		break;
	case MMC_POWER_OFF:
		/* Turn clock off before power goes down */
		dw_mci_setup_bus(slot, false);

		if (!IS_ERR(mmc->supply.vmmc))
			mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);

		if (!IS_ERR(mmc->supply.vqmmc) && slot->host->vqmmc_enabled)
			regulator_disable(mmc->supply.vqmmc);
		slot->host->vqmmc_enabled = false;

		regs = mci_readl(slot->host, PWREN);
		regs &= ~(1 << slot->id);
		mci_writel(slot->host, PWREN, regs);
		break;
	default:
		break;
	}

	/* A non-zero clock while waiting for CMD11 means the switch is over. */
	if (slot->host->state == STATE_WAITING_CMD11_DONE && ios->clock != 0)
		slot->host->state = STATE_IDLE;
}
1452
Doug Anderson01730552014-08-22 19:17:51 +05301453static int dw_mci_card_busy(struct mmc_host *mmc)
1454{
1455 struct dw_mci_slot *slot = mmc_priv(mmc);
1456 u32 status;
1457
1458 /*
1459 * Check the busy bit which is low when DAT[3:0]
1460 * (the data lines) are 0000
1461 */
1462 status = mci_readl(slot->host, STATUS);
1463
1464 return !!(status & SDMMC_STATUS_BUSY);
1465}
1466
/*
 * mmc_host_ops.start_signal_voltage_switch: switch the I/O signalling
 * voltage, delegating to the variant hook when one exists; otherwise
 * program both UHS_REG and the vqmmc regulator.
 */
static int dw_mci_switch_voltage(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;
	const struct dw_mci_drv_data *drv_data = host->drv_data;
	u32 uhs;
	u32 v18 = SDMMC_UHS_18V << slot->id;
	int ret;

	if (drv_data && drv_data->switch_voltage)
		return drv_data->switch_voltage(mmc, ios);

	/*
	 * Program the voltage. Note that some instances of dw_mmc may use
	 * the UHS_REG for this. For other instances (like exynos) the UHS_REG
	 * does no harm but you need to set the regulator directly. Try both.
	 */
	uhs = mci_readl(host, UHS_REG);
	if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_330)
		uhs &= ~v18;
	else
		uhs |= v18;

	if (!IS_ERR(mmc->supply.vqmmc)) {
		ret = mmc_regulator_set_vqmmc(mmc, ios);

		if (ret) {
			dev_dbg(&mmc->class_dev,
					 "Regulator set error %d - %s V\n",
					 ret, uhs & v18 ? "1.8" : "3.3");
			return ret;
		}
	}
	/* Only touch UHS_REG after the regulator switch succeeded. */
	mci_writel(host, UHS_REG, uhs);

	return 0;
}
1504
Will Newtonf95f3852011-01-02 01:11:59 -05001505static int dw_mci_get_ro(struct mmc_host *mmc)
1506{
1507 int read_only;
1508 struct dw_mci_slot *slot = mmc_priv(mmc);
Jaehoon Chung9795a842014-03-03 11:36:46 +09001509 int gpio_ro = mmc_gpio_get_ro(mmc);
Will Newtonf95f3852011-01-02 01:11:59 -05001510
1511 /* Use platform get_ro function, else try on board write protect */
Arnd Bergmann287980e2016-05-27 23:23:25 +02001512 if (gpio_ro >= 0)
Jaehoon Chung9795a842014-03-03 11:36:46 +09001513 read_only = gpio_ro;
Will Newtonf95f3852011-01-02 01:11:59 -05001514 else
1515 read_only =
1516 mci_readl(slot->host, WRTPRT) & (1 << slot->id) ? 1 : 0;
1517
1518 dev_dbg(&mmc->class_dev, "card is %s\n",
1519 read_only ? "read-only" : "read-write");
1520
1521 return read_only;
1522}
1523
1524static int dw_mci_get_cd(struct mmc_host *mmc)
1525{
1526 int present;
1527 struct dw_mci_slot *slot = mmc_priv(mmc);
Zhangfei Gao7cf347b2014-01-16 20:48:47 +08001528 struct dw_mci *host = slot->host;
1529 int gpio_cd = mmc_gpio_get_cd(mmc);
Will Newtonf95f3852011-01-02 01:11:59 -05001530
1531 /* Use platform get_cd function, else try onboard card detect */
Jaehoon Chung860951c2016-06-21 10:13:26 +09001532 if ((mmc->caps & MMC_CAP_NEEDS_POLL) || !mmc_card_is_removable(mmc))
Jaehoon Chungfc3d7722011-02-25 11:08:15 +09001533 present = 1;
Arnd Bergmann287980e2016-05-27 23:23:25 +02001534 else if (gpio_cd >= 0)
Zhangfei Gao7cf347b2014-01-16 20:48:47 +08001535 present = gpio_cd;
Will Newtonf95f3852011-01-02 01:11:59 -05001536 else
1537 present = (mci_readl(slot->host, CDETECT) & (1 << slot->id))
1538 == 0 ? 1 : 0;
1539
Zhangfei Gao7cf347b2014-01-16 20:48:47 +08001540 spin_lock_bh(&host->lock);
Jaehoon Chung1f4d5072016-11-17 16:40:34 +09001541 if (present && !test_and_set_bit(DW_MMC_CARD_PRESENT, &slot->flags))
Will Newtonf95f3852011-01-02 01:11:59 -05001542 dev_dbg(&mmc->class_dev, "card is present\n");
Jaehoon Chung1f4d5072016-11-17 16:40:34 +09001543 else if (!test_and_clear_bit(DW_MMC_CARD_PRESENT, &slot->flags))
Will Newtonf95f3852011-01-02 01:11:59 -05001544 dev_dbg(&mmc->class_dev, "card is not present\n");
Zhangfei Gao7cf347b2014-01-16 20:48:47 +08001545 spin_unlock_bh(&host->lock);
Will Newtonf95f3852011-01-02 01:11:59 -05001546
1547 return present;
1548}
1549
/*
 * mmc_host_ops.hw_reset: pulse the card's RST_n line (eMMC hardware reset),
 * after resetting the controller's DMA and FIFO state.
 */
static void dw_mci_hw_reset(struct mmc_host *mmc)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;
	int reset;

	if (host->use_dma == TRANS_MODE_IDMAC)
		dw_mci_idmac_reset(host);

	/* If the controller reset fails, don't touch the card's RST_n. */
	if (!dw_mci_ctrl_reset(host, SDMMC_CTRL_DMA_RESET |
				     SDMMC_CTRL_FIFO_RESET))
		return;

	/*
	 * According to eMMC spec, card reset procedure:
	 * tRstW >= 1us: RST_n pulse width
	 * tRSCA >= 200us: RST_n to Command time
	 * tRSTH >= 1us: RST_n high period
	 */
	reset = mci_readl(host, RST_N);
	reset &= ~(SDMMC_RST_HWACTIVE << slot->id);
	mci_writel(host, RST_N, reset);
	usleep_range(1, 2);
	reset |= SDMMC_RST_HWACTIVE << slot->id;
	mci_writel(host, RST_N, reset);
	usleep_range(200, 300);
}
1577
/* mmc_host_ops.init_card: per-card clock-gating setup at attach time. */
static void dw_mci_init_card(struct mmc_host *mmc, struct mmc_card *card)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;

	/*
	 * Low power mode will stop the card clock when idle. According to the
	 * description of the CLKENA register we should disable low power mode
	 * for SDIO cards if we need SDIO interrupts to work.
	 */
	if (mmc->caps & MMC_CAP_SDIO_IRQ) {
		const u32 clken_low_pwr = SDMMC_CLKEN_LOW_PWR << slot->id;
		u32 clk_en_a_old;
		u32 clk_en_a;

		clk_en_a_old = mci_readl(host, CLKENA);

		/* SDIO (and SD-combo) cards must keep the clock running. */
		if (card->type == MMC_TYPE_SDIO ||
		    card->type == MMC_TYPE_SD_COMBO) {
			set_bit(DW_MMC_CARD_NO_LOW_PWR, &slot->flags);
			clk_en_a = clk_en_a_old & ~clken_low_pwr;
		} else {
			clear_bit(DW_MMC_CARD_NO_LOW_PWR, &slot->flags);
			clk_en_a = clk_en_a_old | clken_low_pwr;
		}

		/* Only write CLKENA (and notify the CIU) when it changed. */
		if (clk_en_a != clk_en_a_old) {
			mci_writel(host, CLKENA, clk_en_a);
			mci_send_cmd(slot, SDMMC_CMD_UPD_CLK |
				     SDMMC_CMD_PRV_DAT_WAIT, 0);
		}
	}
}
1611
Shashidhar Hiremath1a5c8e12011-08-29 13:11:46 +05301612static void dw_mci_enable_sdio_irq(struct mmc_host *mmc, int enb)
1613{
1614 struct dw_mci_slot *slot = mmc_priv(mmc);
1615 struct dw_mci *host = slot->host;
Doug Andersonf8c58c12014-12-02 15:42:47 -08001616 unsigned long irqflags;
Shashidhar Hiremath1a5c8e12011-08-29 13:11:46 +05301617 u32 int_mask;
1618
Doug Andersonf8c58c12014-12-02 15:42:47 -08001619 spin_lock_irqsave(&host->irq_lock, irqflags);
1620
Shashidhar Hiremath1a5c8e12011-08-29 13:11:46 +05301621 /* Enable/disable Slot Specific SDIO interrupt */
1622 int_mask = mci_readl(host, INTMASK);
Doug Andersonb24c8b22014-12-02 15:42:46 -08001623 if (enb)
1624 int_mask |= SDMMC_INT_SDIO(slot->sdio_id);
1625 else
1626 int_mask &= ~SDMMC_INT_SDIO(slot->sdio_id);
1627 mci_writel(host, INTMASK, int_mask);
Doug Andersonf8c58c12014-12-02 15:42:47 -08001628
1629 spin_unlock_irqrestore(&host->irq_lock, irqflags);
Shashidhar Hiremath1a5c8e12011-08-29 13:11:46 +05301630}
1631
Seungwon Jeon0976f162013-08-31 00:12:42 +09001632static int dw_mci_execute_tuning(struct mmc_host *mmc, u32 opcode)
1633{
1634 struct dw_mci_slot *slot = mmc_priv(mmc);
1635 struct dw_mci *host = slot->host;
1636 const struct dw_mci_drv_data *drv_data = host->drv_data;
Shawn Lin0e3a22c2015-08-03 15:07:21 +08001637 int err = -EINVAL;
Seungwon Jeon0976f162013-08-31 00:12:42 +09001638
Seungwon Jeon0976f162013-08-31 00:12:42 +09001639 if (drv_data && drv_data->execute_tuning)
Chaotian Jing9979dbe2015-10-27 14:24:28 +08001640 err = drv_data->execute_tuning(slot, opcode);
Seungwon Jeon0976f162013-08-31 00:12:42 +09001641 return err;
1642}
1643
Shawn Lin0e3a22c2015-08-03 15:07:21 +08001644static int dw_mci_prepare_hs400_tuning(struct mmc_host *mmc,
1645 struct mmc_ios *ios)
Seungwon Jeon80113132015-01-29 08:11:57 +05301646{
1647 struct dw_mci_slot *slot = mmc_priv(mmc);
1648 struct dw_mci *host = slot->host;
1649 const struct dw_mci_drv_data *drv_data = host->drv_data;
1650
1651 if (drv_data && drv_data->prepare_hs400_tuning)
1652 return drv_data->prepare_hs400_tuning(host, ios);
1653
1654 return 0;
1655}
1656
/* mmc_host_ops implemented by this driver; the MMC core calls these. */
static const struct mmc_host_ops dw_mci_ops = {
	.request		= dw_mci_request,
	.pre_req		= dw_mci_pre_req,
	.post_req		= dw_mci_post_req,
	.set_ios		= dw_mci_set_ios,
	.get_ro			= dw_mci_get_ro,
	.get_cd			= dw_mci_get_cd,
	.hw_reset               = dw_mci_hw_reset,
	.enable_sdio_irq	= dw_mci_enable_sdio_irq,
	.execute_tuning		= dw_mci_execute_tuning,
	.card_busy		= dw_mci_card_busy,
	.start_signal_voltage_switch = dw_mci_switch_voltage,
	.init_card		= dw_mci_init_card,
	.prepare_hs400_tuning	= dw_mci_prepare_hs400_tuning,
};
1672
/*
 * Finish the current request and, if other slots are queued, start the
 * next one.  Must be called with host->lock held; the lock is dropped
 * around mmc_request_done() (which may re-enter the driver) and then
 * re-acquired, as annotated below.
 */
static void dw_mci_request_end(struct dw_mci *host, struct mmc_request *mrq)
	__releases(&host->lock)
	__acquires(&host->lock)
{
	struct dw_mci_slot *slot;
	struct mmc_host	*prev_mmc = host->cur_slot->mmc;

	/* By now the command/data phases must both have been torn down. */
	WARN_ON(host->cmd || host->data);

	host->cur_slot->mrq = NULL;
	host->mrq = NULL;
	if (!list_empty(&host->queue)) {
		/* Another slot is waiting: dequeue it and start it now. */
		slot = list_entry(host->queue.next,
				  struct dw_mci_slot, queue_node);
		list_del(&slot->queue_node);
		dev_vdbg(host->dev, "list not empty: %s is next\n",
			 mmc_hostname(slot->mmc));
		host->state = STATE_SENDING_CMD;
		dw_mci_start_request(host, slot);
	} else {
		dev_vdbg(host->dev, "list empty\n");

		/* A finished CMD11 leaves us waiting for the voltage switch. */
		if (host->state == STATE_SENDING_CMD11)
			host->state = STATE_WAITING_CMD11_DONE;
		else
			host->state = STATE_IDLE;
	}

	/* Report completion to the core without holding our lock. */
	spin_unlock(&host->lock);
	mmc_request_done(prev_mmc, mrq);
	spin_lock(&host->lock);
}
1705
Seungwon Jeone352c812013-08-31 00:14:17 +09001706static int dw_mci_command_complete(struct dw_mci *host, struct mmc_command *cmd)
Will Newtonf95f3852011-01-02 01:11:59 -05001707{
1708 u32 status = host->cmd_status;
1709
1710 host->cmd_status = 0;
1711
1712 /* Read the response from the card (up to 16 bytes) */
1713 if (cmd->flags & MMC_RSP_PRESENT) {
1714 if (cmd->flags & MMC_RSP_136) {
1715 cmd->resp[3] = mci_readl(host, RESP0);
1716 cmd->resp[2] = mci_readl(host, RESP1);
1717 cmd->resp[1] = mci_readl(host, RESP2);
1718 cmd->resp[0] = mci_readl(host, RESP3);
1719 } else {
1720 cmd->resp[0] = mci_readl(host, RESP0);
1721 cmd->resp[1] = 0;
1722 cmd->resp[2] = 0;
1723 cmd->resp[3] = 0;
1724 }
1725 }
1726
1727 if (status & SDMMC_INT_RTO)
1728 cmd->error = -ETIMEDOUT;
1729 else if ((cmd->flags & MMC_RSP_CRC) && (status & SDMMC_INT_RCRC))
1730 cmd->error = -EILSEQ;
1731 else if (status & SDMMC_INT_RESP_ERR)
1732 cmd->error = -EIO;
1733 else
1734 cmd->error = 0;
1735
Seungwon Jeone352c812013-08-31 00:14:17 +09001736 return cmd->error;
1737}
1738
/*
 * Finalize a completed data transfer: translate the latched data status
 * into data->error, fix up bytes_xfered, and reset the controller after
 * an error so stale FIFO contents cannot leak into the next transfer.
 * Returns data->error (0 on success).
 */
static int dw_mci_data_complete(struct dw_mci *host, struct mmc_data *data)
{
	u32 status = host->data_status;

	if (status & DW_MCI_DATA_ERROR_FLAGS) {
		if (status & SDMMC_INT_DRTO) {
			/* Data read timeout. */
			data->error = -ETIMEDOUT;
		} else if (status & SDMMC_INT_DCRC) {
			/* Data CRC error. */
			data->error = -EILSEQ;
		} else if (status & SDMMC_INT_EBE) {
			if (host->dir_status ==
				DW_MCI_SEND_STATUS) {
				/*
				 * No data CRC status was returned.
				 * The number of bytes transferred
				 * will be exaggerated in PIO mode.
				 */
				data->bytes_xfered = 0;
				data->error = -ETIMEDOUT;
			} else if (host->dir_status ==
					DW_MCI_RECV_STATUS) {
				data->error = -EILSEQ;
			}
		} else {
			/* SDMMC_INT_SBE is included */
			data->error = -EILSEQ;
		}

		dev_dbg(host->dev, "data error, status 0x%08x\n", status);

		/*
		 * After an error, there may be data lingering
		 * in the FIFO
		 */
		dw_mci_reset(host);
	} else {
		data->bytes_xfered = data->blocks * data->blksz;
		data->error = 0;
	}

	return data->error;
}
1781
Addy Ke57e10482015-08-11 01:27:18 +09001782static void dw_mci_set_drto(struct dw_mci *host)
1783{
1784 unsigned int drto_clks;
1785 unsigned int drto_ms;
1786
1787 drto_clks = mci_readl(host, TMOUT) >> 8;
1788 drto_ms = DIV_ROUND_UP(drto_clks, host->bus_hz / 1000);
1789
1790 /* add a bit spare time */
1791 drto_ms += 10;
1792
1793 mod_timer(&host->dto_timer, jiffies + msecs_to_jiffies(drto_ms));
1794}
1795
/*
 * Bottom half of request processing.  Runs the request state machine:
 * the IRQ handler records events in host->pending_events and schedules
 * this tasklet, which advances host->state (SENDING_CMD -> SENDING_DATA
 * -> DATA_BUSY -> SENDING_STOP, with error detours) under host->lock.
 * The loop repeats until the state stops changing, so several events
 * that arrived together are consumed in one pass.
 */
static void dw_mci_tasklet_func(unsigned long priv)
{
	struct dw_mci *host = (struct dw_mci *)priv;
	struct mmc_data	*data;
	struct mmc_command *cmd;
	struct mmc_request *mrq;
	enum dw_mci_state state;
	enum dw_mci_state prev_state;
	unsigned int err;

	spin_lock(&host->lock);

	state = host->state;
	data = host->data;
	mrq = host->mrq;

	do {
		prev_state = state;

		switch (state) {
		case STATE_IDLE:
		case STATE_WAITING_CMD11_DONE:
			/* Nothing in flight; nothing to advance. */
			break;

		case STATE_SENDING_CMD11:
		case STATE_SENDING_CMD:
			if (!test_and_clear_bit(EVENT_CMD_COMPLETE,
						&host->pending_events))
				break;

			cmd = host->cmd;
			host->cmd = NULL;
			set_bit(EVENT_CMD_COMPLETE, &host->completed_events);
			err = dw_mci_command_complete(host, cmd);
			if (cmd == mrq->sbc && !err) {
				/* CMD23 done; now send the real data command. */
				prev_state = state = STATE_SENDING_CMD;
				__dw_mci_start_request(host, host->cur_slot,
						       mrq->cmd);
				goto unlock;
			}

			if (cmd->data && err) {
				/*
				 * During UHS tuning sequence, sending the stop
				 * command after the response CRC error would
				 * throw the system into a confused state
				 * causing all future tuning phases to report
				 * failure.
				 *
				 * In such case controller will move into a data
				 * transfer state after a response error or
				 * response CRC error. Let's let that finish
				 * before trying to send a stop, so we'll go to
				 * STATE_SENDING_DATA.
				 *
				 * Although letting the data transfer take place
				 * will waste a bit of time (we already know
				 * the command was bad), it can't cause any
				 * errors since it's possible it would have
				 * taken place anyway if this tasklet got
				 * delayed. Allowing the transfer to take place
				 * avoids races and keeps things simple.
				 */
				if ((err != -ETIMEDOUT) &&
				    (cmd->opcode == MMC_SEND_TUNING_BLOCK)) {
					state = STATE_SENDING_DATA;
					continue;
				}

				dw_mci_stop_dma(host);
				send_stop_abort(host, data);
				state = STATE_SENDING_STOP;
				break;
			}

			if (!cmd->data || err) {
				/* Command-only request, or a failed one: done. */
				dw_mci_request_end(host, mrq);
				goto unlock;
			}

			prev_state = state = STATE_SENDING_DATA;
			/* fall through */

		case STATE_SENDING_DATA:
			/*
			 * We could get a data error and never a transfer
			 * complete so we'd better check for it here.
			 *
			 * Note that we don't really care if we also got a
			 * transfer complete; stopping the DMA and sending an
			 * abort won't hurt.
			 */
			if (test_and_clear_bit(EVENT_DATA_ERROR,
					       &host->pending_events)) {
				dw_mci_stop_dma(host);
				if (data->stop ||
				    !(host->data_status & (SDMMC_INT_DRTO |
							   SDMMC_INT_EBE)))
					send_stop_abort(host, data);
				state = STATE_DATA_ERROR;
				break;
			}

			if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
						&host->pending_events)) {
				/*
				 * If all data-related interrupts don't come
				 * within the given time in reading data state.
				 */
				if (host->dir_status == DW_MCI_RECV_STATUS)
					dw_mci_set_drto(host);
				break;
			}

			set_bit(EVENT_XFER_COMPLETE, &host->completed_events);

			/*
			 * Handle an EVENT_DATA_ERROR that might have shown up
			 * before the transfer completed.  This might not have
			 * been caught by the check above because the interrupt
			 * could have gone off between the previous check and
			 * the check for transfer complete.
			 *
			 * Technically this ought not be needed assuming we
			 * get a DATA_COMPLETE eventually (we'll notice the
			 * error and end the request), but it shouldn't hurt.
			 *
			 * This has the advantage of sending the stop command.
			 */
			if (test_and_clear_bit(EVENT_DATA_ERROR,
					       &host->pending_events)) {
				dw_mci_stop_dma(host);
				if (data->stop ||
				    !(host->data_status & (SDMMC_INT_DRTO |
							   SDMMC_INT_EBE)))
					send_stop_abort(host, data);
				state = STATE_DATA_ERROR;
				break;
			}
			prev_state = state = STATE_DATA_BUSY;

			/* fall through */

		case STATE_DATA_BUSY:
			if (!test_and_clear_bit(EVENT_DATA_COMPLETE,
						&host->pending_events)) {
				/*
				 * If data error interrupt comes but data over
				 * interrupt doesn't come within the given time.
				 * in reading data state.
				 */
				if (host->dir_status == DW_MCI_RECV_STATUS)
					dw_mci_set_drto(host);
				break;
			}

			host->data = NULL;
			set_bit(EVENT_DATA_COMPLETE, &host->completed_events);
			err = dw_mci_data_complete(host, data);

			if (!err) {
				if (!data->stop || mrq->sbc) {
					if (mrq->sbc && data->stop)
						data->stop->error = 0;
					dw_mci_request_end(host, mrq);
					goto unlock;
				}

				/* stop command for open-ended transfer*/
				if (data->stop)
					send_stop_abort(host, data);
			} else {
				/*
				 * If we don't have a command complete now we'll
				 * never get one since we just reset everything;
				 * better end the request.
				 *
				 * If we do have a command complete we'll fall
				 * through to the SENDING_STOP command and
				 * everything will be peachy keen.
				 */
				if (!test_bit(EVENT_CMD_COMPLETE,
					      &host->pending_events)) {
					host->cmd = NULL;
					dw_mci_request_end(host, mrq);
					goto unlock;
				}
			}

			/*
			 * If err has non-zero,
			 * stop-abort command has been already issued.
			 */
			prev_state = state = STATE_SENDING_STOP;

			/* fall through */

		case STATE_SENDING_STOP:
			if (!test_and_clear_bit(EVENT_CMD_COMPLETE,
						&host->pending_events))
				break;

			/* CMD error in data command */
			if (mrq->cmd->error && mrq->data)
				dw_mci_reset(host);

			host->cmd = NULL;
			host->data = NULL;

			if (mrq->stop)
				dw_mci_command_complete(host, mrq->stop);
			else
				host->cmd_status = 0;

			dw_mci_request_end(host, mrq);
			goto unlock;

		case STATE_DATA_ERROR:
			/* Wait for the aborted transfer to drain before retiring it. */
			if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
						&host->pending_events))
				break;

			state = STATE_DATA_BUSY;
			break;
		}
	} while (state != prev_state);

	host->state = state;
unlock:
	spin_unlock(&host->lock);

}
2028
James Hogan34b664a2011-06-24 13:57:56 +01002029/* push final bytes to part_buf, only use during push */
2030static void dw_mci_set_part_bytes(struct dw_mci *host, void *buf, int cnt)
2031{
2032 memcpy((void *)&host->part_buf, buf, cnt);
2033 host->part_buf_count = cnt;
2034}
2035
2036/* append bytes to part_buf, only use during push */
2037static int dw_mci_push_part_bytes(struct dw_mci *host, void *buf, int cnt)
2038{
2039 cnt = min(cnt, (1 << host->data_shift) - host->part_buf_count);
2040 memcpy((void *)&host->part_buf + host->part_buf_count, buf, cnt);
2041 host->part_buf_count += cnt;
2042 return cnt;
2043}
2044
2045/* pull first bytes from part_buf, only use during pull */
2046static int dw_mci_pull_part_bytes(struct dw_mci *host, void *buf, int cnt)
2047{
Shawn Lin0e3a22c2015-08-03 15:07:21 +08002048 cnt = min_t(int, cnt, host->part_buf_count);
James Hogan34b664a2011-06-24 13:57:56 +01002049 if (cnt) {
2050 memcpy(buf, (void *)&host->part_buf + host->part_buf_start,
2051 cnt);
2052 host->part_buf_count -= cnt;
2053 host->part_buf_start += cnt;
2054 }
2055 return cnt;
2056}
2057
2058/* pull final bytes from the part_buf, assuming it's just been filled */
2059static void dw_mci_pull_final_bytes(struct dw_mci *host, void *buf, int cnt)
2060{
2061 memcpy(buf, &host->part_buf, cnt);
2062 host->part_buf_start = cnt;
2063 host->part_buf_count = (1 << host->data_shift) - cnt;
2064}
2065
/*
 * PIO push for a 16-bit-wide FIFO: feed @cnt bytes from @buf, using
 * part_buf to accumulate a stray odd byte across calls and an aligned
 * bounce buffer when @buf is misaligned on strict-alignment CPUs.
 */
static void dw_mci_push_data16(struct dw_mci *host, void *buf, int cnt)
{
	struct mmc_data *data = host->data;
	int init_cnt = cnt;

	/* try and push anything in the part_buf */
	if (unlikely(host->part_buf_count)) {
		int len = dw_mci_push_part_bytes(host, buf, cnt);

		buf += len;
		cnt -= len;
		if (host->part_buf_count == 2) {
			/* part_buf now holds a full FIFO word; flush it. */
			mci_fifo_writew(host->fifo_reg, host->part_buf16);
			host->part_buf_count = 0;
		}
	}
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	if (unlikely((unsigned long)buf & 0x1)) {
		while (cnt >= 2) {
			u16 aligned_buf[64];
			int len = min(cnt & -2, (int)sizeof(aligned_buf));
			int items = len >> 1;
			int i;
			/* memcpy from input buffer into aligned buffer */
			memcpy(aligned_buf, buf, len);
			buf += len;
			cnt -= len;
			/* push data from aligned buffer into fifo */
			for (i = 0; i < items; ++i)
				mci_fifo_writew(host->fifo_reg, aligned_buf[i]);
		}
	} else
#endif
	{
		u16 *pdata = buf;

		for (; cnt >= 2; cnt -= 2)
			mci_fifo_writew(host->fifo_reg, *pdata++);
		buf = pdata;
	}
	/* put anything remaining in the part_buf */
	if (cnt) {
		dw_mci_set_part_bytes(host, buf, cnt);
		/* Push data if we have reached the expected data length */
		if ((data->bytes_xfered + init_cnt) ==
		    (data->blksz * data->blocks))
			mci_fifo_writew(host->fifo_reg, host->part_buf16);
	}
}
2115
/*
 * PIO pull for a 16-bit-wide FIFO: read @cnt bytes into @buf, bouncing
 * through an aligned buffer for misaligned destinations and leaving a
 * trailing odd byte buffered in part_buf for the next call.
 */
static void dw_mci_pull_data16(struct dw_mci *host, void *buf, int cnt)
{
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	if (unlikely((unsigned long)buf & 0x1)) {
		while (cnt >= 2) {
			/* pull data from fifo into aligned buffer */
			u16 aligned_buf[64];
			int len = min(cnt & -2, (int)sizeof(aligned_buf));
			int items = len >> 1;
			int i;

			for (i = 0; i < items; ++i)
				aligned_buf[i] = mci_fifo_readw(host->fifo_reg);
			/* memcpy from aligned buffer into output buffer */
			memcpy(buf, aligned_buf, len);
			buf += len;
			cnt -= len;
		}
	} else
#endif
	{
		u16 *pdata = buf;

		for (; cnt >= 2; cnt -= 2)
			*pdata++ = mci_fifo_readw(host->fifo_reg);
		buf = pdata;
	}
	if (cnt) {
		/* One byte left: read a whole FIFO word and keep the rest. */
		host->part_buf16 = mci_fifo_readw(host->fifo_reg);
		dw_mci_pull_final_bytes(host, buf, cnt);
	}
}
2148
/*
 * PIO push for a 32-bit-wide FIFO; same scheme as dw_mci_push_data16
 * with a 4-byte FIFO word.
 */
static void dw_mci_push_data32(struct dw_mci *host, void *buf, int cnt)
{
	struct mmc_data *data = host->data;
	int init_cnt = cnt;

	/* try and push anything in the part_buf */
	if (unlikely(host->part_buf_count)) {
		int len = dw_mci_push_part_bytes(host, buf, cnt);

		buf += len;
		cnt -= len;
		if (host->part_buf_count == 4) {
			/* part_buf now holds a full FIFO word; flush it. */
			mci_fifo_writel(host->fifo_reg, host->part_buf32);
			host->part_buf_count = 0;
		}
	}
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	if (unlikely((unsigned long)buf & 0x3)) {
		while (cnt >= 4) {
			u32 aligned_buf[32];
			int len = min(cnt & -4, (int)sizeof(aligned_buf));
			int items = len >> 2;
			int i;
			/* memcpy from input buffer into aligned buffer */
			memcpy(aligned_buf, buf, len);
			buf += len;
			cnt -= len;
			/* push data from aligned buffer into fifo */
			for (i = 0; i < items; ++i)
				mci_fifo_writel(host->fifo_reg, aligned_buf[i]);
		}
	} else
#endif
	{
		u32 *pdata = buf;

		for (; cnt >= 4; cnt -= 4)
			mci_fifo_writel(host->fifo_reg, *pdata++);
		buf = pdata;
	}
	/* put anything remaining in the part_buf */
	if (cnt) {
		dw_mci_set_part_bytes(host, buf, cnt);
		/* Push data if we have reached the expected data length */
		if ((data->bytes_xfered + init_cnt) ==
		    (data->blksz * data->blocks))
			mci_fifo_writel(host->fifo_reg, host->part_buf32);
	}
}
2198
/*
 * PIO pull for a 32-bit-wide FIFO; same scheme as dw_mci_pull_data16
 * with a 4-byte FIFO word.
 */
static void dw_mci_pull_data32(struct dw_mci *host, void *buf, int cnt)
{
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	if (unlikely((unsigned long)buf & 0x3)) {
		while (cnt >= 4) {
			/* pull data from fifo into aligned buffer */
			u32 aligned_buf[32];
			int len = min(cnt & -4, (int)sizeof(aligned_buf));
			int items = len >> 2;
			int i;

			for (i = 0; i < items; ++i)
				aligned_buf[i] = mci_fifo_readl(host->fifo_reg);
			/* memcpy from aligned buffer into output buffer */
			memcpy(buf, aligned_buf, len);
			buf += len;
			cnt -= len;
		}
	} else
#endif
	{
		u32 *pdata = buf;

		for (; cnt >= 4; cnt -= 4)
			*pdata++ = mci_fifo_readl(host->fifo_reg);
		buf = pdata;
	}
	if (cnt) {
		/* Tail bytes: read a whole FIFO word and keep the remainder. */
		host->part_buf32 = mci_fifo_readl(host->fifo_reg);
		dw_mci_pull_final_bytes(host, buf, cnt);
	}
}
2231
/*
 * PIO push for a 64-bit-wide FIFO; same scheme as dw_mci_push_data16
 * with an 8-byte FIFO word.
 */
static void dw_mci_push_data64(struct dw_mci *host, void *buf, int cnt)
{
	struct mmc_data *data = host->data;
	int init_cnt = cnt;

	/* try and push anything in the part_buf */
	if (unlikely(host->part_buf_count)) {
		int len = dw_mci_push_part_bytes(host, buf, cnt);

		buf += len;
		cnt -= len;

		if (host->part_buf_count == 8) {
			/* part_buf now holds a full FIFO word; flush it. */
			mci_fifo_writeq(host->fifo_reg, host->part_buf);
			host->part_buf_count = 0;
		}
	}
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	if (unlikely((unsigned long)buf & 0x7)) {
		while (cnt >= 8) {
			u64 aligned_buf[16];
			int len = min(cnt & -8, (int)sizeof(aligned_buf));
			int items = len >> 3;
			int i;
			/* memcpy from input buffer into aligned buffer */
			memcpy(aligned_buf, buf, len);
			buf += len;
			cnt -= len;
			/* push data from aligned buffer into fifo */
			for (i = 0; i < items; ++i)
				mci_fifo_writeq(host->fifo_reg, aligned_buf[i]);
		}
	} else
#endif
	{
		u64 *pdata = buf;

		for (; cnt >= 8; cnt -= 8)
			mci_fifo_writeq(host->fifo_reg, *pdata++);
		buf = pdata;
	}
	/* put anything remaining in the part_buf */
	if (cnt) {
		dw_mci_set_part_bytes(host, buf, cnt);
		/* Push data if we have reached the expected data length */
		if ((data->bytes_xfered + init_cnt) ==
		    (data->blksz * data->blocks))
			mci_fifo_writeq(host->fifo_reg, host->part_buf);
	}
}
2282
/*
 * PIO pull for a 64-bit-wide FIFO; same scheme as dw_mci_pull_data16
 * with an 8-byte FIFO word.
 */
static void dw_mci_pull_data64(struct dw_mci *host, void *buf, int cnt)
{
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	if (unlikely((unsigned long)buf & 0x7)) {
		while (cnt >= 8) {
			/* pull data from fifo into aligned buffer */
			u64 aligned_buf[16];
			int len = min(cnt & -8, (int)sizeof(aligned_buf));
			int items = len >> 3;
			int i;

			for (i = 0; i < items; ++i)
				aligned_buf[i] = mci_fifo_readq(host->fifo_reg);

			/* memcpy from aligned buffer into output buffer */
			memcpy(buf, aligned_buf, len);
			buf += len;
			cnt -= len;
		}
	} else
#endif
	{
		u64 *pdata = buf;

		for (; cnt >= 8; cnt -= 8)
			*pdata++ = mci_fifo_readq(host->fifo_reg);
		buf = pdata;
	}
	if (cnt) {
		/* Tail bytes: read a whole FIFO word and keep the remainder. */
		host->part_buf = mci_fifo_readq(host->fifo_reg);
		dw_mci_pull_final_bytes(host, buf, cnt);
	}
}
2316
2317static void dw_mci_pull_data(struct dw_mci *host, void *buf, int cnt)
2318{
2319 int len;
2320
2321 /* get remaining partial bytes */
2322 len = dw_mci_pull_part_bytes(host, buf, cnt);
2323 if (unlikely(len == cnt))
2324 return;
2325 buf += len;
2326 cnt -= len;
2327
2328 /* get the rest of the data */
2329 host->pull_data(host, buf, cnt);
Will Newtonf95f3852011-01-02 01:11:59 -05002330}
2331
/*
 * PIO read path: drain the FIFO into the request's scatterlist via the
 * sg mapping iterator.  Keeps reading while the controller reasserts
 * RXDR or, on the final DTO pass (@dto), while the FIFO is non-empty.
 * When the scatterlist is exhausted, flags EVENT_XFER_COMPLETE.
 */
static void dw_mci_read_data_pio(struct dw_mci *host, bool dto)
{
	struct sg_mapping_iter *sg_miter = &host->sg_miter;
	void *buf;
	unsigned int offset;
	struct mmc_data	*data = host->data;
	int shift = host->data_shift;
	u32 status;
	unsigned int len;
	unsigned int remain, fcnt;

	do {
		if (!sg_miter_next(sg_miter))
			goto done;

		host->sg = sg_miter->piter.sg;
		buf = sg_miter->addr;
		remain = sg_miter->length;
		offset = 0;

		do {
			/* Bytes available = FIFO fill level plus buffered tail. */
			fcnt = (SDMMC_GET_FCNT(mci_readl(host, STATUS))
					<< shift) + host->part_buf_count;
			len = min(remain, fcnt);
			if (!len)
				break;
			dw_mci_pull_data(host, (void *)(buf + offset), len);
			data->bytes_xfered += len;
			offset += len;
			remain -= len;
		} while (remain);

		sg_miter->consumed = offset;
		status = mci_readl(host, MINTSTS);
		mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
	/* if the RXDR is ready read again */
	} while ((status & SDMMC_INT_RXDR) ||
		 (dto && SDMMC_GET_FCNT(mci_readl(host, STATUS))));

	if (!remain) {
		/* Current segment fully consumed; peek at the next one. */
		if (!sg_miter_next(sg_miter))
			goto done;
		sg_miter->consumed = 0;
	}
	sg_miter_stop(sg_miter);
	return;

done:
	sg_miter_stop(sg_miter);
	host->sg = NULL;
	smp_wmb(); /* drain writebuffer */
	set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
}
2385
/*
 * PIO write path: feed the FIFO from the request's scatterlist via the
 * sg mapping iterator, looping while the controller reasserts TXDR.
 * When the scatterlist is exhausted, flags EVENT_XFER_COMPLETE.
 */
static void dw_mci_write_data_pio(struct dw_mci *host)
{
	struct sg_mapping_iter *sg_miter = &host->sg_miter;
	void *buf;
	unsigned int offset;
	struct mmc_data	*data = host->data;
	int shift = host->data_shift;
	u32 status;
	unsigned int len;
	unsigned int fifo_depth = host->fifo_depth;
	unsigned int remain, fcnt;

	do {
		if (!sg_miter_next(sg_miter))
			goto done;

		host->sg = sg_miter->piter.sg;
		buf = sg_miter->addr;
		remain = sg_miter->length;
		offset = 0;

		do {
			/* Free space = FIFO room minus the buffered tail bytes. */
			fcnt = ((fifo_depth -
				 SDMMC_GET_FCNT(mci_readl(host, STATUS)))
					<< shift) - host->part_buf_count;
			len = min(remain, fcnt);
			if (!len)
				break;
			host->push_data(host, (void *)(buf + offset), len);
			data->bytes_xfered += len;
			offset += len;
			remain -= len;
		} while (remain);

		sg_miter->consumed = offset;
		status = mci_readl(host, MINTSTS);
		mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
	} while (status & SDMMC_INT_TXDR); /* if TXDR write again */

	if (!remain) {
		/* Current segment fully consumed; peek at the next one. */
		if (!sg_miter_next(sg_miter))
			goto done;
		sg_miter->consumed = 0;
	}
	sg_miter_stop(sg_miter);
	return;

done:
	sg_miter_stop(sg_miter);
	host->sg = NULL;
	smp_wmb(); /* drain writebuffer */
	set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
}
2439
/*
 * Record a command-phase interrupt status and defer processing to the
 * tasklet.  The first status captured wins: if cmd_status is already
 * non-zero (e.g. an earlier error), it is not overwritten.
 */
static void dw_mci_cmd_interrupt(struct dw_mci *host, u32 status)
{
	if (!host->cmd_status)
		host->cmd_status = status;

	/* Make cmd_status visible before EVENT_CMD_COMPLETE is observed */
	smp_wmb(); /* drain writebuffer */

	set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
	tasklet_schedule(&host->tasklet);
}
2450
Doug Anderson6130e7a2014-10-14 09:33:09 -07002451static void dw_mci_handle_cd(struct dw_mci *host)
2452{
2453 int i;
2454
2455 for (i = 0; i < host->num_slots; i++) {
2456 struct dw_mci_slot *slot = host->slot[i];
2457
2458 if (!slot)
2459 continue;
2460
2461 if (slot->mmc->ops->card_event)
2462 slot->mmc->ops->card_event(slot->mmc);
2463 mmc_detect_change(slot->mmc,
2464 msecs_to_jiffies(host->pdata->detect_delay_ms));
2465 }
2466}
2467
/*
 * Top-level interrupt handler.
 *
 * Reads MINTSTS (masked interrupt status) once, then acknowledges and
 * dispatches each pending source: CMD11 voltage switch, command/data
 * errors, data-over, PIO FIFO watermarks (RXDR/TXDR), command done,
 * card detect and per-slot SDIO interrupts.  Heavy processing is
 * deferred to the tasklet via host->pending_events.  Finally, when the
 * internal DMAC is in use, the IDMAC status registers (IDSTS/IDSTS64)
 * are serviced as well.
 */
static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
{
	struct dw_mci *host = dev_id;
	u32 pending;
	int i;

	pending = mci_readl(host, MINTSTS); /* read-only mask reg */

	if (pending) {
		/* Check volt switch first, since it can look like an error */
		if ((host->state == STATE_SENDING_CMD11) &&
		    (pending & SDMMC_INT_VOLT_SWITCH)) {
			unsigned long irqflags;

			mci_writel(host, RINTSTS, SDMMC_INT_VOLT_SWITCH);
			pending &= ~SDMMC_INT_VOLT_SWITCH;

			/*
			 * Hold the lock; we know cmd11_timer can't be kicked
			 * off after the lock is released, so safe to delete.
			 */
			spin_lock_irqsave(&host->irq_lock, irqflags);
			dw_mci_cmd_interrupt(host, pending);
			spin_unlock_irqrestore(&host->irq_lock, irqflags);

			del_timer(&host->cmd11_timer);
		}

		if (pending & DW_MCI_CMD_ERROR_FLAGS) {
			mci_writel(host, RINTSTS, DW_MCI_CMD_ERROR_FLAGS);
			host->cmd_status = pending;
			smp_wmb(); /* drain writebuffer */
			set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
		}

		if (pending & DW_MCI_DATA_ERROR_FLAGS) {
			/* if there is an error report DATA_ERROR */
			mci_writel(host, RINTSTS, DW_MCI_DATA_ERROR_FLAGS);
			host->data_status = pending;
			smp_wmb(); /* drain writebuffer */
			set_bit(EVENT_DATA_ERROR, &host->pending_events);
			tasklet_schedule(&host->tasklet);
		}

		if (pending & SDMMC_INT_DATA_OVER) {
			/* DTO arrived, so the data-timeout watchdog is moot */
			del_timer(&host->dto_timer);

			mci_writel(host, RINTSTS, SDMMC_INT_DATA_OVER);
			/* Keep an earlier (error) status if one was recorded */
			if (!host->data_status)
				host->data_status = pending;
			smp_wmb(); /* drain writebuffer */
			if (host->dir_status == DW_MCI_RECV_STATUS) {
				/* Drain whatever is left in the RX FIFO */
				if (host->sg != NULL)
					dw_mci_read_data_pio(host, true);
			}
			set_bit(EVENT_DATA_COMPLETE, &host->pending_events);
			tasklet_schedule(&host->tasklet);
		}

		if (pending & SDMMC_INT_RXDR) {
			mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
			if (host->dir_status == DW_MCI_RECV_STATUS && host->sg)
				dw_mci_read_data_pio(host, false);
		}

		if (pending & SDMMC_INT_TXDR) {
			mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
			if (host->dir_status == DW_MCI_SEND_STATUS && host->sg)
				dw_mci_write_data_pio(host);
		}

		if (pending & SDMMC_INT_CMD_DONE) {
			mci_writel(host, RINTSTS, SDMMC_INT_CMD_DONE);
			dw_mci_cmd_interrupt(host, pending);
		}

		if (pending & SDMMC_INT_CD) {
			mci_writel(host, RINTSTS, SDMMC_INT_CD);
			dw_mci_handle_cd(host);
		}

		/* Handle SDIO Interrupts */
		for (i = 0; i < host->num_slots; i++) {
			struct dw_mci_slot *slot = host->slot[i];

			if (!slot)
				continue;

			if (pending & SDMMC_INT_SDIO(slot->sdio_id)) {
				mci_writel(host, RINTSTS,
					   SDMMC_INT_SDIO(slot->sdio_id));
				mmc_signal_sdio_irq(slot->mmc);
			}
		}

	}

	/* Only the internal DMAC raises interrupts through this handler */
	if (host->use_dma != TRANS_MODE_IDMAC)
		return IRQ_HANDLED;

	/* Handle IDMA interrupts */
	if (host->dma_64bit_address == 1) {
		pending = mci_readl(host, IDSTS64);
		if (pending & (SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI)) {
			mci_writel(host, IDSTS64, SDMMC_IDMAC_INT_TI |
							SDMMC_IDMAC_INT_RI);
			mci_writel(host, IDSTS64, SDMMC_IDMAC_INT_NI);
			/* on error the tasklet handles cleanup instead */
			if (!test_bit(EVENT_DATA_ERROR, &host->pending_events))
				host->dma_ops->complete((void *)host);
		}
	} else {
		pending = mci_readl(host, IDSTS);
		if (pending & (SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI)) {
			mci_writel(host, IDSTS, SDMMC_IDMAC_INT_TI |
							SDMMC_IDMAC_INT_RI);
			mci_writel(host, IDSTS, SDMMC_IDMAC_INT_NI);
			if (!test_bit(EVENT_DATA_ERROR, &host->pending_events))
				host->dma_ops->complete((void *)host);
		}
	}

	return IRQ_HANDLED;
}
2591
Jaehoon Chung36c179a2012-08-23 20:31:48 +09002592static int dw_mci_init_slot(struct dw_mci *host, unsigned int id)
Will Newtonf95f3852011-01-02 01:11:59 -05002593{
2594 struct mmc_host *mmc;
2595 struct dw_mci_slot *slot;
Arnd Bergmanne95baf12012-11-08 14:26:11 +00002596 const struct dw_mci_drv_data *drv_data = host->drv_data;
Thomas Abraham800d78b2012-09-17 18:16:42 +00002597 int ctrl_id, ret;
Seungwon Jeon1f44a2a2013-08-31 00:13:31 +09002598 u32 freq[2];
Will Newtonf95f3852011-01-02 01:11:59 -05002599
Thomas Abraham4a909202012-09-17 18:16:35 +00002600 mmc = mmc_alloc_host(sizeof(struct dw_mci_slot), host->dev);
Will Newtonf95f3852011-01-02 01:11:59 -05002601 if (!mmc)
2602 return -ENOMEM;
2603
2604 slot = mmc_priv(mmc);
2605 slot->id = id;
Addy Ke76756232014-11-04 22:03:09 +08002606 slot->sdio_id = host->sdio_id0 + id;
Will Newtonf95f3852011-01-02 01:11:59 -05002607 slot->mmc = mmc;
2608 slot->host = host;
Thomas Abrahamc91eab42012-09-17 18:16:40 +00002609 host->slot[id] = slot;
Will Newtonf95f3852011-01-02 01:11:59 -05002610
2611 mmc->ops = &dw_mci_ops;
Seungwon Jeon1f44a2a2013-08-31 00:13:31 +09002612 if (of_property_read_u32_array(host->dev->of_node,
2613 "clock-freq-min-max", freq, 2)) {
2614 mmc->f_min = DW_MCI_FREQ_MIN;
2615 mmc->f_max = DW_MCI_FREQ_MAX;
2616 } else {
2617 mmc->f_min = freq[0];
2618 mmc->f_max = freq[1];
2619 }
Will Newtonf95f3852011-01-02 01:11:59 -05002620
Yuvaraj CD51da2242014-08-22 19:17:50 +05302621 /*if there are external regulators, get them*/
2622 ret = mmc_regulator_get_supply(mmc);
2623 if (ret == -EPROBE_DEFER)
Doug Anderson3cf890f2014-08-25 11:19:04 -07002624 goto err_host_allocated;
Yuvaraj CD51da2242014-08-22 19:17:50 +05302625
2626 if (!mmc->ocr_avail)
2627 mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
Will Newtonf95f3852011-01-02 01:11:59 -05002628
Jaehoon Chungfc3d7722011-02-25 11:08:15 +09002629 if (host->pdata->caps)
2630 mmc->caps = host->pdata->caps;
Jaehoon Chungfc3d7722011-02-25 11:08:15 +09002631
Jaehoon Chung6024e162016-07-15 10:54:50 +09002632 /*
2633 * Support MMC_CAP_ERASE by default.
2634 * It needs to use trim/discard/erase commands.
2635 */
2636 mmc->caps |= MMC_CAP_ERASE;
2637
Abhilash Kesavanab269122012-11-19 10:26:21 +05302638 if (host->pdata->pm_caps)
2639 mmc->pm_caps = host->pdata->pm_caps;
2640
Thomas Abraham800d78b2012-09-17 18:16:42 +00002641 if (host->dev->of_node) {
2642 ctrl_id = of_alias_get_id(host->dev->of_node, "mshc");
2643 if (ctrl_id < 0)
2644 ctrl_id = 0;
2645 } else {
2646 ctrl_id = to_platform_device(host->dev)->id;
2647 }
James Hogancb27a842012-10-16 09:43:08 +01002648 if (drv_data && drv_data->caps)
2649 mmc->caps |= drv_data->caps[ctrl_id];
Thomas Abraham800d78b2012-09-17 18:16:42 +00002650
Seungwon Jeon4f408cc2011-12-09 14:55:52 +09002651 if (host->pdata->caps2)
2652 mmc->caps2 = host->pdata->caps2;
Seungwon Jeon4f408cc2011-12-09 14:55:52 +09002653
Doug Anderson3cf890f2014-08-25 11:19:04 -07002654 ret = mmc_of_parse(mmc);
2655 if (ret)
2656 goto err_host_allocated;
Will Newtonf95f3852011-01-02 01:11:59 -05002657
Jaehoon Chung2b708df2015-08-06 16:23:25 +09002658 /* Useful defaults if platform data is unset. */
Shawn Lin3fc7eae2015-09-16 14:41:23 +08002659 if (host->use_dma == TRANS_MODE_IDMAC) {
Jaehoon Chung2b708df2015-08-06 16:23:25 +09002660 mmc->max_segs = host->ring_size;
Jaehoon Chung225faf82016-05-04 11:24:14 +09002661 mmc->max_blk_size = 65535;
Jaehoon Chung2b708df2015-08-06 16:23:25 +09002662 mmc->max_seg_size = 0x1000;
2663 mmc->max_req_size = mmc->max_seg_size * host->ring_size;
2664 mmc->max_blk_count = mmc->max_req_size / 512;
Shawn Lin3fc7eae2015-09-16 14:41:23 +08002665 } else if (host->use_dma == TRANS_MODE_EDMAC) {
2666 mmc->max_segs = 64;
Jaehoon Chung225faf82016-05-04 11:24:14 +09002667 mmc->max_blk_size = 65535;
Shawn Lin3fc7eae2015-09-16 14:41:23 +08002668 mmc->max_blk_count = 65535;
2669 mmc->max_req_size =
2670 mmc->max_blk_size * mmc->max_blk_count;
2671 mmc->max_seg_size = mmc->max_req_size;
Will Newtonf95f3852011-01-02 01:11:59 -05002672 } else {
Shawn Lin3fc7eae2015-09-16 14:41:23 +08002673 /* TRANS_MODE_PIO */
Jaehoon Chung2b708df2015-08-06 16:23:25 +09002674 mmc->max_segs = 64;
Jaehoon Chung225faf82016-05-04 11:24:14 +09002675 mmc->max_blk_size = 65535; /* BLKSIZ is 16 bits */
Jaehoon Chung2b708df2015-08-06 16:23:25 +09002676 mmc->max_blk_count = 512;
2677 mmc->max_req_size = mmc->max_blk_size *
2678 mmc->max_blk_count;
2679 mmc->max_seg_size = mmc->max_req_size;
Jaehoon Chunga39e5742012-02-04 17:00:27 -05002680 }
Will Newtonf95f3852011-01-02 01:11:59 -05002681
Shawn Linc0834a52016-05-27 14:36:40 +08002682 dw_mci_get_cd(mmc);
Jaehoon Chungae0eb342014-03-03 11:36:48 +09002683
Jaehoon Chung0cea5292013-02-15 23:45:45 +09002684 ret = mmc_add_host(mmc);
2685 if (ret)
Doug Anderson3cf890f2014-08-25 11:19:04 -07002686 goto err_host_allocated;
Will Newtonf95f3852011-01-02 01:11:59 -05002687
2688#if defined(CONFIG_DEBUG_FS)
2689 dw_mci_init_debugfs(slot);
2690#endif
2691
Will Newtonf95f3852011-01-02 01:11:59 -05002692 return 0;
Thomas Abraham800d78b2012-09-17 18:16:42 +00002693
Doug Anderson3cf890f2014-08-25 11:19:04 -07002694err_host_allocated:
Thomas Abraham800d78b2012-09-17 18:16:42 +00002695 mmc_free_host(mmc);
Yuvaraj CD51da2242014-08-22 19:17:50 +05302696 return ret;
Will Newtonf95f3852011-01-02 01:11:59 -05002697}
2698
2699static void dw_mci_cleanup_slot(struct dw_mci_slot *slot, unsigned int id)
2700{
Will Newtonf95f3852011-01-02 01:11:59 -05002701 /* Debugfs stuff is cleaned up by mmc core */
2702 mmc_remove_host(slot->mmc);
2703 slot->host->slot[id] = NULL;
2704 mmc_free_host(slot->mmc);
2705}
2706
/*
 * Probe-time DMA setup.
 *
 * Decodes the transfer mode from HCON, picks the internal DMAC
 * (allocating its coherent descriptor ring and selecting 32/64-bit
 * addressing from ADDR_CONFIG) or an external DMA engine (validated
 * against the DT "dmas"/"dma-names" bindings), then runs the chosen
 * dma_ops init hook.  Any failure falls back to PIO mode.
 */
static void dw_mci_init_dma(struct dw_mci *host)
{
	int addr_config;
	struct device *dev = host->dev;
	struct device_node *np = dev->of_node;

	/*
	 * Check tansfer mode from HCON[17:16]
	 * Clear the ambiguous description of dw_mmc databook:
	 * 2b'00: No DMA Interface -> Actually means using Internal DMA block
	 * 2b'01: DesignWare DMA Interface -> Synopsys DW-DMA block
	 * 2b'10: Generic DMA Interface -> non-Synopsys generic DMA block
	 * 2b'11: Non DW DMA Interface -> pio only
	 * Compared to DesignWare DMA Interface, Generic DMA Interface has a
	 * simpler request/acknowledge handshake mechanism and both of them
	 * are regarded as external dma master for dw_mmc.
	 */
	host->use_dma = SDMMC_GET_TRANS_MODE(mci_readl(host, HCON));
	if (host->use_dma == DMA_INTERFACE_IDMA) {
		host->use_dma = TRANS_MODE_IDMAC;
	} else if (host->use_dma == DMA_INTERFACE_DWDMA ||
		   host->use_dma == DMA_INTERFACE_GDMA) {
		host->use_dma = TRANS_MODE_EDMAC;
	} else {
		goto no_dma;
	}

	/* Determine which DMA interface to use */
	if (host->use_dma == TRANS_MODE_IDMAC) {
		/*
		 * Check ADDR_CONFIG bit in HCON to find
		 * IDMAC address bus width
		 */
		addr_config = SDMMC_GET_ADDR_CONFIG(mci_readl(host, HCON));

		if (addr_config == 1) {
			/* host supports IDMAC in 64-bit address mode */
			host->dma_64bit_address = 1;
			dev_info(host->dev,
				 "IDMAC supports 64-bit address mode.\n");
			if (!dma_set_mask(host->dev, DMA_BIT_MASK(64)))
				dma_set_coherent_mask(host->dev,
						      DMA_BIT_MASK(64));
		} else {
			/* host supports IDMAC in 32-bit address mode */
			host->dma_64bit_address = 0;
			dev_info(host->dev,
				 "IDMAC supports 32-bit address mode.\n");
		}

		/* Alloc memory for sg translation */
		host->sg_cpu = dmam_alloc_coherent(host->dev,
						   DESC_RING_BUF_SZ,
						   &host->sg_dma, GFP_KERNEL);
		if (!host->sg_cpu) {
			dev_err(host->dev,
				"%s: could not alloc DMA memory\n",
				__func__);
			goto no_dma;
		}

		host->dma_ops = &dw_mci_idmac_ops;
		dev_info(host->dev, "Using internal DMA controller.\n");
	} else {
		/* TRANS_MODE_EDMAC: check dma bindings again */
		if ((of_property_count_strings(np, "dma-names") < 0) ||
		    (!of_find_property(np, "dmas", NULL))) {
			goto no_dma;
		}
		host->dma_ops = &dw_mci_edmac_ops;
		dev_info(host->dev, "Using external DMA controller.\n");
	}

	/* All four hooks are required before DMA can be trusted */
	if (host->dma_ops->init && host->dma_ops->start &&
	    host->dma_ops->stop && host->dma_ops->cleanup) {
		if (host->dma_ops->init(host)) {
			dev_err(host->dev, "%s: Unable to initialize DMA Controller.\n",
				__func__);
			goto no_dma;
		}
	} else {
		dev_err(host->dev, "DMA initialization not found.\n");
		goto no_dma;
	}

	return;

no_dma:
	dev_info(host->dev, "Using PIO mode.\n");
	host->use_dma = TRANS_MODE_PIO;
}
2798
Seungwon Jeon31bff452013-08-31 00:14:23 +09002799static bool dw_mci_ctrl_reset(struct dw_mci *host, u32 reset)
Will Newtonf95f3852011-01-02 01:11:59 -05002800{
2801 unsigned long timeout = jiffies + msecs_to_jiffies(500);
Seungwon Jeon31bff452013-08-31 00:14:23 +09002802 u32 ctrl;
Will Newtonf95f3852011-01-02 01:11:59 -05002803
Seungwon Jeon31bff452013-08-31 00:14:23 +09002804 ctrl = mci_readl(host, CTRL);
2805 ctrl |= reset;
2806 mci_writel(host, CTRL, ctrl);
Will Newtonf95f3852011-01-02 01:11:59 -05002807
2808 /* wait till resets clear */
2809 do {
2810 ctrl = mci_readl(host, CTRL);
Seungwon Jeon31bff452013-08-31 00:14:23 +09002811 if (!(ctrl & reset))
Will Newtonf95f3852011-01-02 01:11:59 -05002812 return true;
2813 } while (time_before(jiffies, timeout));
2814
Seungwon Jeon31bff452013-08-31 00:14:23 +09002815 dev_err(host->dev,
2816 "Timeout resetting block (ctrl reset %#x)\n",
2817 ctrl & reset);
Will Newtonf95f3852011-01-02 01:11:59 -05002818
2819 return false;
2820}
2821
/*
 * Perform a full controller reset: CIU + FIFO, plus the DMA interface
 * when DMA is in use.  After any CTRL reset the card clock must be
 * reprogrammed, so the CIU clock-update command is always issued on the
 * way out, success or failure.
 *
 * Returns true on a clean reset, false otherwise.
 */
static bool dw_mci_reset(struct dw_mci *host)
{
	u32 flags = SDMMC_CTRL_RESET | SDMMC_CTRL_FIFO_RESET;
	bool ret = false;

	/*
	 * Reseting generates a block interrupt, hence setting
	 * the scatter-gather pointer to NULL.
	 */
	if (host->sg) {
		sg_miter_stop(&host->sg_miter);
		host->sg = NULL;
	}

	if (host->use_dma)
		flags |= SDMMC_CTRL_DMA_RESET;

	if (dw_mci_ctrl_reset(host, flags)) {
		/*
		 * In all cases we clear the RAWINTS register to clear any
		 * interrupts.
		 */
		mci_writel(host, RINTSTS, 0xFFFFFFFF);

		/* if using dma we wait for dma_req to clear */
		if (host->use_dma) {
			unsigned long timeout = jiffies + msecs_to_jiffies(500);
			u32 status;

			do {
				status = mci_readl(host, STATUS);
				if (!(status & SDMMC_STATUS_DMA_REQ))
					break;
				cpu_relax();
			} while (time_before(jiffies, timeout));

			if (status & SDMMC_STATUS_DMA_REQ) {
				dev_err(host->dev,
					"%s: Timeout waiting for dma_req to clear during reset\n",
					__func__);
				goto ciu_out;
			}

			/* when using DMA next we reset the fifo again */
			if (!dw_mci_ctrl_reset(host, SDMMC_CTRL_FIFO_RESET))
				goto ciu_out;
		}
	} else {
		/* if the controller reset bit did clear, then set clock regs */
		if (!(mci_readl(host, CTRL) & SDMMC_CTRL_RESET)) {
			dev_err(host->dev,
				"%s: fifo/dma reset bits didn't clear but ciu was reset, doing clock update\n",
				__func__);
			goto ciu_out;
		}
	}

	if (host->use_dma == TRANS_MODE_IDMAC)
		/* It is also recommended that we reset and reprogram idmac */
		dw_mci_idmac_reset(host);

	ret = true;

ciu_out:
	/* After a CTRL reset we need to have CIU set clock registers */
	mci_send_cmd(host->cur_slot, SDMMC_CMD_UPD_CLK, 0);

	return ret;
}
2891
Doug Anderson5c935162015-03-09 16:18:21 -07002892static void dw_mci_cmd11_timer(unsigned long arg)
2893{
2894 struct dw_mci *host = (struct dw_mci *)arg;
2895
Doug Andersonfd674192015-04-03 11:13:06 -07002896 if (host->state != STATE_SENDING_CMD11) {
2897 dev_warn(host->dev, "Unexpected CMD11 timeout\n");
2898 return;
2899 }
Doug Anderson5c935162015-03-09 16:18:21 -07002900
2901 host->cmd_status = SDMMC_INT_RTO;
2902 set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
2903 tasklet_schedule(&host->tasklet);
2904}
2905
Addy Ke57e10482015-08-11 01:27:18 +09002906static void dw_mci_dto_timer(unsigned long arg)
2907{
2908 struct dw_mci *host = (struct dw_mci *)arg;
2909
2910 switch (host->state) {
2911 case STATE_SENDING_DATA:
2912 case STATE_DATA_BUSY:
2913 /*
2914 * If DTO interrupt does NOT come in sending data state,
2915 * we should notify the driver to terminate current transfer
2916 * and report a data timeout to the core.
2917 */
2918 host->data_status = SDMMC_INT_DRTO;
2919 set_bit(EVENT_DATA_ERROR, &host->pending_events);
2920 set_bit(EVENT_DATA_COMPLETE, &host->pending_events);
2921 tasklet_schedule(&host->tasklet);
2922 break;
2923 default:
2924 break;
2925 }
2926}
2927
#ifdef CONFIG_OF
/*
 * Build a dw_mci_board from device-tree properties.
 *
 * Reads the optional reset control, "num-slots", "fifo-depth",
 * "card-detect-delay" and "clock-frequency" properties, then gives the
 * controller-specific drv_data a chance to parse its own bindings.
 *
 * Returns the allocated pdata, or an ERR_PTR.  Only -EPROBE_DEFER from
 * the reset controller is propagated; other reset lookup errors are
 * deliberately ignored because the reset line is optional.
 */
static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
{
	struct dw_mci_board *pdata;
	struct device *dev = host->dev;
	struct device_node *np = dev->of_node;
	const struct dw_mci_drv_data *drv_data = host->drv_data;
	int ret;
	u32 clock_frequency;

	pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
	if (!pdata)
		return ERR_PTR(-ENOMEM);

	/* find reset controller when exist */
	pdata->rstc = devm_reset_control_get_optional(dev, "reset");
	if (IS_ERR(pdata->rstc)) {
		if (PTR_ERR(pdata->rstc) == -EPROBE_DEFER)
			return ERR_PTR(-EPROBE_DEFER);
	}

	/* find out number of slots supported */
	of_property_read_u32(np, "num-slots", &pdata->num_slots);

	if (of_property_read_u32(np, "fifo-depth", &pdata->fifo_depth))
		dev_info(dev,
			 "fifo-depth property not found, using value of FIFOTH register as default\n");

	of_property_read_u32(np, "card-detect-delay", &pdata->detect_delay_ms);

	if (!of_property_read_u32(np, "clock-frequency", &clock_frequency))
		pdata->bus_hz = clock_frequency;

	/* let the SoC-specific glue parse its own DT properties */
	if (drv_data && drv_data->parse_dt) {
		ret = drv_data->parse_dt(host);
		if (ret)
			return ERR_PTR(ret);
	}

	return pdata;
}

#else /* CONFIG_OF */
/* Without OF support there is nothing to parse; callers must supply pdata. */
static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
{
	return ERR_PTR(-EINVAL);
}
#endif /* CONFIG_OF */
2976
Doug Andersonfa0c3282015-02-25 10:11:51 -08002977static void dw_mci_enable_cd(struct dw_mci *host)
2978{
Doug Andersonfa0c3282015-02-25 10:11:51 -08002979 unsigned long irqflags;
2980 u32 temp;
2981 int i;
Shawn Line8cc37b2016-01-21 14:52:52 +08002982 struct dw_mci_slot *slot;
Doug Andersonfa0c3282015-02-25 10:11:51 -08002983
Shawn Line8cc37b2016-01-21 14:52:52 +08002984 /*
2985 * No need for CD if all slots have a non-error GPIO
2986 * as well as broken card detection is found.
2987 */
Doug Andersonfa0c3282015-02-25 10:11:51 -08002988 for (i = 0; i < host->num_slots; i++) {
Shawn Line8cc37b2016-01-21 14:52:52 +08002989 slot = host->slot[i];
2990 if (slot->mmc->caps & MMC_CAP_NEEDS_POLL)
2991 return;
Doug Andersonfa0c3282015-02-25 10:11:51 -08002992
Arnd Bergmann287980e2016-05-27 23:23:25 +02002993 if (mmc_gpio_get_cd(slot->mmc) < 0)
Doug Andersonfa0c3282015-02-25 10:11:51 -08002994 break;
2995 }
2996 if (i == host->num_slots)
2997 return;
2998
2999 spin_lock_irqsave(&host->irq_lock, irqflags);
3000 temp = mci_readl(host, INTMASK);
3001 temp |= SDMMC_INT_CD;
3002 mci_writel(host, INTMASK, temp);
3003 spin_unlock_irqrestore(&host->irq_lock, irqflags);
3004}
3005
Shashidhar Hiremath62ca8032012-01-13 16:04:57 +05303006int dw_mci_probe(struct dw_mci *host)
Will Newtonf95f3852011-01-02 01:11:59 -05003007{
Arnd Bergmanne95baf12012-11-08 14:26:11 +00003008 const struct dw_mci_drv_data *drv_data = host->drv_data;
Shashidhar Hiremath62ca8032012-01-13 16:04:57 +05303009 int width, i, ret = 0;
Will Newtonf95f3852011-01-02 01:11:59 -05003010 u32 fifo_size;
Thomas Abraham1c2215b2012-09-17 18:16:37 +00003011 int init_slots = 0;
Will Newtonf95f3852011-01-02 01:11:59 -05003012
Thomas Abrahamc91eab42012-09-17 18:16:40 +00003013 if (!host->pdata) {
3014 host->pdata = dw_mci_parse_dt(host);
Guodong Xud6786fe2016-08-12 16:51:26 +08003015 if (PTR_ERR(host->pdata) == -EPROBE_DEFER) {
3016 return -EPROBE_DEFER;
3017 } else if (IS_ERR(host->pdata)) {
Thomas Abrahamc91eab42012-09-17 18:16:40 +00003018 dev_err(host->dev, "platform data not available\n");
3019 return -EINVAL;
3020 }
Will Newtonf95f3852011-01-02 01:11:59 -05003021 }
3022
Seungwon Jeon780f22a2012-11-28 19:26:03 +09003023 host->biu_clk = devm_clk_get(host->dev, "biu");
Thomas Abrahamf90a0612012-09-17 18:16:38 +00003024 if (IS_ERR(host->biu_clk)) {
3025 dev_dbg(host->dev, "biu clock not available\n");
3026 } else {
3027 ret = clk_prepare_enable(host->biu_clk);
3028 if (ret) {
3029 dev_err(host->dev, "failed to enable biu clock\n");
Thomas Abrahamf90a0612012-09-17 18:16:38 +00003030 return ret;
3031 }
Will Newtonf95f3852011-01-02 01:11:59 -05003032 }
3033
Seungwon Jeon780f22a2012-11-28 19:26:03 +09003034 host->ciu_clk = devm_clk_get(host->dev, "ciu");
Thomas Abrahamf90a0612012-09-17 18:16:38 +00003035 if (IS_ERR(host->ciu_clk)) {
3036 dev_dbg(host->dev, "ciu clock not available\n");
Doug Anderson3c6d89e2013-06-07 10:28:30 -07003037 host->bus_hz = host->pdata->bus_hz;
Thomas Abrahamf90a0612012-09-17 18:16:38 +00003038 } else {
3039 ret = clk_prepare_enable(host->ciu_clk);
3040 if (ret) {
3041 dev_err(host->dev, "failed to enable ciu clock\n");
Thomas Abrahamf90a0612012-09-17 18:16:38 +00003042 goto err_clk_biu;
3043 }
Thomas Abrahamf90a0612012-09-17 18:16:38 +00003044
Doug Anderson3c6d89e2013-06-07 10:28:30 -07003045 if (host->pdata->bus_hz) {
3046 ret = clk_set_rate(host->ciu_clk, host->pdata->bus_hz);
3047 if (ret)
3048 dev_warn(host->dev,
Jaehoon Chung612de4c2014-03-03 11:36:42 +09003049 "Unable to set bus rate to %uHz\n",
Doug Anderson3c6d89e2013-06-07 10:28:30 -07003050 host->pdata->bus_hz);
3051 }
Thomas Abrahamf90a0612012-09-17 18:16:38 +00003052 host->bus_hz = clk_get_rate(host->ciu_clk);
Doug Anderson3c6d89e2013-06-07 10:28:30 -07003053 }
Thomas Abrahamf90a0612012-09-17 18:16:38 +00003054
Jaehoon Chung612de4c2014-03-03 11:36:42 +09003055 if (!host->bus_hz) {
3056 dev_err(host->dev,
3057 "Platform data must supply bus speed\n");
3058 ret = -ENODEV;
3059 goto err_clk_ciu;
3060 }
3061
Yuvaraj Kumar C D002f0d52013-08-31 00:12:19 +09003062 if (drv_data && drv_data->init) {
3063 ret = drv_data->init(host);
3064 if (ret) {
3065 dev_err(host->dev,
3066 "implementation specific init failed\n");
3067 goto err_clk_ciu;
3068 }
3069 }
3070
Guodong Xud6786fe2016-08-12 16:51:26 +08003071 if (!IS_ERR(host->pdata->rstc)) {
3072 reset_control_assert(host->pdata->rstc);
3073 usleep_range(10, 50);
3074 reset_control_deassert(host->pdata->rstc);
3075 }
3076
Doug Anderson5c935162015-03-09 16:18:21 -07003077 setup_timer(&host->cmd11_timer,
3078 dw_mci_cmd11_timer, (unsigned long)host);
3079
Jaehoon Chung16a34572016-06-21 14:35:37 +09003080 setup_timer(&host->dto_timer,
3081 dw_mci_dto_timer, (unsigned long)host);
Addy Ke57e10482015-08-11 01:27:18 +09003082
Will Newtonf95f3852011-01-02 01:11:59 -05003083 spin_lock_init(&host->lock);
Doug Andersonf8c58c12014-12-02 15:42:47 -08003084 spin_lock_init(&host->irq_lock);
Will Newtonf95f3852011-01-02 01:11:59 -05003085 INIT_LIST_HEAD(&host->queue);
3086
Will Newtonf95f3852011-01-02 01:11:59 -05003087 /*
3088 * Get the host data width - this assumes that HCON has been set with
3089 * the correct values.
3090 */
Shawn Lin70692752015-09-16 14:41:37 +08003091 i = SDMMC_GET_HDATA_WIDTH(mci_readl(host, HCON));
Will Newtonf95f3852011-01-02 01:11:59 -05003092 if (!i) {
3093 host->push_data = dw_mci_push_data16;
3094 host->pull_data = dw_mci_pull_data16;
3095 width = 16;
3096 host->data_shift = 1;
3097 } else if (i == 2) {
3098 host->push_data = dw_mci_push_data64;
3099 host->pull_data = dw_mci_pull_data64;
3100 width = 64;
3101 host->data_shift = 3;
3102 } else {
3103 /* Check for a reserved value, and warn if it is */
3104 WARN((i != 1),
3105 "HCON reports a reserved host data width!\n"
3106 "Defaulting to 32-bit access.\n");
3107 host->push_data = dw_mci_push_data32;
3108 host->pull_data = dw_mci_pull_data32;
3109 width = 32;
3110 host->data_shift = 2;
3111 }
3112
3113 /* Reset all blocks */
Shawn Lin37444152016-01-22 15:43:12 +08003114 if (!dw_mci_ctrl_reset(host, SDMMC_CTRL_ALL_RESET_FLAGS)) {
3115 ret = -ENODEV;
3116 goto err_clk_ciu;
3117 }
Seungwon Jeon141a7122012-05-22 13:01:03 +09003118
3119 host->dma_ops = host->pdata->dma_ops;
3120 dw_mci_init_dma(host);
Will Newtonf95f3852011-01-02 01:11:59 -05003121
3122 /* Clear the interrupts for the host controller */
3123 mci_writel(host, RINTSTS, 0xFFFFFFFF);
3124 mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */
3125
3126 /* Put in max timeout */
3127 mci_writel(host, TMOUT, 0xFFFFFFFF);
3128
3129 /*
3130 * FIFO threshold settings RxMark = fifo_size / 2 - 1,
3131 * Tx Mark = fifo_size / 2 DMA Size = 8
3132 */
James Hoganb86d8252011-06-24 13:57:18 +01003133 if (!host->pdata->fifo_depth) {
3134 /*
3135 * Power-on value of RX_WMark is FIFO_DEPTH-1, but this may
3136 * have been overwritten by the bootloader, just like we're
3137 * about to do, so if you know the value for your hardware, you
3138 * should put it in the platform data.
3139 */
3140 fifo_size = mci_readl(host, FIFOTH);
Jaehoon Chung8234e862012-01-11 09:28:21 +00003141 fifo_size = 1 + ((fifo_size >> 16) & 0xfff);
James Hoganb86d8252011-06-24 13:57:18 +01003142 } else {
3143 fifo_size = host->pdata->fifo_depth;
3144 }
3145 host->fifo_depth = fifo_size;
Seungwon Jeon52426892013-08-31 00:13:42 +09003146 host->fifoth_val =
3147 SDMMC_SET_FIFOTH(0x2, fifo_size / 2 - 1, fifo_size / 2);
Jaehoon Chunge61cf112011-03-17 20:32:33 +09003148 mci_writel(host, FIFOTH, host->fifoth_val);
Will Newtonf95f3852011-01-02 01:11:59 -05003149
3150 /* disable clock to CIU */
3151 mci_writel(host, CLKENA, 0);
3152 mci_writel(host, CLKSRC, 0);
3153
James Hogan63008762013-03-12 10:43:54 +00003154 /*
3155 * In 2.40a spec, Data offset is changed.
3156 * Need to check the version-id and set data-offset for DATA register.
3157 */
3158 host->verid = SDMMC_GET_VERID(mci_readl(host, VERID));
3159 dev_info(host->dev, "Version ID is %04x\n", host->verid);
3160
3161 if (host->verid < DW_MMC_240A)
Ben Dooks76184ac2015-03-25 11:27:52 +00003162 host->fifo_reg = host->regs + DATA_OFFSET;
James Hogan63008762013-03-12 10:43:54 +00003163 else
Ben Dooks76184ac2015-03-25 11:27:52 +00003164 host->fifo_reg = host->regs + DATA_240A_OFFSET;
James Hogan63008762013-03-12 10:43:54 +00003165
Will Newtonf95f3852011-01-02 01:11:59 -05003166 tasklet_init(&host->tasklet, dw_mci_tasklet_func, (unsigned long)host);
Seungwon Jeon780f22a2012-11-28 19:26:03 +09003167 ret = devm_request_irq(host->dev, host->irq, dw_mci_interrupt,
3168 host->irq_flags, "dw-mci", host);
Will Newtonf95f3852011-01-02 01:11:59 -05003169 if (ret)
Doug Anderson6130e7a2014-10-14 09:33:09 -07003170 goto err_dmaunmap;
Will Newtonf95f3852011-01-02 01:11:59 -05003171
Will Newtonf95f3852011-01-02 01:11:59 -05003172 if (host->pdata->num_slots)
3173 host->num_slots = host->pdata->num_slots;
3174 else
Shawn Lin8a629d22016-02-02 14:11:25 +08003175 host->num_slots = 1;
3176
3177 if (host->num_slots < 1 ||
3178 host->num_slots > SDMMC_GET_SLOT_NUM(mci_readl(host, HCON))) {
3179 dev_err(host->dev,
3180 "Platform data must supply correct num_slots.\n");
3181 ret = -ENODEV;
3182 goto err_clk_ciu;
3183 }
Will Newtonf95f3852011-01-02 01:11:59 -05003184
Yuvaraj CD2da1d7f2012-10-08 14:29:51 +05303185 /*
Doug Andersonfa0c3282015-02-25 10:11:51 -08003186 * Enable interrupts for command done, data over, data empty,
Yuvaraj CD2da1d7f2012-10-08 14:29:51 +05303187 * receive ready and error such as transmit, receive timeout, crc error
3188 */
Yuvaraj CD2da1d7f2012-10-08 14:29:51 +05303189 mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER |
3190 SDMMC_INT_TXDR | SDMMC_INT_RXDR |
Doug Andersonfa0c3282015-02-25 10:11:51 -08003191 DW_MCI_ERROR_FLAGS);
Shawn Lin0e3a22c2015-08-03 15:07:21 +08003192 /* Enable mci interrupt */
3193 mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE);
Yuvaraj CD2da1d7f2012-10-08 14:29:51 +05303194
Shawn Lin0e3a22c2015-08-03 15:07:21 +08003195 dev_info(host->dev,
3196 "DW MMC controller at irq %d,%d bit host data width,%u deep fifo\n",
Yuvaraj CD2da1d7f2012-10-08 14:29:51 +05303197 host->irq, width, fifo_size);
3198
Will Newtonf95f3852011-01-02 01:11:59 -05003199 /* We need at least one slot to succeed */
3200 for (i = 0; i < host->num_slots; i++) {
3201 ret = dw_mci_init_slot(host, i);
Thomas Abraham1c2215b2012-09-17 18:16:37 +00003202 if (ret)
3203 dev_dbg(host->dev, "slot %d init failed\n", i);
3204 else
3205 init_slots++;
3206 }
3207
3208 if (init_slots) {
3209 dev_info(host->dev, "%d slots initialized\n", init_slots);
3210 } else {
Shawn Lin0e3a22c2015-08-03 15:07:21 +08003211 dev_dbg(host->dev,
3212 "attempted to initialize %d slots, but failed on all\n",
3213 host->num_slots);
Doug Anderson6130e7a2014-10-14 09:33:09 -07003214 goto err_dmaunmap;
Will Newtonf95f3852011-01-02 01:11:59 -05003215 }
3216
Doug Andersonb793f652015-03-11 15:15:14 -07003217 /* Now that slots are all setup, we can enable card detect */
3218 dw_mci_enable_cd(host);
3219
Will Newtonf95f3852011-01-02 01:11:59 -05003220 return 0;
3221
Will Newtonf95f3852011-01-02 01:11:59 -05003222err_dmaunmap:
3223 if (host->use_dma && host->dma_ops->exit)
3224 host->dma_ops->exit(host);
Thomas Abrahamf90a0612012-09-17 18:16:38 +00003225
Guodong Xud6786fe2016-08-12 16:51:26 +08003226 if (!IS_ERR(host->pdata->rstc))
3227 reset_control_assert(host->pdata->rstc);
3228
Thomas Abrahamf90a0612012-09-17 18:16:38 +00003229err_clk_ciu:
Jaehoon Chung7037f3b2016-07-15 10:54:08 +09003230 clk_disable_unprepare(host->ciu_clk);
Seungwon Jeon780f22a2012-11-28 19:26:03 +09003231
Thomas Abrahamf90a0612012-09-17 18:16:38 +00003232err_clk_biu:
Jaehoon Chung7037f3b2016-07-15 10:54:08 +09003233 clk_disable_unprepare(host->biu_clk);
Seungwon Jeon780f22a2012-11-28 19:26:03 +09003234
Will Newtonf95f3852011-01-02 01:11:59 -05003235 return ret;
3236}
Shashidhar Hiremath62ca8032012-01-13 16:04:57 +05303237EXPORT_SYMBOL(dw_mci_probe);
Will Newtonf95f3852011-01-02 01:11:59 -05003238
Shashidhar Hiremath62ca8032012-01-13 16:04:57 +05303239void dw_mci_remove(struct dw_mci *host)
Will Newtonf95f3852011-01-02 01:11:59 -05003240{
Will Newtonf95f3852011-01-02 01:11:59 -05003241 int i;
3242
Will Newtonf95f3852011-01-02 01:11:59 -05003243 for (i = 0; i < host->num_slots; i++) {
Thomas Abraham4a909202012-09-17 18:16:35 +00003244 dev_dbg(host->dev, "remove slot %d\n", i);
Will Newtonf95f3852011-01-02 01:11:59 -05003245 if (host->slot[i])
3246 dw_mci_cleanup_slot(host->slot[i], i);
3247 }
3248
Prabu Thangamuthu048fd7e2015-05-28 12:21:06 +00003249 mci_writel(host, RINTSTS, 0xFFFFFFFF);
3250 mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */
3251
Will Newtonf95f3852011-01-02 01:11:59 -05003252 /* disable clock to CIU */
3253 mci_writel(host, CLKENA, 0);
3254 mci_writel(host, CLKSRC, 0);
3255
Will Newtonf95f3852011-01-02 01:11:59 -05003256 if (host->use_dma && host->dma_ops->exit)
3257 host->dma_ops->exit(host);
3258
Guodong Xud6786fe2016-08-12 16:51:26 +08003259 if (!IS_ERR(host->pdata->rstc))
3260 reset_control_assert(host->pdata->rstc);
3261
Jaehoon Chung7037f3b2016-07-15 10:54:08 +09003262 clk_disable_unprepare(host->ciu_clk);
3263 clk_disable_unprepare(host->biu_clk);
Will Newtonf95f3852011-01-02 01:11:59 -05003264}
Shashidhar Hiremath62ca8032012-01-13 16:04:57 +05303265EXPORT_SYMBOL(dw_mci_remove);
3266
3267
Will Newtonf95f3852011-01-02 01:11:59 -05003268
Shawn Line9ed8832016-10-12 10:50:35 +08003269#ifdef CONFIG_PM
Shawn Lined24e1f2016-10-12 10:56:55 +08003270int dw_mci_runtime_suspend(struct device *dev)
Will Newtonf95f3852011-01-02 01:11:59 -05003271{
Shawn Lined24e1f2016-10-12 10:56:55 +08003272 struct dw_mci *host = dev_get_drvdata(dev);
3273
Shawn Lin3fc7eae2015-09-16 14:41:23 +08003274 if (host->use_dma && host->dma_ops->exit)
3275 host->dma_ops->exit(host);
3276
Shawn Lined24e1f2016-10-12 10:56:55 +08003277 clk_disable_unprepare(host->ciu_clk);
3278
3279 if (host->cur_slot &&
3280 (mmc_can_gpio_cd(host->cur_slot->mmc) ||
3281 !mmc_card_is_removable(host->cur_slot->mmc)))
3282 clk_disable_unprepare(host->biu_clk);
3283
Will Newtonf95f3852011-01-02 01:11:59 -05003284 return 0;
3285}
Shawn Lined24e1f2016-10-12 10:56:55 +08003286EXPORT_SYMBOL(dw_mci_runtime_suspend);
Will Newtonf95f3852011-01-02 01:11:59 -05003287
Shawn Lined24e1f2016-10-12 10:56:55 +08003288int dw_mci_runtime_resume(struct device *dev)
Will Newtonf95f3852011-01-02 01:11:59 -05003289{
Shawn Lined24e1f2016-10-12 10:56:55 +08003290 int i, ret = 0;
3291 struct dw_mci *host = dev_get_drvdata(dev);
Will Newtonf95f3852011-01-02 01:11:59 -05003292
Shawn Lined24e1f2016-10-12 10:56:55 +08003293 if (host->cur_slot &&
3294 (mmc_can_gpio_cd(host->cur_slot->mmc) ||
3295 !mmc_card_is_removable(host->cur_slot->mmc))) {
3296 ret = clk_prepare_enable(host->biu_clk);
3297 if (ret)
3298 return ret;
Jaehoon Chunge61cf112011-03-17 20:32:33 +09003299 }
3300
Shawn Lined24e1f2016-10-12 10:56:55 +08003301 ret = clk_prepare_enable(host->ciu_clk);
3302 if (ret)
3303 return ret;
3304
Jonathan Kliegman3bfe6192012-06-14 13:31:55 -04003305 if (host->use_dma && host->dma_ops->init)
Seungwon Jeon141a7122012-05-22 13:01:03 +09003306 host->dma_ops->init(host);
3307
Seungwon Jeon52426892013-08-31 00:13:42 +09003308 /*
3309 * Restore the initial value at FIFOTH register
3310 * And Invalidate the prev_blksz with zero
3311 */
Shawn Lined24e1f2016-10-12 10:56:55 +08003312 mci_writel(host, FIFOTH, host->fifoth_val);
3313 host->prev_blksz = 0;
Jaehoon Chunge61cf112011-03-17 20:32:33 +09003314
Doug Anderson2eb29442013-08-31 00:11:49 +09003315 /* Put in max timeout */
3316 mci_writel(host, TMOUT, 0xFFFFFFFF);
3317
Jaehoon Chunge61cf112011-03-17 20:32:33 +09003318 mci_writel(host, RINTSTS, 0xFFFFFFFF);
3319 mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER |
3320 SDMMC_INT_TXDR | SDMMC_INT_RXDR |
Doug Andersonfa0c3282015-02-25 10:11:51 -08003321 DW_MCI_ERROR_FLAGS);
Jaehoon Chunge61cf112011-03-17 20:32:33 +09003322 mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE);
3323
Will Newtonf95f3852011-01-02 01:11:59 -05003324 for (i = 0; i < host->num_slots; i++) {
3325 struct dw_mci_slot *slot = host->slot[i];
Shawn Lin0e3a22c2015-08-03 15:07:21 +08003326
Will Newtonf95f3852011-01-02 01:11:59 -05003327 if (!slot)
3328 continue;
Abhilash Kesavanab269122012-11-19 10:26:21 +05303329 if (slot->mmc->pm_flags & MMC_PM_KEEP_POWER) {
3330 dw_mci_set_ios(slot->mmc, &slot->mmc->ios);
3331 dw_mci_setup_bus(slot, true);
3332 }
Will Newtonf95f3852011-01-02 01:11:59 -05003333 }
Doug Andersonfa0c3282015-02-25 10:11:51 -08003334
3335 /* Now that slots are all setup, we can enable card detect */
3336 dw_mci_enable_cd(host);
3337
Shawn Lined24e1f2016-10-12 10:56:55 +08003338 return ret;
Shawn Line9ed8832016-10-12 10:50:35 +08003339}
3340EXPORT_SYMBOL(dw_mci_runtime_resume);
3341#endif /* CONFIG_PM */
Jaehoon Chung6fe88902011-12-08 19:23:03 +09003342
/*
 * Module load entry point: only announces the driver.  Actual devices are
 * registered by the platform/PCI glue drivers built on this core.
 */
static int __init dw_mci_init(void)
{
	pr_info("Synopsys Designware Multimedia Card Interface Driver\n");
	return 0;
}
3348
/*
 * Module unload hook: intentionally empty — per-device teardown happens in
 * dw_mci_remove(), invoked by the bus glue when each device goes away.
 */
static void __exit dw_mci_exit(void)
{
}
3352
3353module_init(dw_mci_init);
3354module_exit(dw_mci_exit);
3355
3356MODULE_DESCRIPTION("DW Multimedia Card Interface driver");
3357MODULE_AUTHOR("NXP Semiconductor VietNam");
3358MODULE_AUTHOR("Imagination Technologies Ltd");
3359MODULE_LICENSE("GPL v2");