// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2009-2010, Lars-Peter Clausen <lars@metafoo.de>
 * Copyright (C) 2013, Imagination Technologies
 *
 * JZ4740 SD/MMC controller driver
 */

#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/mmc/host.h>
#include <linux/mmc/slot-gpio.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>

#include <asm/cacheflush.h>

#define JZ_REG_MMC_STRPCL	0x00
#define JZ_REG_MMC_STATUS	0x04
#define JZ_REG_MMC_CLKRT	0x08
#define JZ_REG_MMC_CMDAT	0x0C
#define JZ_REG_MMC_RESTO	0x10
#define JZ_REG_MMC_RDTO		0x14
#define JZ_REG_MMC_BLKLEN	0x18
#define JZ_REG_MMC_NOB		0x1C
#define JZ_REG_MMC_SNOB		0x20
#define JZ_REG_MMC_IMASK	0x24
#define JZ_REG_MMC_IREG		0x28
#define JZ_REG_MMC_CMD		0x2C
#define JZ_REG_MMC_ARG		0x30
#define JZ_REG_MMC_RESP_FIFO	0x34
#define JZ_REG_MMC_RXFIFO	0x38
#define JZ_REG_MMC_TXFIFO	0x3C
#define JZ_REG_MMC_LPM		0x40
#define JZ_REG_MMC_DMAC		0x44

#define JZ_MMC_STRPCL_EXIT_MULTIPLE BIT(7)
#define JZ_MMC_STRPCL_EXIT_TRANSFER BIT(6)
#define JZ_MMC_STRPCL_START_READWAIT BIT(5)
#define JZ_MMC_STRPCL_STOP_READWAIT BIT(4)
#define JZ_MMC_STRPCL_RESET BIT(3)
#define JZ_MMC_STRPCL_START_OP BIT(2)
#define JZ_MMC_STRPCL_CLOCK_CONTROL (BIT(1) | BIT(0))
#define JZ_MMC_STRPCL_CLOCK_STOP BIT(0)
#define JZ_MMC_STRPCL_CLOCK_START BIT(1)


#define JZ_MMC_STATUS_IS_RESETTING BIT(15)
#define JZ_MMC_STATUS_SDIO_INT_ACTIVE BIT(14)
#define JZ_MMC_STATUS_PRG_DONE BIT(13)
#define JZ_MMC_STATUS_DATA_TRAN_DONE BIT(12)
#define JZ_MMC_STATUS_END_CMD_RES BIT(11)
#define JZ_MMC_STATUS_DATA_FIFO_AFULL BIT(10)
#define JZ_MMC_STATUS_IS_READWAIT BIT(9)
#define JZ_MMC_STATUS_CLK_EN BIT(8)
#define JZ_MMC_STATUS_DATA_FIFO_FULL BIT(7)
#define JZ_MMC_STATUS_DATA_FIFO_EMPTY BIT(6)
#define JZ_MMC_STATUS_CRC_RES_ERR BIT(5)
#define JZ_MMC_STATUS_CRC_READ_ERROR BIT(4)
#define JZ_MMC_STATUS_TIMEOUT_WRITE BIT(3)
#define JZ_MMC_STATUS_CRC_WRITE_ERROR BIT(2)
#define JZ_MMC_STATUS_TIMEOUT_RES BIT(1)
#define JZ_MMC_STATUS_TIMEOUT_READ BIT(0)

#define JZ_MMC_STATUS_READ_ERROR_MASK (BIT(4) | BIT(0))
#define JZ_MMC_STATUS_WRITE_ERROR_MASK (BIT(3) | BIT(2))


#define JZ_MMC_CMDAT_IO_ABORT BIT(11)
#define JZ_MMC_CMDAT_BUS_WIDTH_4BIT BIT(10)
#define JZ_MMC_CMDAT_BUS_WIDTH_8BIT (BIT(10) | BIT(9))
#define JZ_MMC_CMDAT_BUS_WIDTH_MASK (BIT(10) | BIT(9))
#define JZ_MMC_CMDAT_DMA_EN BIT(8)
#define JZ_MMC_CMDAT_INIT BIT(7)
#define JZ_MMC_CMDAT_BUSY BIT(6)
#define JZ_MMC_CMDAT_STREAM BIT(5)
#define JZ_MMC_CMDAT_WRITE BIT(4)
#define JZ_MMC_CMDAT_DATA_EN BIT(3)
#define JZ_MMC_CMDAT_RESPONSE_FORMAT (BIT(2) | BIT(1) | BIT(0))
#define JZ_MMC_CMDAT_RSP_R1 1
#define JZ_MMC_CMDAT_RSP_R2 2
#define JZ_MMC_CMDAT_RSP_R3 3

#define JZ_MMC_IRQ_SDIO BIT(7)
#define JZ_MMC_IRQ_TXFIFO_WR_REQ BIT(6)
#define JZ_MMC_IRQ_RXFIFO_RD_REQ BIT(5)
#define JZ_MMC_IRQ_END_CMD_RES BIT(2)
#define JZ_MMC_IRQ_PRG_DONE BIT(1)
#define JZ_MMC_IRQ_DATA_TRAN_DONE BIT(0)

#define JZ_MMC_DMAC_DMA_SEL BIT(1)
#define JZ_MMC_DMAC_DMA_EN BIT(0)

#define JZ_MMC_LPM_DRV_RISING BIT(31)
#define JZ_MMC_LPM_DRV_RISING_QTR_PHASE_DLY BIT(31)
#define JZ_MMC_LPM_DRV_RISING_1NS_DLY BIT(30)
#define JZ_MMC_LPM_SMP_RISING_QTR_OR_HALF_PHASE_DLY BIT(29)
#define JZ_MMC_LPM_LOW_POWER_MODE_EN BIT(0)

#define JZ_MMC_CLK_RATE 24000000
#define JZ_MMC_REQ_TIMEOUT_MS 5000

enum jz4740_mmc_version {
	JZ_MMC_JZ4740,
	JZ_MMC_JZ4725B,
	JZ_MMC_JZ4760,
	JZ_MMC_JZ4780,
	JZ_MMC_X1000,
};

enum jz4740_mmc_state {
	JZ4740_MMC_STATE_READ_RESPONSE,
	JZ4740_MMC_STATE_TRANSFER_DATA,
	JZ4740_MMC_STATE_SEND_STOP,
	JZ4740_MMC_STATE_DONE,
};

/*
 * The MMC core allows one mmc_request to be prepared while another
 * mmc_request is in-flight. This is used via the pre_req/post_req hooks.
 * This driver uses the pre_req/post_req hooks to map/unmap the mmc_request.
 * Following what other drivers do (sdhci, dw_mmc) we use the following cookie
 * flags to keep track of the mmc_request mapping state.
 *
 * COOKIE_UNMAPPED: the request is not mapped.
 * COOKIE_PREMAPPED: the request was mapped in pre_req,
 * and should be unmapped in post_req.
 * COOKIE_MAPPED: the request was mapped in the irq handler,
 * and should be unmapped before mmc_request_done is called.
 */
enum jz4780_cookie {
	COOKIE_UNMAPPED = 0,
	COOKIE_PREMAPPED,
	COOKIE_MAPPED,
};

struct jz4740_mmc_host {
	struct mmc_host *mmc;
	struct platform_device *pdev;
	struct clk *clk;

	enum jz4740_mmc_version version;

	int irq;

	void __iomem *base;
	struct resource *mem_res;
	struct mmc_request *req;
	struct mmc_command *cmd;

	unsigned long waiting;

	uint32_t cmdat;

	uint32_t irq_mask;

	spinlock_t lock;

	struct timer_list timeout_timer;
	struct sg_mapping_iter miter;
	enum jz4740_mmc_state state;

	/* DMA support */
	struct dma_chan *dma_rx;
	struct dma_chan *dma_tx;
	bool use_dma;

/* The DMA trigger level is 8 words, that is to say, the DMA read trigger
 * fires when the number of data words in MSC_RXFIFO is >= 8 and the DMA
 * write trigger fires when the number of data words in MSC_TXFIFO is < 8.
 */
#define JZ4740_MMC_FIFO_HALF_SIZE 8
};

static void jz4740_mmc_write_irq_mask(struct jz4740_mmc_host *host,
				      uint32_t val)
{
	if (host->version >= JZ_MMC_JZ4725B)
		return writel(val, host->base + JZ_REG_MMC_IMASK);
	else
		return writew(val, host->base + JZ_REG_MMC_IMASK);
}

static void jz4740_mmc_write_irq_reg(struct jz4740_mmc_host *host,
				     uint32_t val)
{
	if (host->version >= JZ_MMC_JZ4780)
		writel(val, host->base + JZ_REG_MMC_IREG);
	else
		writew(val, host->base + JZ_REG_MMC_IREG);
}

static uint32_t jz4740_mmc_read_irq_reg(struct jz4740_mmc_host *host)
{
	if (host->version >= JZ_MMC_JZ4780)
		return readl(host->base + JZ_REG_MMC_IREG);
	else
		return readw(host->base + JZ_REG_MMC_IREG);
}

/*----------------------------------------------------------------------------*/
/* DMA infrastructure */

static void jz4740_mmc_release_dma_channels(struct jz4740_mmc_host *host)
{
	if (!host->use_dma)
		return;

	dma_release_channel(host->dma_tx);
	if (host->dma_rx)
		dma_release_channel(host->dma_rx);
}

static int jz4740_mmc_acquire_dma_channels(struct jz4740_mmc_host *host)
{
	struct device *dev = mmc_dev(host->mmc);

	host->dma_tx = dma_request_chan(dev, "tx-rx");
	if (!IS_ERR(host->dma_tx))
		return 0;

	if (PTR_ERR(host->dma_tx) != -ENODEV) {
		dev_err(dev, "Failed to get dma tx-rx channel\n");
		return PTR_ERR(host->dma_tx);
	}

	host->dma_tx = dma_request_chan(mmc_dev(host->mmc), "tx");
	if (IS_ERR(host->dma_tx)) {
		dev_err(mmc_dev(host->mmc), "Failed to get dma_tx channel\n");
		return PTR_ERR(host->dma_tx);
	}

	host->dma_rx = dma_request_chan(mmc_dev(host->mmc), "rx");
	if (IS_ERR(host->dma_rx)) {
		dev_err(mmc_dev(host->mmc), "Failed to get dma_rx channel\n");
		dma_release_channel(host->dma_tx);
		return PTR_ERR(host->dma_rx);
	}

	return 0;
}

static inline struct dma_chan *jz4740_mmc_get_dma_chan(struct jz4740_mmc_host *host,
							struct mmc_data *data)
{
	if ((data->flags & MMC_DATA_READ) && host->dma_rx)
		return host->dma_rx;
	else
		return host->dma_tx;
}

static void jz4740_mmc_dma_unmap(struct jz4740_mmc_host *host,
				 struct mmc_data *data)
{
	struct dma_chan *chan = jz4740_mmc_get_dma_chan(host, data);
	enum dma_data_direction dir = mmc_get_dma_dir(data);

	dma_unmap_sg(chan->device->dev, data->sg, data->sg_len, dir);
	data->host_cookie = COOKIE_UNMAPPED;
}

/* Prepares DMA data for the current or next transfer.
 * A request can be in-flight when this is called.
 */
static int jz4740_mmc_prepare_dma_data(struct jz4740_mmc_host *host,
				       struct mmc_data *data,
				       int cookie)
{
	struct dma_chan *chan = jz4740_mmc_get_dma_chan(host, data);
	enum dma_data_direction dir = mmc_get_dma_dir(data);
	int sg_count;

	if (data->host_cookie == COOKIE_PREMAPPED)
		return data->sg_count;

	sg_count = dma_map_sg(chan->device->dev,
			      data->sg,
			      data->sg_len,
			      dir);

	if (sg_count <= 0) {
		dev_err(mmc_dev(host->mmc),
			"Failed to map scatterlist for DMA operation\n");
		return -EINVAL;
	}

	data->sg_count = sg_count;
	data->host_cookie = cookie;

	return data->sg_count;
}

static int jz4740_mmc_start_dma_transfer(struct jz4740_mmc_host *host,
					 struct mmc_data *data)
{
	struct dma_chan *chan = jz4740_mmc_get_dma_chan(host, data);
	struct dma_async_tx_descriptor *desc;
	struct dma_slave_config conf = {
		.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
		.src_maxburst = JZ4740_MMC_FIFO_HALF_SIZE,
		.dst_maxburst = JZ4740_MMC_FIFO_HALF_SIZE,
	};
	int sg_count;

	if (data->flags & MMC_DATA_WRITE) {
		conf.direction = DMA_MEM_TO_DEV;
		conf.dst_addr = host->mem_res->start + JZ_REG_MMC_TXFIFO;
	} else {
		conf.direction = DMA_DEV_TO_MEM;
		conf.src_addr = host->mem_res->start + JZ_REG_MMC_RXFIFO;
	}

	sg_count = jz4740_mmc_prepare_dma_data(host, data, COOKIE_MAPPED);
	if (sg_count < 0)
		return sg_count;

	dmaengine_slave_config(chan, &conf);
	desc = dmaengine_prep_slave_sg(chan, data->sg, sg_count,
				       conf.direction,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc) {
		dev_err(mmc_dev(host->mmc),
			"Failed to allocate DMA %s descriptor",
			conf.direction == DMA_MEM_TO_DEV ? "TX" : "RX");
		goto dma_unmap;
	}

	dmaengine_submit(desc);
	dma_async_issue_pending(chan);

	return 0;

dma_unmap:
	if (data->host_cookie == COOKIE_MAPPED)
		jz4740_mmc_dma_unmap(host, data);
	return -ENOMEM;
}

static void jz4740_mmc_pre_request(struct mmc_host *mmc,
				   struct mmc_request *mrq)
{
	struct jz4740_mmc_host *host = mmc_priv(mmc);
	struct mmc_data *data = mrq->data;

	if (!host->use_dma)
		return;

	data->host_cookie = COOKIE_UNMAPPED;
	if (jz4740_mmc_prepare_dma_data(host, data, COOKIE_PREMAPPED) < 0)
		data->host_cookie = COOKIE_UNMAPPED;
}

static void jz4740_mmc_post_request(struct mmc_host *mmc,
				    struct mmc_request *mrq,
				    int err)
{
	struct jz4740_mmc_host *host = mmc_priv(mmc);
	struct mmc_data *data = mrq->data;

	if (data && data->host_cookie != COOKIE_UNMAPPED)
		jz4740_mmc_dma_unmap(host, data);

	if (err) {
		struct dma_chan *chan = jz4740_mmc_get_dma_chan(host, data);

		dmaengine_terminate_all(chan);
	}
}

/*----------------------------------------------------------------------------*/

384 unsigned int irq, bool enabled)
385{
386 unsigned long flags;
387
388 spin_lock_irqsave(&host->lock, flags);
389 if (enabled)
390 host->irq_mask &= ~irq;
391 else
392 host->irq_mask |= irq;
Lars-Peter Clausen61bfbdb2010-07-15 20:06:04 +0000393
Alex Smith6a787682018-03-28 18:00:51 -0300394 jz4740_mmc_write_irq_mask(host, host->irq_mask);
Alex Smitha04f0012018-03-28 18:00:43 -0300395 spin_unlock_irqrestore(&host->lock, flags);
Lars-Peter Clausen61bfbdb2010-07-15 20:06:04 +0000396}
397
398static void jz4740_mmc_clock_enable(struct jz4740_mmc_host *host,
399 bool start_transfer)
400{
401 uint16_t val = JZ_MMC_STRPCL_CLOCK_START;
402
403 if (start_transfer)
404 val |= JZ_MMC_STRPCL_START_OP;
405
406 writew(val, host->base + JZ_REG_MMC_STRPCL);
407}
408
409static void jz4740_mmc_clock_disable(struct jz4740_mmc_host *host)
410{
411 uint32_t status;
412 unsigned int timeout = 1000;
413
414 writew(JZ_MMC_STRPCL_CLOCK_STOP, host->base + JZ_REG_MMC_STRPCL);
415 do {
416 status = readl(host->base + JZ_REG_MMC_STATUS);
417 } while (status & JZ_MMC_STATUS_CLK_EN && --timeout);
418}
419
420static void jz4740_mmc_reset(struct jz4740_mmc_host *host)
421{
422 uint32_t status;
423 unsigned int timeout = 1000;
424
425 writew(JZ_MMC_STRPCL_RESET, host->base + JZ_REG_MMC_STRPCL);
426 udelay(10);
427 do {
428 status = readl(host->base + JZ_REG_MMC_STATUS);
429 } while (status & JZ_MMC_STATUS_IS_RESETTING && --timeout);
430}
431
432static void jz4740_mmc_request_done(struct jz4740_mmc_host *host)
433{
434 struct mmc_request *req;
Ezequiel Garcia96e03ff2018-11-20 15:21:21 -0300435 struct mmc_data *data;
Lars-Peter Clausen61bfbdb2010-07-15 20:06:04 +0000436
437 req = host->req;
Ezequiel Garcia96e03ff2018-11-20 15:21:21 -0300438 data = req->data;
Lars-Peter Clausen61bfbdb2010-07-15 20:06:04 +0000439 host->req = NULL;
440
Ezequiel Garcia96e03ff2018-11-20 15:21:21 -0300441 if (data && data->host_cookie == COOKIE_MAPPED)
442 jz4740_mmc_dma_unmap(host, data);
Lars-Peter Clausen61bfbdb2010-07-15 20:06:04 +0000443 mmc_request_done(host->mmc, req);
444}
445
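/*
 * Busy-poll IREG for @irq. Returns false as soon as the flag is seen. If the
 * polling budget expires, the interrupt is enabled and the timeout timer is
 * armed so completion is handled asynchronously, and true is returned.
 */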
static unsigned int jz4740_mmc_poll_irq(struct jz4740_mmc_host *host,
					unsigned int irq)
{
	unsigned int timeout = 0x800;
	uint32_t status;

	do {
		status = jz4740_mmc_read_irq_reg(host);
	} while (!(status & irq) && --timeout);

	if (timeout == 0) {
		set_bit(0, &host->waiting);
		mod_timer(&host->timeout_timer,
			  jiffies + msecs_to_jiffies(JZ_MMC_REQ_TIMEOUT_MS));
		jz4740_mmc_set_irq_enabled(host, irq, true);
		return true;
	}

	return false;
}

static void jz4740_mmc_transfer_check_state(struct jz4740_mmc_host *host,
					    struct mmc_data *data)
{
	int status;

	status = readl(host->base + JZ_REG_MMC_STATUS);
	if (status & JZ_MMC_STATUS_WRITE_ERROR_MASK) {
		if (status & (JZ_MMC_STATUS_TIMEOUT_WRITE)) {
			host->req->cmd->error = -ETIMEDOUT;
			data->error = -ETIMEDOUT;
		} else {
			host->req->cmd->error = -EIO;
			data->error = -EIO;
		}
	} else if (status & JZ_MMC_STATUS_READ_ERROR_MASK) {
		if (status & (JZ_MMC_STATUS_TIMEOUT_READ)) {
			host->req->cmd->error = -ETIMEDOUT;
			data->error = -ETIMEDOUT;
		} else {
			host->req->cmd->error = -EIO;
			data->error = -EIO;
		}
	}
}

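/*
 * PIO write path: wait for the TXFIFO write-request flag, then push eight
 * 32-bit words (half the FIFO) per burst, writing any sub-eight-word
 * remainder afterwards. Returns true if polling for FIFO space timed out.
 */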
static bool jz4740_mmc_write_data(struct jz4740_mmc_host *host,
				  struct mmc_data *data)
{
	struct sg_mapping_iter *miter = &host->miter;
	void __iomem *fifo_addr = host->base + JZ_REG_MMC_TXFIFO;
	uint32_t *buf;
	bool timeout;
	size_t i, j;

	while (sg_miter_next(miter)) {
		buf = miter->addr;
		i = miter->length / 4;
		j = i / 8;
		i = i & 0x7;
		while (j) {
			timeout = jz4740_mmc_poll_irq(host, JZ_MMC_IRQ_TXFIFO_WR_REQ);
			if (unlikely(timeout))
				goto poll_timeout;

			writel(buf[0], fifo_addr);
			writel(buf[1], fifo_addr);
			writel(buf[2], fifo_addr);
			writel(buf[3], fifo_addr);
			writel(buf[4], fifo_addr);
			writel(buf[5], fifo_addr);
			writel(buf[6], fifo_addr);
			writel(buf[7], fifo_addr);
			buf += 8;
			--j;
		}
		if (unlikely(i)) {
			timeout = jz4740_mmc_poll_irq(host, JZ_MMC_IRQ_TXFIFO_WR_REQ);
			if (unlikely(timeout))
				goto poll_timeout;

			while (i) {
				writel(*buf, fifo_addr);
				++buf;
				--i;
			}
		}
		data->bytes_xfered += miter->length;
	}
	sg_miter_stop(miter);

	return false;

poll_timeout:
	miter->consumed = (void *)buf - miter->addr;
	data->bytes_xfered += miter->consumed;
	sg_miter_stop(miter);

	return true;
}

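/*
 * PIO read path: mirror of the write path, draining the RX FIFO in bursts of
 * eight 32-bit words, then word by word, with a byte-wise tail copy for
 * lengths that are not a multiple of four.
 */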
static bool jz4740_mmc_read_data(struct jz4740_mmc_host *host,
				 struct mmc_data *data)
{
	struct sg_mapping_iter *miter = &host->miter;
	void __iomem *fifo_addr = host->base + JZ_REG_MMC_RXFIFO;
	uint32_t *buf;
	uint32_t d;
	uint32_t status;
	size_t i, j;
	unsigned int timeout;

	while (sg_miter_next(miter)) {
		buf = miter->addr;
		i = miter->length;
		j = i / 32;
		i = i & 0x1f;
		while (j) {
			timeout = jz4740_mmc_poll_irq(host, JZ_MMC_IRQ_RXFIFO_RD_REQ);
			if (unlikely(timeout))
				goto poll_timeout;

			buf[0] = readl(fifo_addr);
			buf[1] = readl(fifo_addr);
			buf[2] = readl(fifo_addr);
			buf[3] = readl(fifo_addr);
			buf[4] = readl(fifo_addr);
			buf[5] = readl(fifo_addr);
			buf[6] = readl(fifo_addr);
			buf[7] = readl(fifo_addr);

			buf += 8;
			--j;
		}

		if (unlikely(i)) {
			timeout = jz4740_mmc_poll_irq(host, JZ_MMC_IRQ_RXFIFO_RD_REQ);
			if (unlikely(timeout))
				goto poll_timeout;

			while (i >= 4) {
				*buf++ = readl(fifo_addr);
				i -= 4;
			}
			if (unlikely(i > 0)) {
				d = readl(fifo_addr);
				memcpy(buf, &d, i);
			}
		}
		data->bytes_xfered += miter->length;
	}
	sg_miter_stop(miter);

	/* For whatever reason there is sometimes one word more in the FIFO
	 * than requested */
	timeout = 1000;
	status = readl(host->base + JZ_REG_MMC_STATUS);
	while (!(status & JZ_MMC_STATUS_DATA_FIFO_EMPTY) && --timeout) {
		d = readl(fifo_addr);
		status = readl(host->base + JZ_REG_MMC_STATUS);
	}

	return false;

poll_timeout:
	miter->consumed = (void *)buf - miter->addr;
	data->bytes_xfered += miter->consumed;
	sg_miter_stop(miter);

	return true;
}

static void jz4740_mmc_timeout(struct timer_list *t)
{
	struct jz4740_mmc_host *host = from_timer(host, t, timeout_timer);

	if (!test_and_clear_bit(0, &host->waiting))
		return;

	jz4740_mmc_set_irq_enabled(host, JZ_MMC_IRQ_END_CMD_RES, false);

	host->req->cmd->error = -ETIMEDOUT;
	jz4740_mmc_request_done(host);
}

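/*
 * The response FIFO is 16 bits wide: short responses are assembled from three
 * reads, and 136-bit (R2) responses are reassembled into four 32-bit words,
 * with the low byte of the last read for each word carried into the next one.
 */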
static void jz4740_mmc_read_response(struct jz4740_mmc_host *host,
				     struct mmc_command *cmd)
{
	int i;
	uint16_t tmp;
	void __iomem *fifo_addr = host->base + JZ_REG_MMC_RESP_FIFO;

	if (cmd->flags & MMC_RSP_136) {
		tmp = readw(fifo_addr);
		for (i = 0; i < 4; ++i) {
			cmd->resp[i] = tmp << 24;
			tmp = readw(fifo_addr);
			cmd->resp[i] |= tmp << 8;
			tmp = readw(fifo_addr);
			cmd->resp[i] |= tmp >> 8;
		}
	} else {
		cmd->resp[0] = readw(fifo_addr) << 24;
		cmd->resp[0] |= readw(fifo_addr) << 8;
		cmd->resp[0] |= readw(fifo_addr) & 0xff;
	}
}

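/*
 * Program CMDAT, the command index and argument and, for data commands, the
 * block geometry and DMA control, then restart the controller clock to kick
 * off the command.
 */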
static void jz4740_mmc_send_command(struct jz4740_mmc_host *host,
				    struct mmc_command *cmd)
{
	uint32_t cmdat = host->cmdat;

	host->cmdat &= ~JZ_MMC_CMDAT_INIT;
	jz4740_mmc_clock_disable(host);

	host->cmd = cmd;

	if (cmd->flags & MMC_RSP_BUSY)
		cmdat |= JZ_MMC_CMDAT_BUSY;

	switch (mmc_resp_type(cmd)) {
	case MMC_RSP_R1B:
	case MMC_RSP_R1:
		cmdat |= JZ_MMC_CMDAT_RSP_R1;
		break;
	case MMC_RSP_R2:
		cmdat |= JZ_MMC_CMDAT_RSP_R2;
		break;
	case MMC_RSP_R3:
		cmdat |= JZ_MMC_CMDAT_RSP_R3;
		break;
	default:
		break;
	}

	if (cmd->data) {
		cmdat |= JZ_MMC_CMDAT_DATA_EN;
		if (cmd->data->flags & MMC_DATA_WRITE)
			cmdat |= JZ_MMC_CMDAT_WRITE;
		if (host->use_dma) {
			/*
			 * The JZ4780's MMC controller has integrated DMA ability
			 * in addition to being able to use the external DMA
			 * controller. It moves DMA control bits to a separate
			 * register. The DMA_SEL bit chooses the external
			 * controller over the integrated one. Earlier SoCs
			 * can only use the external controller, and have a
			 * single DMA enable bit in CMDAT.
			 */
			if (host->version >= JZ_MMC_JZ4780) {
				writel(JZ_MMC_DMAC_DMA_EN | JZ_MMC_DMAC_DMA_SEL,
				       host->base + JZ_REG_MMC_DMAC);
			} else {
				cmdat |= JZ_MMC_CMDAT_DMA_EN;
			}
		} else if (host->version >= JZ_MMC_JZ4780) {
			writel(0, host->base + JZ_REG_MMC_DMAC);
		}

		writew(cmd->data->blksz, host->base + JZ_REG_MMC_BLKLEN);
		writew(cmd->data->blocks, host->base + JZ_REG_MMC_NOB);
	}

	writeb(cmd->opcode, host->base + JZ_REG_MMC_CMD);
	writel(cmd->arg, host->base + JZ_REG_MMC_ARG);
	writel(cmdat, host->base + JZ_REG_MMC_CMDAT);

	jz4740_mmc_clock_enable(host, 1);
}

static void jz_mmc_prepare_data_transfer(struct jz4740_mmc_host *host)
{
	struct mmc_command *cmd = host->req->cmd;
	struct mmc_data *data = cmd->data;
	int direction;

	if (data->flags & MMC_DATA_READ)
		direction = SG_MITER_TO_SG;
	else
		direction = SG_MITER_FROM_SG;

	sg_miter_start(&host->miter, data->sg, data->sg_len, direction);
}


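/*
 * Threaded half of the interrupt handler: advances the request state machine
 * (read response -> transfer data -> send stop) and completes the request,
 * unless a poll timeout deferred further progress to a later interrupt.
 */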
static irqreturn_t jz_mmc_irq_worker(int irq, void *devid)
{
	struct jz4740_mmc_host *host = (struct jz4740_mmc_host *)devid;
	struct mmc_command *cmd = host->req->cmd;
	struct mmc_request *req = host->req;
	struct mmc_data *data = cmd->data;
	bool timeout = false;

	if (cmd->error)
		host->state = JZ4740_MMC_STATE_DONE;

	switch (host->state) {
	case JZ4740_MMC_STATE_READ_RESPONSE:
		if (cmd->flags & MMC_RSP_PRESENT)
			jz4740_mmc_read_response(host, cmd);

		if (!data)
			break;

		jz_mmc_prepare_data_transfer(host);
		fallthrough;

	case JZ4740_MMC_STATE_TRANSFER_DATA:
		if (host->use_dma) {
			/* Use DMA if enabled.
			 * Data transfer direction is defined later by
			 * relying on data flags in
			 * jz4740_mmc_prepare_dma_data() and
			 * jz4740_mmc_start_dma_transfer().
			 */
			timeout = jz4740_mmc_start_dma_transfer(host, data);
			data->bytes_xfered = data->blocks * data->blksz;
		} else if (data->flags & MMC_DATA_READ)
			/* Use PIO if DMA is not enabled.
			 * Data transfer direction was defined before
			 * by relying on data flags in
			 * jz_mmc_prepare_data_transfer().
			 */
			timeout = jz4740_mmc_read_data(host, data);
		else
			timeout = jz4740_mmc_write_data(host, data);

		if (unlikely(timeout)) {
			host->state = JZ4740_MMC_STATE_TRANSFER_DATA;
			break;
		}

		jz4740_mmc_transfer_check_state(host, data);

		timeout = jz4740_mmc_poll_irq(host, JZ_MMC_IRQ_DATA_TRAN_DONE);
		if (unlikely(timeout)) {
			host->state = JZ4740_MMC_STATE_SEND_STOP;
			break;
		}
		jz4740_mmc_write_irq_reg(host, JZ_MMC_IRQ_DATA_TRAN_DONE);
		fallthrough;

	case JZ4740_MMC_STATE_SEND_STOP:
		if (!req->stop)
			break;

		jz4740_mmc_send_command(host, req->stop);

		if (mmc_resp_type(req->stop) & MMC_RSP_BUSY) {
			timeout = jz4740_mmc_poll_irq(host,
						      JZ_MMC_IRQ_PRG_DONE);
			if (timeout) {
				host->state = JZ4740_MMC_STATE_DONE;
				break;
			}
		}
		fallthrough;

	case JZ4740_MMC_STATE_DONE:
		break;
	}

	if (!timeout)
		jz4740_mmc_request_done(host);

	return IRQ_HANDLED;
}

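/*
 * Hard interrupt handler: acknowledges transient flags, signals SDIO card
 * interrupts, and on command completion or error records the error status
 * before waking the threaded handler to continue the request.
 */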
static irqreturn_t jz_mmc_irq(int irq, void *devid)
{
	struct jz4740_mmc_host *host = devid;
	struct mmc_command *cmd = host->cmd;
	uint32_t irq_reg, status, tmp;

	status = readl(host->base + JZ_REG_MMC_STATUS);
	irq_reg = jz4740_mmc_read_irq_reg(host);

	tmp = irq_reg;
	irq_reg &= ~host->irq_mask;

	tmp &= ~(JZ_MMC_IRQ_TXFIFO_WR_REQ | JZ_MMC_IRQ_RXFIFO_RD_REQ |
		JZ_MMC_IRQ_PRG_DONE | JZ_MMC_IRQ_DATA_TRAN_DONE);

	if (tmp != irq_reg)
		jz4740_mmc_write_irq_reg(host, tmp & ~irq_reg);

	if (irq_reg & JZ_MMC_IRQ_SDIO) {
		jz4740_mmc_write_irq_reg(host, JZ_MMC_IRQ_SDIO);
		mmc_signal_sdio_irq(host->mmc);
		irq_reg &= ~JZ_MMC_IRQ_SDIO;
	}

	if (host->req && cmd && irq_reg) {
		if (test_and_clear_bit(0, &host->waiting)) {
			del_timer(&host->timeout_timer);

			if (status & JZ_MMC_STATUS_TIMEOUT_RES) {
				cmd->error = -ETIMEDOUT;
			} else if (status & JZ_MMC_STATUS_CRC_RES_ERR) {
				cmd->error = -EIO;
			} else if (status & (JZ_MMC_STATUS_CRC_READ_ERROR |
				    JZ_MMC_STATUS_CRC_WRITE_ERROR)) {
				if (cmd->data)
					cmd->data->error = -EIO;
				cmd->error = -EIO;
			}

			jz4740_mmc_set_irq_enabled(host, irq_reg, false);
			jz4740_mmc_write_irq_reg(host, irq_reg);

			return IRQ_WAKE_THREAD;
		}
	}

	return IRQ_HANDLED;
}

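/*
 * CLKRT holds a power-of-two divider applied to the base MMC clock. Pick the
 * smallest divider (capped at 7) that brings the clock at or below the
 * requested rate and return the rate actually programmed.
 */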
static int jz4740_mmc_set_clock_rate(struct jz4740_mmc_host *host, int rate)
{
	int div = 0;
	int real_rate;

	jz4740_mmc_clock_disable(host);
	clk_set_rate(host->clk, host->mmc->f_max);

	real_rate = clk_get_rate(host->clk);

	while (real_rate > rate && div < 7) {
		++div;
		real_rate >>= 1;
	}

	writew(div, host->base + JZ_REG_MMC_CLKRT);

	if (real_rate > 25000000) {
		if (host->version >= JZ_MMC_JZ4780) {
			writel(JZ_MMC_LPM_DRV_RISING_QTR_PHASE_DLY |
			       JZ_MMC_LPM_SMP_RISING_QTR_OR_HALF_PHASE_DLY |
			       JZ_MMC_LPM_LOW_POWER_MODE_EN,
			       host->base + JZ_REG_MMC_LPM);
		} else if (host->version >= JZ_MMC_JZ4760) {
			writel(JZ_MMC_LPM_DRV_RISING |
			       JZ_MMC_LPM_LOW_POWER_MODE_EN,
			       host->base + JZ_REG_MMC_LPM);
		} else if (host->version >= JZ_MMC_JZ4725B)
			writel(JZ_MMC_LPM_LOW_POWER_MODE_EN,
			       host->base + JZ_REG_MMC_LPM);
	}

	return real_rate;
}

static void jz4740_mmc_request(struct mmc_host *mmc, struct mmc_request *req)
{
	struct jz4740_mmc_host *host = mmc_priv(mmc);

	host->req = req;

	jz4740_mmc_write_irq_reg(host, ~0);
	jz4740_mmc_set_irq_enabled(host, JZ_MMC_IRQ_END_CMD_RES, true);

	host->state = JZ4740_MMC_STATE_READ_RESPONSE;
	set_bit(0, &host->waiting);
	mod_timer(&host->timeout_timer,
		  jiffies + msecs_to_jiffies(JZ_MMC_REQ_TIMEOUT_MS));
	jz4740_mmc_send_command(host, req->cmd);
}

static void jz4740_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct jz4740_mmc_host *host = mmc_priv(mmc);

	if (ios->clock)
		jz4740_mmc_set_clock_rate(host, ios->clock);

	switch (ios->power_mode) {
	case MMC_POWER_UP:
		jz4740_mmc_reset(host);
		if (!IS_ERR(mmc->supply.vmmc))
			mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd);
		host->cmdat |= JZ_MMC_CMDAT_INIT;
		clk_prepare_enable(host->clk);
		break;
	case MMC_POWER_ON:
		break;
	default:
		if (!IS_ERR(mmc->supply.vmmc))
			mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
		clk_disable_unprepare(host->clk);
		break;
	}

	switch (ios->bus_width) {
	case MMC_BUS_WIDTH_1:
		host->cmdat &= ~JZ_MMC_CMDAT_BUS_WIDTH_MASK;
		break;
	case MMC_BUS_WIDTH_4:
		host->cmdat &= ~JZ_MMC_CMDAT_BUS_WIDTH_MASK;
		host->cmdat |= JZ_MMC_CMDAT_BUS_WIDTH_4BIT;
		break;
	case MMC_BUS_WIDTH_8:
		host->cmdat &= ~JZ_MMC_CMDAT_BUS_WIDTH_MASK;
		host->cmdat |= JZ_MMC_CMDAT_BUS_WIDTH_8BIT;
		break;
	default:
		break;
	}
}

static void jz4740_mmc_enable_sdio_irq(struct mmc_host *mmc, int enable)
{
	struct jz4740_mmc_host *host = mmc_priv(mmc);

	jz4740_mmc_set_irq_enabled(host, JZ_MMC_IRQ_SDIO, enable);
}

static const struct mmc_host_ops jz4740_mmc_ops = {
	.request = jz4740_mmc_request,
	.pre_req = jz4740_mmc_pre_request,
	.post_req = jz4740_mmc_post_request,
	.set_ios = jz4740_mmc_set_ios,
	.get_ro = mmc_gpio_get_ro,
	.get_cd = mmc_gpio_get_cd,
	.enable_sdio_irq = jz4740_mmc_enable_sdio_irq,
};

static const struct of_device_id jz4740_mmc_of_match[] = {
	{ .compatible = "ingenic,jz4740-mmc", .data = (void *) JZ_MMC_JZ4740 },
	{ .compatible = "ingenic,jz4725b-mmc", .data = (void *)JZ_MMC_JZ4725B },
	{ .compatible = "ingenic,jz4760-mmc", .data = (void *) JZ_MMC_JZ4760 },
	{ .compatible = "ingenic,jz4775-mmc", .data = (void *) JZ_MMC_JZ4780 },
	{ .compatible = "ingenic,jz4780-mmc", .data = (void *) JZ_MMC_JZ4780 },
	{ .compatible = "ingenic,x1000-mmc", .data = (void *) JZ_MMC_X1000 },
	{},
};
MODULE_DEVICE_TABLE(of, jz4740_mmc_of_match);

static int jz4740_mmc_probe(struct platform_device *pdev)
{
	int ret;
	struct mmc_host *mmc;
	struct jz4740_mmc_host *host;
	const struct of_device_id *match;

	mmc = mmc_alloc_host(sizeof(struct jz4740_mmc_host), &pdev->dev);
	if (!mmc) {
		dev_err(&pdev->dev, "Failed to alloc mmc host structure\n");
		return -ENOMEM;
	}

	host = mmc_priv(mmc);

	match = of_match_device(jz4740_mmc_of_match, &pdev->dev);
	if (match) {
		host->version = (enum jz4740_mmc_version)match->data;
	} else {
		/* JZ4740 should be the only one using legacy probe */
		host->version = JZ_MMC_JZ4740;
	}

	ret = mmc_of_parse(mmc);
	if (ret) {
		dev_err_probe(&pdev->dev, ret, "could not parse device properties\n");
		goto err_free_host;
	}

	mmc_regulator_get_supply(mmc);

	host->irq = platform_get_irq(pdev, 0);
	if (host->irq < 0) {
		ret = host->irq;
		goto err_free_host;
	}

	host->clk = devm_clk_get(&pdev->dev, "mmc");
	if (IS_ERR(host->clk)) {
		ret = PTR_ERR(host->clk);
		dev_err(&pdev->dev, "Failed to get mmc clock\n");
		goto err_free_host;
	}

	host->mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	host->base = devm_ioremap_resource(&pdev->dev, host->mem_res);
	if (IS_ERR(host->base)) {
		ret = PTR_ERR(host->base);
		goto err_free_host;
	}

	mmc->ops = &jz4740_mmc_ops;
	if (!mmc->f_max)
		mmc->f_max = JZ_MMC_CLK_RATE;
	mmc->f_min = mmc->f_max / 128;
	mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;

	/*
	 * We use a fixed timeout of 5s, hence inform the core about it. A
	 * future improvement should instead respect the cmd->busy_timeout.
	 */
	mmc->max_busy_timeout = JZ_MMC_REQ_TIMEOUT_MS;

	mmc->max_blk_size = (1 << 10) - 1;
	mmc->max_blk_count = (1 << 15) - 1;
	mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;

	mmc->max_segs = 128;
	mmc->max_seg_size = mmc->max_req_size;

	host->mmc = mmc;
	host->pdev = pdev;
	spin_lock_init(&host->lock);
	host->irq_mask = ~0;

	jz4740_mmc_reset(host);

	ret = request_threaded_irq(host->irq, jz_mmc_irq, jz_mmc_irq_worker, 0,
			dev_name(&pdev->dev), host);
	if (ret) {
		dev_err(&pdev->dev, "Failed to request irq: %d\n", ret);
		goto err_free_host;
	}

	jz4740_mmc_clock_disable(host);
	timer_setup(&host->timeout_timer, jz4740_mmc_timeout, 0);

	ret = jz4740_mmc_acquire_dma_channels(host);
	if (ret == -EPROBE_DEFER)
		goto err_free_irq;
	host->use_dma = !ret;

	platform_set_drvdata(pdev, host);
	ret = mmc_add_host(mmc);

	if (ret) {
		dev_err(&pdev->dev, "Failed to add mmc host: %d\n", ret);
		goto err_release_dma;
	}
	dev_info(&pdev->dev, "Ingenic SD/MMC card driver registered\n");

	dev_info(&pdev->dev, "Using %s, %d-bit mode\n",
		 host->use_dma ? "DMA" : "PIO",
		 (mmc->caps & MMC_CAP_8_BIT_DATA) ? 8 :
		 ((mmc->caps & MMC_CAP_4_BIT_DATA) ? 4 : 1));

	return 0;

err_release_dma:
	if (host->use_dma)
		jz4740_mmc_release_dma_channels(host);
err_free_irq:
	free_irq(host->irq, host);
err_free_host:
	mmc_free_host(mmc);

	return ret;
}

static int jz4740_mmc_remove(struct platform_device *pdev)
{
	struct jz4740_mmc_host *host = platform_get_drvdata(pdev);

	del_timer_sync(&host->timeout_timer);
	jz4740_mmc_set_irq_enabled(host, 0xff, false);
	jz4740_mmc_reset(host);

	mmc_remove_host(host->mmc);

	free_irq(host->irq, host);

	if (host->use_dma)
		jz4740_mmc_release_dma_channels(host);

	mmc_free_host(host->mmc);

	return 0;
}

static int jz4740_mmc_suspend(struct device *dev)
{
	return pinctrl_pm_select_sleep_state(dev);
}

static int jz4740_mmc_resume(struct device *dev)
{
	return pinctrl_select_default_state(dev);
}

DEFINE_SIMPLE_DEV_PM_OPS(jz4740_mmc_pm_ops, jz4740_mmc_suspend,
	jz4740_mmc_resume);

static struct platform_driver jz4740_mmc_driver = {
	.probe = jz4740_mmc_probe,
	.remove = jz4740_mmc_remove,
	.driver = {
		.name = "jz4740-mmc",
		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
		.of_match_table = of_match_ptr(jz4740_mmc_of_match),
		.pm = pm_sleep_ptr(&jz4740_mmc_pm_ops),
	},
};

module_platform_driver(jz4740_mmc_driver);

MODULE_DESCRIPTION("JZ4740 SD/MMC controller driver");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");