/*
2 * Copyright (c) 2015 MediaTek Inc.
3 * Author: Leilk Liu <leilk.liu@mediatek.com>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 */
14
#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_data/spi-mt65xx.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/spi/spi.h>
#include <linux/string.h>
26
/* Controller register map (byte offsets from mdata->base). */
#define SPI_CFG0_REG                      0x0000
#define SPI_CFG1_REG                      0x0004
#define SPI_TX_SRC_REG                    0x0008
#define SPI_RX_DST_REG                    0x000c
#define SPI_TX_DATA_REG                   0x0010
#define SPI_RX_DATA_REG                   0x0014
#define SPI_CMD_REG                       0x0018
#define SPI_STATUS0_REG                   0x001c
#define SPI_PAD_SEL_REG                   0x0024

/* SPI_CFG0_REG: sck high/low and chip-select timing fields (8 bits each) */
#define SPI_CFG0_SCK_HIGH_OFFSET          0
#define SPI_CFG0_SCK_LOW_OFFSET           8
#define SPI_CFG0_CS_HOLD_OFFSET           16
#define SPI_CFG0_CS_SETUP_OFFSET          24

/* SPI_CFG1_REG: cs idle time and packet geometry */
#define SPI_CFG1_CS_IDLE_OFFSET           0
#define SPI_CFG1_PACKET_LOOP_OFFSET       8
#define SPI_CFG1_PACKET_LENGTH_OFFSET     16
#define SPI_CFG1_GET_TICK_DLY_OFFSET      30

#define SPI_CFG1_CS_IDLE_MASK             0xff
#define SPI_CFG1_PACKET_LOOP_MASK         0xff00
#define SPI_CFG1_PACKET_LENGTH_MASK       0x3ff0000

/* SPI_CMD_REG bit positions, used with explicit shifts below */
#define SPI_CMD_ACT_OFFSET                0
#define SPI_CMD_RESUME_OFFSET             1
#define SPI_CMD_CPHA_OFFSET               8
#define SPI_CMD_CPOL_OFFSET               9
#define SPI_CMD_TXMSBF_OFFSET             12
#define SPI_CMD_RXMSBF_OFFSET             13
#define SPI_CMD_RX_ENDIAN_OFFSET          14
#define SPI_CMD_TX_ENDIAN_OFFSET          15

/* SPI_CMD_REG bit masks */
#define SPI_CMD_RST                  BIT(2)
#define SPI_CMD_PAUSE_EN             BIT(4)
#define SPI_CMD_DEASSERT             BIT(5)
#define SPI_CMD_CPHA                 BIT(8)
#define SPI_CMD_CPOL                 BIT(9)
#define SPI_CMD_RX_DMA               BIT(10)
#define SPI_CMD_TX_DMA               BIT(11)
#define SPI_CMD_TXMSBF               BIT(12)
#define SPI_CMD_RXMSBF               BIT(13)
#define SPI_CMD_RX_ENDIAN            BIT(14)
#define SPI_CMD_TX_ENDIAN            BIT(15)
#define SPI_CMD_FINISH_IE            BIT(16)
#define SPI_CMD_PAUSE_IE             BIT(17)

/* per-SoC quirk flag values for struct mtk_spi_compatible */
#define MTK_SPI_QUIRK_PAD_SELECT 1
/* Must explicitly send dummy Tx bytes to do Rx only transfer */
#define MTK_SPI_QUIRK_MUST_TX 1

#define MT8173_SPI_MAX_PAD_SEL 3

/* values for mdata->state */
#define MTK_SPI_IDLE 0
#define MTK_SPI_PAUSED 1

#define MTK_SPI_MAX_FIFO_SIZE 32
#define MTK_SPI_PACKET_SIZE 1024
85
/* Per-SoC feature flags, selected through the of_match table. */
struct mtk_spi_compatible {
	u32 need_pad_sel;	/* nonzero: program SPI_PAD_SEL_REG from DT */
	u32 must_tx;		/* nonzero: core must supply dummy TX data */
};
90
/* Driver state, stored as the spi_master's drvdata. */
struct mtk_spi {
	void __iomem *base;			/* mapped controller registers */
	u32 state;				/* MTK_SPI_IDLE or MTK_SPI_PAUSED */
	u32 pad_sel;				/* "mediatek,pad-select" DT value */
	struct clk *spi_clk, *parent_clk;	/* functional clock and its parent */
	struct spi_transfer *cur_transfer;	/* transfer currently in flight */
	u32 xfer_len;				/* byte count of the current chunk */
	struct scatterlist *tx_sgl, *rx_sgl;	/* current DMA scatterlist entries */
	u32 tx_sgl_len, rx_sgl_len;		/* bytes left in the current entries */
	const struct mtk_spi_compatible *dev_comp;	/* SoC quirk flags */
};
102
static const struct mtk_spi_compatible mt6589_compat = {
	.need_pad_sel = 0,
	.must_tx = 0,
};

static const struct mtk_spi_compatible mt8135_compat = {
	.need_pad_sel = 0,
	.must_tx = 0,
};

/* MT8173 needs pad select and cannot do RX without dummy TX data. */
static const struct mtk_spi_compatible mt8173_compat = {
	.need_pad_sel = MTK_SPI_QUIRK_PAD_SELECT,
	.must_tx = MTK_SPI_QUIRK_MUST_TX,
};
117
/*
 * A piece of default chip info unless the platform
 * supplies it (see mtk_spi_prepare_message()).
 */
static const struct mtk_chip_config mtk_default_chip_info = {
	.rx_mlsb = 1,	/* MSB first on RX */
	.tx_mlsb = 1,	/* MSB first on TX */
	.tx_endian = 0,
	.rx_endian = 0,
};
128
/* DT match table; .data points at the per-SoC quirk flags. */
static const struct of_device_id mtk_spi_of_match[] = {
	{ .compatible = "mediatek,mt6589-spi", .data = (void *)&mt6589_compat },
	{ .compatible = "mediatek,mt8135-spi", .data = (void *)&mt8135_compat },
	{ .compatible = "mediatek,mt8173-spi", .data = (void *)&mt8173_compat },
	{}
};
MODULE_DEVICE_TABLE(of, mtk_spi_of_match);
136
137static void mtk_spi_reset(struct mtk_spi *mdata)
138{
139 u32 reg_val;
140
141 /* set the software reset bit in SPI_CMD_REG. */
142 reg_val = readl(mdata->base + SPI_CMD_REG);
143 reg_val |= SPI_CMD_RST;
144 writel(reg_val, mdata->base + SPI_CMD_REG);
145
146 reg_val = readl(mdata->base + SPI_CMD_REG);
147 reg_val &= ~SPI_CMD_RST;
148 writel(reg_val, mdata->base + SPI_CMD_REG);
149}
150
151static void mtk_spi_config(struct mtk_spi *mdata,
152 struct mtk_chip_config *chip_config)
153{
154 u32 reg_val;
155
156 reg_val = readl(mdata->base + SPI_CMD_REG);
157
158 /* set the mlsbx and mlsbtx */
159 reg_val &= ~(SPI_CMD_TXMSBF | SPI_CMD_RXMSBF);
160 reg_val |= (chip_config->tx_mlsb << SPI_CMD_TXMSBF_OFFSET);
161 reg_val |= (chip_config->rx_mlsb << SPI_CMD_RXMSBF_OFFSET);
162
163 /* set the tx/rx endian */
164 reg_val &= ~(SPI_CMD_TX_ENDIAN | SPI_CMD_RX_ENDIAN);
165 reg_val |= (chip_config->tx_endian << SPI_CMD_TX_ENDIAN_OFFSET);
166 reg_val |= (chip_config->rx_endian << SPI_CMD_RX_ENDIAN_OFFSET);
167
168 /* set finish and pause interrupt always enable */
169 reg_val |= SPI_CMD_FINISH_IE | SPI_CMD_PAUSE_EN;
170
171 /* disable dma mode */
172 reg_val &= ~(SPI_CMD_TX_DMA | SPI_CMD_RX_DMA);
173
174 /* disable deassert mode */
175 reg_val &= ~SPI_CMD_DEASSERT;
176
177 writel(reg_val, mdata->base + SPI_CMD_REG);
178
179 /* pad select */
180 if (mdata->dev_comp->need_pad_sel)
181 writel(mdata->pad_sel, mdata->base + SPI_PAD_SEL_REG);
182}
183
184static int mtk_spi_prepare_hardware(struct spi_master *master)
185{
186 struct spi_transfer *trans;
187 struct mtk_spi *mdata = spi_master_get_devdata(master);
188 struct spi_message *msg = master->cur_msg;
Leilk Liua5682312015-08-07 15:19:50 +0800189
190 trans = list_first_entry(&msg->transfers, struct spi_transfer,
191 transfer_list);
192 if (trans->cs_change == 0) {
193 mdata->state = MTK_SPI_IDLE;
194 mtk_spi_reset(mdata);
195 }
196
Leilk Liua5682312015-08-07 15:19:50 +0800197 return 0;
198}
199
200static int mtk_spi_prepare_message(struct spi_master *master,
201 struct spi_message *msg)
202{
203 u32 reg_val;
204 u8 cpha, cpol;
205 struct mtk_chip_config *chip_config;
206 struct spi_device *spi = msg->spi;
207 struct mtk_spi *mdata = spi_master_get_devdata(master);
208
209 cpha = spi->mode & SPI_CPHA ? 1 : 0;
210 cpol = spi->mode & SPI_CPOL ? 1 : 0;
211
212 reg_val = readl(mdata->base + SPI_CMD_REG);
213 reg_val &= ~(SPI_CMD_CPHA | SPI_CMD_CPOL);
214 reg_val |= (cpha << SPI_CMD_CPHA_OFFSET);
215 reg_val |= (cpol << SPI_CMD_CPOL_OFFSET);
216 writel(reg_val, mdata->base + SPI_CMD_REG);
217
218 chip_config = spi->controller_data;
219 if (!chip_config) {
220 chip_config = (void *)&mtk_default_chip_info;
221 spi->controller_data = chip_config;
222 }
223 mtk_spi_config(mdata, chip_config);
224
225 return 0;
226}
227
228static void mtk_spi_set_cs(struct spi_device *spi, bool enable)
229{
230 u32 reg_val;
231 struct mtk_spi *mdata = spi_master_get_devdata(spi->master);
232
233 reg_val = readl(mdata->base + SPI_CMD_REG);
234 if (!enable)
235 reg_val |= SPI_CMD_PAUSE_EN;
236 else
237 reg_val &= ~SPI_CMD_PAUSE_EN;
238 writel(reg_val, mdata->base + SPI_CMD_REG);
239}
240
241static void mtk_spi_prepare_transfer(struct spi_master *master,
242 struct spi_transfer *xfer)
243{
244 u32 spi_clk_hz, div, high_time, low_time, holdtime,
245 setuptime, cs_idletime, reg_val = 0;
246 struct mtk_spi *mdata = spi_master_get_devdata(master);
247
248 spi_clk_hz = clk_get_rate(mdata->spi_clk);
249 if (xfer->speed_hz < spi_clk_hz / 2)
250 div = DIV_ROUND_UP(spi_clk_hz, xfer->speed_hz);
251 else
252 div = 1;
253
254 high_time = (div + 1) / 2;
255 low_time = (div + 1) / 2;
256 holdtime = (div + 1) / 2 * 2;
257 setuptime = (div + 1) / 2 * 2;
258 cs_idletime = (div + 1) / 2 * 2;
259
260 reg_val |= (((high_time - 1) & 0xff) << SPI_CFG0_SCK_HIGH_OFFSET);
261 reg_val |= (((low_time - 1) & 0xff) << SPI_CFG0_SCK_LOW_OFFSET);
262 reg_val |= (((holdtime - 1) & 0xff) << SPI_CFG0_CS_HOLD_OFFSET);
263 reg_val |= (((setuptime - 1) & 0xff) << SPI_CFG0_CS_SETUP_OFFSET);
264 writel(reg_val, mdata->base + SPI_CFG0_REG);
265
266 reg_val = readl(mdata->base + SPI_CFG1_REG);
267 reg_val &= ~SPI_CFG1_CS_IDLE_MASK;
268 reg_val |= (((cs_idletime - 1) & 0xff) << SPI_CFG1_CS_IDLE_OFFSET);
269 writel(reg_val, mdata->base + SPI_CFG1_REG);
270}
271
272static void mtk_spi_setup_packet(struct spi_master *master)
273{
274 u32 packet_size, packet_loop, reg_val;
275 struct mtk_spi *mdata = spi_master_get_devdata(master);
276
277 packet_size = min_t(unsigned, mdata->xfer_len, MTK_SPI_PACKET_SIZE);
278 packet_loop = mdata->xfer_len / packet_size;
279
280 reg_val = readl(mdata->base + SPI_CFG1_REG);
281 reg_val &= ~(SPI_CFG1_PACKET_LENGTH_MASK + SPI_CFG1_PACKET_LOOP_MASK);
282 reg_val |= (packet_size - 1) << SPI_CFG1_PACKET_LENGTH_OFFSET;
283 reg_val |= (packet_loop - 1) << SPI_CFG1_PACKET_LOOP_OFFSET;
284 writel(reg_val, mdata->base + SPI_CFG1_REG);
285}
286
287static void mtk_spi_enable_transfer(struct spi_master *master)
288{
289 int cmd;
290 struct mtk_spi *mdata = spi_master_get_devdata(master);
291
292 cmd = readl(mdata->base + SPI_CMD_REG);
293 if (mdata->state == MTK_SPI_IDLE)
294 cmd |= 1 << SPI_CMD_ACT_OFFSET;
295 else
296 cmd |= 1 << SPI_CMD_RESUME_OFFSET;
297 writel(cmd, mdata->base + SPI_CMD_REG);
298}
299
300static int mtk_spi_get_mult_delta(int xfer_len)
301{
302 int mult_delta;
303
304 if (xfer_len > MTK_SPI_PACKET_SIZE)
305 mult_delta = xfer_len % MTK_SPI_PACKET_SIZE;
306 else
307 mult_delta = 0;
308
309 return mult_delta;
310}
311
/*
 * Choose the byte count (mdata->xfer_len) for the next DMA chunk and
 * decrement the remaining tx/rx scatterlist byte counters accordingly.
 *
 * A chunk longer than one packet must be a whole multiple of
 * MTK_SPI_PACKET_SIZE, so the sub-packet remainder ("mult_delta") is
 * held back for a follow-up round.  With both directions pending the
 * chunk is bounded by the shorter side so tx and rx stay in lockstep.
 */
static void mtk_spi_update_mdata_len(struct spi_master *master)
{
	int mult_delta;
	struct mtk_spi *mdata = spi_master_get_devdata(master);

	if (mdata->tx_sgl_len && mdata->rx_sgl_len) {
		if (mdata->tx_sgl_len > mdata->rx_sgl_len) {
			/* rx is the shorter side: chunk by it */
			mult_delta = mtk_spi_get_mult_delta(mdata->rx_sgl_len);
			mdata->xfer_len = mdata->rx_sgl_len - mult_delta;
			mdata->rx_sgl_len = mult_delta;
			mdata->tx_sgl_len -= mdata->xfer_len;
		} else {
			/* tx is the shorter (or equal) side */
			mult_delta = mtk_spi_get_mult_delta(mdata->tx_sgl_len);
			mdata->xfer_len = mdata->tx_sgl_len - mult_delta;
			mdata->tx_sgl_len = mult_delta;
			mdata->rx_sgl_len -= mdata->xfer_len;
		}
	} else if (mdata->tx_sgl_len) {
		mult_delta = mtk_spi_get_mult_delta(mdata->tx_sgl_len);
		mdata->xfer_len = mdata->tx_sgl_len - mult_delta;
		mdata->tx_sgl_len = mult_delta;
	} else if (mdata->rx_sgl_len) {
		mult_delta = mtk_spi_get_mult_delta(mdata->rx_sgl_len);
		mdata->xfer_len = mdata->rx_sgl_len - mult_delta;
		mdata->rx_sgl_len = mult_delta;
	}
}
339
340static void mtk_spi_setup_dma_addr(struct spi_master *master,
341 struct spi_transfer *xfer)
342{
343 struct mtk_spi *mdata = spi_master_get_devdata(master);
344
345 if (mdata->tx_sgl)
Leilk Liu39ba9282015-08-13 20:06:41 +0800346 writel(xfer->tx_dma, mdata->base + SPI_TX_SRC_REG);
Leilk Liua5682312015-08-07 15:19:50 +0800347 if (mdata->rx_sgl)
Leilk Liu39ba9282015-08-13 20:06:41 +0800348 writel(xfer->rx_dma, mdata->base + SPI_RX_DST_REG);
Leilk Liua5682312015-08-07 15:19:50 +0800349}
350
351static int mtk_spi_fifo_transfer(struct spi_master *master,
352 struct spi_device *spi,
353 struct spi_transfer *xfer)
354{
355 int cnt, i;
356 struct mtk_spi *mdata = spi_master_get_devdata(master);
357
358 mdata->cur_transfer = xfer;
359 mdata->xfer_len = xfer->len;
360 mtk_spi_prepare_transfer(master, xfer);
361 mtk_spi_setup_packet(master);
362
363 if (xfer->len % 4)
364 cnt = xfer->len / 4 + 1;
365 else
366 cnt = xfer->len / 4;
367
368 for (i = 0; i < cnt; i++)
369 writel(*((u32 *)xfer->tx_buf + i),
370 mdata->base + SPI_TX_DATA_REG);
371
372 mtk_spi_enable_transfer(master);
373
374 return 1;
375}
376
/*
 * DMA path: enable TX/RX DMA in SPI_CMD_REG for the directions this
 * transfer uses, latch the first scatterlist entries, program packet
 * geometry and bus addresses, then kick the hardware.  Subsequent
 * scatterlist entries are chained from the interrupt handler.
 *
 * Returns 1 so the SPI core waits for spi_finalize_current_transfer().
 */
static int mtk_spi_dma_transfer(struct spi_master *master,
				struct spi_device *spi,
				struct spi_transfer *xfer)
{
	int cmd;
	struct mtk_spi *mdata = spi_master_get_devdata(master);

	/* reset per-transfer scatterlist bookkeeping */
	mdata->tx_sgl = NULL;
	mdata->rx_sgl = NULL;
	mdata->tx_sgl_len = 0;
	mdata->rx_sgl_len = 0;
	mdata->cur_transfer = xfer;

	mtk_spi_prepare_transfer(master, xfer);

	cmd = readl(mdata->base + SPI_CMD_REG);
	if (xfer->tx_buf)
		cmd |= SPI_CMD_TX_DMA;
	if (xfer->rx_buf)
		cmd |= SPI_CMD_RX_DMA;
	writel(cmd, mdata->base + SPI_CMD_REG);

	if (xfer->tx_buf)
		mdata->tx_sgl = xfer->tx_sg.sgl;
	if (xfer->rx_buf)
		mdata->rx_sgl = xfer->rx_sg.sgl;

	/* first chunk starts at the head of each mapped scatterlist */
	if (mdata->tx_sgl) {
		xfer->tx_dma = sg_dma_address(mdata->tx_sgl);
		mdata->tx_sgl_len = sg_dma_len(mdata->tx_sgl);
	}
	if (mdata->rx_sgl) {
		xfer->rx_dma = sg_dma_address(mdata->rx_sgl);
		mdata->rx_sgl_len = sg_dma_len(mdata->rx_sgl);
	}

	mtk_spi_update_mdata_len(master);
	mtk_spi_setup_packet(master);
	mtk_spi_setup_dma_addr(master, xfer);
	mtk_spi_enable_transfer(master);

	return 1;
}
420
421static int mtk_spi_transfer_one(struct spi_master *master,
422 struct spi_device *spi,
423 struct spi_transfer *xfer)
424{
425 if (master->can_dma(master, spi, xfer))
426 return mtk_spi_dma_transfer(master, spi, xfer);
427 else
428 return mtk_spi_fifo_transfer(master, spi, xfer);
429}
430
/* Use DMA whenever the transfer does not fit in the 32-byte FIFO. */
static bool mtk_spi_can_dma(struct spi_master *master,
			    struct spi_device *spi,
			    struct spi_transfer *xfer)
{
	return xfer->len > MTK_SPI_MAX_FIFO_SIZE;
}
437
/*
 * Transfer-complete / pause interrupt handler.
 *
 * FIFO transfers finish here: the RX FIFO is drained into rx_buf and
 * the transfer is finalized.  DMA transfers may take several rounds
 * when the scatterlists hold more data than one programmed chunk; each
 * interrupt advances the tx/rx scatterlists and re-arms the next chunk
 * until both lists are exhausted.
 */
static irqreturn_t mtk_spi_interrupt(int irq, void *dev_id)
{
	u32 cmd, reg_val, i;
	struct spi_master *master = dev_id;
	struct mtk_spi *mdata = spi_master_get_devdata(master);
	struct spi_transfer *trans = mdata->cur_transfer;

	/* NOTE(review): status bit 1 appears to mean "paused" (transfer
	 * stopped in pause mode), otherwise idle -- confirm against the
	 * MT65xx datasheet.
	 */
	reg_val = readl(mdata->base + SPI_STATUS0_REG);
	if (reg_val & 0x2)
		mdata->state = MTK_SPI_PAUSED;
	else
		mdata->state = MTK_SPI_IDLE;

	/* FIFO (PIO) completion path */
	if (!master->can_dma(master, master->cur_msg->spi, trans)) {
		/* xfer len is not N*4 bytes every time in a transfer,
		 * but SPI_RX_DATA_REG must reads 4 bytes once,
		 * so rx buffer byte by byte.
		 */
		if (trans->rx_buf) {
			for (i = 0; i < mdata->xfer_len; i++) {
				if (i % 4 == 0)
					reg_val =
					readl(mdata->base + SPI_RX_DATA_REG);
				*((u8 *)(trans->rx_buf + i)) =
					(reg_val >> ((i % 4) * 8)) & 0xff;
			}
		}
		spi_finalize_current_transfer(master);
		return IRQ_HANDLED;
	}

	/* DMA path: the chunk just completed was mdata->xfer_len bytes */
	if (mdata->tx_sgl)
		trans->tx_dma += mdata->xfer_len;
	if (mdata->rx_sgl)
		trans->rx_dma += mdata->xfer_len;

	/* current scatterlist entry fully consumed: step to the next */
	if (mdata->tx_sgl && (mdata->tx_sgl_len == 0)) {
		mdata->tx_sgl = sg_next(mdata->tx_sgl);
		if (mdata->tx_sgl) {
			trans->tx_dma = sg_dma_address(mdata->tx_sgl);
			mdata->tx_sgl_len = sg_dma_len(mdata->tx_sgl);
		}
	}
	if (mdata->rx_sgl && (mdata->rx_sgl_len == 0)) {
		mdata->rx_sgl = sg_next(mdata->rx_sgl);
		if (mdata->rx_sgl) {
			trans->rx_dma = sg_dma_address(mdata->rx_sgl);
			mdata->rx_sgl_len = sg_dma_len(mdata->rx_sgl);
		}
	}

	/* both lists exhausted: stop DMA and finish the transfer */
	if (!mdata->tx_sgl && !mdata->rx_sgl) {
		/* spi disable dma */
		cmd = readl(mdata->base + SPI_CMD_REG);
		cmd &= ~SPI_CMD_TX_DMA;
		cmd &= ~SPI_CMD_RX_DMA;
		writel(cmd, mdata->base + SPI_CMD_REG);

		spi_finalize_current_transfer(master);
		return IRQ_HANDLED;
	}

	/* more data pending: program and start the next chunk */
	mtk_spi_update_mdata_len(master);
	mtk_spi_setup_packet(master);
	mtk_spi_setup_dma_addr(master, trans);
	mtk_spi_enable_transfer(master);

	return IRQ_HANDLED;
}
507
508static int mtk_spi_probe(struct platform_device *pdev)
509{
510 struct spi_master *master;
511 struct mtk_spi *mdata;
512 const struct of_device_id *of_id;
513 struct resource *res;
514 int irq, ret;
515
516 master = spi_alloc_master(&pdev->dev, sizeof(*mdata));
517 if (!master) {
518 dev_err(&pdev->dev, "failed to alloc spi master\n");
519 return -ENOMEM;
520 }
521
522 master->auto_runtime_pm = true;
523 master->dev.of_node = pdev->dev.of_node;
524 master->mode_bits = SPI_CPOL | SPI_CPHA;
525
526 master->set_cs = mtk_spi_set_cs;
527 master->prepare_transfer_hardware = mtk_spi_prepare_hardware;
Leilk Liua5682312015-08-07 15:19:50 +0800528 master->prepare_message = mtk_spi_prepare_message;
529 master->transfer_one = mtk_spi_transfer_one;
530 master->can_dma = mtk_spi_can_dma;
531
532 of_id = of_match_node(mtk_spi_of_match, pdev->dev.of_node);
533 if (!of_id) {
534 dev_err(&pdev->dev, "failed to probe of_node\n");
535 ret = -EINVAL;
536 goto err_put_master;
537 }
538
539 mdata = spi_master_get_devdata(master);
540 mdata->dev_comp = of_id->data;
541 if (mdata->dev_comp->must_tx)
542 master->flags = SPI_MASTER_MUST_TX;
543
544 if (mdata->dev_comp->need_pad_sel) {
545 ret = of_property_read_u32(pdev->dev.of_node,
546 "mediatek,pad-select",
547 &mdata->pad_sel);
548 if (ret) {
549 dev_err(&pdev->dev, "failed to read pad select: %d\n",
550 ret);
551 goto err_put_master;
552 }
553
554 if (mdata->pad_sel > MT8173_SPI_MAX_PAD_SEL) {
555 dev_err(&pdev->dev, "wrong pad-select: %u\n",
556 mdata->pad_sel);
557 ret = -EINVAL;
558 goto err_put_master;
559 }
560 }
561
562 platform_set_drvdata(pdev, master);
563
564 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
565 if (!res) {
566 ret = -ENODEV;
567 dev_err(&pdev->dev, "failed to determine base address\n");
568 goto err_put_master;
569 }
570
571 mdata->base = devm_ioremap_resource(&pdev->dev, res);
572 if (IS_ERR(mdata->base)) {
573 ret = PTR_ERR(mdata->base);
574 goto err_put_master;
575 }
576
577 irq = platform_get_irq(pdev, 0);
578 if (irq < 0) {
579 dev_err(&pdev->dev, "failed to get irq (%d)\n", irq);
580 ret = irq;
581 goto err_put_master;
582 }
583
584 if (!pdev->dev.dma_mask)
585 pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
586
587 ret = devm_request_irq(&pdev->dev, irq, mtk_spi_interrupt,
588 IRQF_TRIGGER_NONE, dev_name(&pdev->dev), master);
589 if (ret) {
590 dev_err(&pdev->dev, "failed to register irq (%d)\n", ret);
591 goto err_put_master;
592 }
593
594 mdata->spi_clk = devm_clk_get(&pdev->dev, "spi-clk");
595 if (IS_ERR(mdata->spi_clk)) {
596 ret = PTR_ERR(mdata->spi_clk);
597 dev_err(&pdev->dev, "failed to get spi-clk: %d\n", ret);
598 goto err_put_master;
599 }
600
601 mdata->parent_clk = devm_clk_get(&pdev->dev, "parent-clk");
602 if (IS_ERR(mdata->parent_clk)) {
603 ret = PTR_ERR(mdata->parent_clk);
604 dev_err(&pdev->dev, "failed to get parent-clk: %d\n", ret);
605 goto err_put_master;
606 }
607
608 ret = clk_prepare_enable(mdata->spi_clk);
609 if (ret < 0) {
610 dev_err(&pdev->dev, "failed to enable spi_clk (%d)\n", ret);
611 goto err_put_master;
612 }
613
614 ret = clk_set_parent(mdata->spi_clk, mdata->parent_clk);
615 if (ret < 0) {
616 dev_err(&pdev->dev, "failed to clk_set_parent (%d)\n", ret);
617 goto err_disable_clk;
618 }
619
620 clk_disable_unprepare(mdata->spi_clk);
621
622 pm_runtime_enable(&pdev->dev);
623
624 ret = devm_spi_register_master(&pdev->dev, master);
625 if (ret) {
626 dev_err(&pdev->dev, "failed to register master (%d)\n", ret);
627 goto err_put_master;
628 }
629
630 return 0;
631
632err_disable_clk:
633 clk_disable_unprepare(mdata->spi_clk);
634err_put_master:
635 spi_master_put(master);
636
637 return ret;
638}
639
640static int mtk_spi_remove(struct platform_device *pdev)
641{
642 struct spi_master *master = platform_get_drvdata(pdev);
643 struct mtk_spi *mdata = spi_master_get_devdata(master);
644
645 pm_runtime_disable(&pdev->dev);
646
647 mtk_spi_reset(mdata);
648 clk_disable_unprepare(mdata->spi_clk);
649 spi_master_put(master);
650
651 return 0;
652}
653
654#ifdef CONFIG_PM_SLEEP
655static int mtk_spi_suspend(struct device *dev)
656{
657 int ret;
658 struct spi_master *master = dev_get_drvdata(dev);
659 struct mtk_spi *mdata = spi_master_get_devdata(master);
660
661 ret = spi_master_suspend(master);
662 if (ret)
663 return ret;
664
665 if (!pm_runtime_suspended(dev))
666 clk_disable_unprepare(mdata->spi_clk);
667
668 return ret;
669}
670
671static int mtk_spi_resume(struct device *dev)
672{
673 int ret;
674 struct spi_master *master = dev_get_drvdata(dev);
675 struct mtk_spi *mdata = spi_master_get_devdata(master);
676
677 if (!pm_runtime_suspended(dev)) {
678 ret = clk_prepare_enable(mdata->spi_clk);
679 if (ret < 0)
680 return ret;
681 }
682
683 ret = spi_master_resume(master);
684 if (ret < 0)
685 clk_disable_unprepare(mdata->spi_clk);
686
687 return ret;
688}
689#endif /* CONFIG_PM_SLEEP */
690
691#ifdef CONFIG_PM
692static int mtk_spi_runtime_suspend(struct device *dev)
693{
694 struct spi_master *master = dev_get_drvdata(dev);
695 struct mtk_spi *mdata = spi_master_get_devdata(master);
696
697 clk_disable_unprepare(mdata->spi_clk);
698
699 return 0;
700}
701
702static int mtk_spi_runtime_resume(struct device *dev)
703{
704 struct spi_master *master = dev_get_drvdata(dev);
705 struct mtk_spi *mdata = spi_master_get_devdata(master);
706
707 return clk_prepare_enable(mdata->spi_clk);
708}
709#endif /* CONFIG_PM */
710
/* System-sleep and runtime PM hooks (see the CONFIG_PM blocks above). */
static const struct dev_pm_ops mtk_spi_pm = {
	SET_SYSTEM_SLEEP_PM_OPS(mtk_spi_suspend, mtk_spi_resume)
	SET_RUNTIME_PM_OPS(mtk_spi_runtime_suspend,
			   mtk_spi_runtime_resume, NULL)
};
716
/* Platform driver glue; devices are matched via mtk_spi_of_match (DT). */
static struct platform_driver mtk_spi_driver = {
	.driver = {
		.name = "mtk-spi",
		.pm = &mtk_spi_pm,
		.of_match_table = mtk_spi_of_match,
	},
	.probe = mtk_spi_probe,
	.remove = mtk_spi_remove,
};
726
/* Standard module registration boilerplate. */
module_platform_driver(mtk_spi_driver);

MODULE_DESCRIPTION("MTK SPI Controller driver");
MODULE_AUTHOR("Leilk Liu <leilk.liu@mediatek.com>");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:mtk-spi");