// SPDX-License-Identifier: GPL-2.0+
/*
 * Freescale lpuart serial port driver
 *
 * Copyright 2012-2014 Freescale Semiconductor, Inc.
 */

#include <linux/clk.h>
#include <linux/console.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/dmapool.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_dma.h>
#include <linux/serial_core.h>
#include <linux/slab.h>
#include <linux/tty_flip.h>

/* All registers are 8-bit wide */
#define UARTBDH			0x00
#define UARTBDL			0x01
#define UARTCR1			0x02
#define UARTCR2			0x03
#define UARTSR1			0x04
#define UARTCR3			0x06
#define UARTDR			0x07
#define UARTCR4			0x0a
#define UARTCR5			0x0b
#define UARTMODEM		0x0d
#define UARTPFIFO		0x10
#define UARTCFIFO		0x11
#define UARTSFIFO		0x12
#define UARTTWFIFO		0x13
#define UARTTCFIFO		0x14
#define UARTRWFIFO		0x15

#define UARTBDH_LBKDIE		0x80
#define UARTBDH_RXEDGIE		0x40
#define UARTBDH_SBR_MASK	0x1f

#define UARTCR1_LOOPS		0x80
#define UARTCR1_RSRC		0x20
#define UARTCR1_M		0x10
#define UARTCR1_WAKE		0x08
#define UARTCR1_ILT		0x04
#define UARTCR1_PE		0x02
#define UARTCR1_PT		0x01

#define UARTCR2_TIE		0x80
#define UARTCR2_TCIE		0x40
#define UARTCR2_RIE		0x20
#define UARTCR2_ILIE		0x10
#define UARTCR2_TE		0x08
#define UARTCR2_RE		0x04
#define UARTCR2_RWU		0x02
#define UARTCR2_SBK		0x01

#define UARTSR1_TDRE		0x80
#define UARTSR1_TC		0x40
#define UARTSR1_RDRF		0x20
#define UARTSR1_IDLE		0x10
#define UARTSR1_OR		0x08
#define UARTSR1_NF		0x04
#define UARTSR1_FE		0x02
#define UARTSR1_PE		0x01

#define UARTCR3_R8		0x80
#define UARTCR3_T8		0x40
#define UARTCR3_TXDIR		0x20
#define UARTCR3_TXINV		0x10
#define UARTCR3_ORIE		0x08
#define UARTCR3_NEIE		0x04
#define UARTCR3_FEIE		0x02
#define UARTCR3_PEIE		0x01

#define UARTCR4_MAEN1		0x80
#define UARTCR4_MAEN2		0x40
#define UARTCR4_M10		0x20
#define UARTCR4_BRFA_MASK	0x1f
#define UARTCR4_BRFA_OFF	0

#define UARTCR5_TDMAS		0x80
#define UARTCR5_RDMAS		0x20

#define UARTMODEM_RXRTSE	0x08
#define UARTMODEM_TXRTSPOL	0x04
#define UARTMODEM_TXRTSE	0x02
#define UARTMODEM_TXCTSE	0x01

#define UARTPFIFO_TXFE		0x80
#define UARTPFIFO_FIFOSIZE_MASK	0x7
#define UARTPFIFO_TXSIZE_OFF	4
#define UARTPFIFO_RXFE		0x08
#define UARTPFIFO_RXSIZE_OFF	0

#define UARTCFIFO_TXFLUSH	0x80
#define UARTCFIFO_RXFLUSH	0x40
#define UARTCFIFO_RXOFE		0x04
#define UARTCFIFO_TXOFE		0x02
#define UARTCFIFO_RXUFE		0x01

#define UARTSFIFO_TXEMPT	0x80
#define UARTSFIFO_RXEMPT	0x40
#define UARTSFIFO_RXOF		0x04
#define UARTSFIFO_TXOF		0x02
#define UARTSFIFO_RXUF		0x01

/* 32-bit global registers only for i.MX7ULP/i.MX8x
 * Used to reset all internal logic and registers, except the Global Register.
 */
#define UART_GLOBAL		0x8

/* 32-bit register definition */
#define UARTBAUD		0x00
#define UARTSTAT		0x04
#define UARTCTRL		0x08
#define UARTDATA		0x0C
#define UARTMATCH		0x10
#define UARTMODIR		0x14
#define UARTFIFO		0x18
#define UARTWATER		0x1c

#define UARTBAUD_MAEN1		0x80000000
#define UARTBAUD_MAEN2		0x40000000
#define UARTBAUD_M10		0x20000000
#define UARTBAUD_TDMAE		0x00800000
#define UARTBAUD_RDMAE		0x00200000
#define UARTBAUD_MATCFG		0x00400000
#define UARTBAUD_BOTHEDGE	0x00020000
#define UARTBAUD_RESYNCDIS	0x00010000
#define UARTBAUD_LBKDIE		0x00008000
#define UARTBAUD_RXEDGIE	0x00004000
#define UARTBAUD_SBNS		0x00002000
#define UARTBAUD_SBR		0x00000000
#define UARTBAUD_SBR_MASK	0x1fff
#define UARTBAUD_OSR_MASK	0x1f
#define UARTBAUD_OSR_SHIFT	24

#define UARTSTAT_LBKDIF		0x80000000
#define UARTSTAT_RXEDGIF	0x40000000
#define UARTSTAT_MSBF		0x20000000
#define UARTSTAT_RXINV		0x10000000
#define UARTSTAT_RWUID		0x08000000
#define UARTSTAT_BRK13		0x04000000
#define UARTSTAT_LBKDE		0x02000000
#define UARTSTAT_RAF		0x01000000
#define UARTSTAT_TDRE		0x00800000
#define UARTSTAT_TC		0x00400000
#define UARTSTAT_RDRF		0x00200000
#define UARTSTAT_IDLE		0x00100000
#define UARTSTAT_OR		0x00080000
#define UARTSTAT_NF		0x00040000
#define UARTSTAT_FE		0x00020000
#define UARTSTAT_PE		0x00010000
#define UARTSTAT_MA1F		0x00008000
#define UARTSTAT_M21F		0x00004000

#define UARTCTRL_R8T9		0x80000000
#define UARTCTRL_R9T8		0x40000000
#define UARTCTRL_TXDIR		0x20000000
#define UARTCTRL_TXINV		0x10000000
#define UARTCTRL_ORIE		0x08000000
#define UARTCTRL_NEIE		0x04000000
#define UARTCTRL_FEIE		0x02000000
#define UARTCTRL_PEIE		0x01000000
#define UARTCTRL_TIE		0x00800000
#define UARTCTRL_TCIE		0x00400000
#define UARTCTRL_RIE		0x00200000
#define UARTCTRL_ILIE		0x00100000
#define UARTCTRL_TE		0x00080000
#define UARTCTRL_RE		0x00040000
#define UARTCTRL_RWU		0x00020000
#define UARTCTRL_SBK		0x00010000
#define UARTCTRL_MA1IE		0x00008000
#define UARTCTRL_MA2IE		0x00004000
#define UARTCTRL_IDLECFG	0x00000100
#define UARTCTRL_LOOPS		0x00000080
#define UARTCTRL_DOZEEN		0x00000040
#define UARTCTRL_RSRC		0x00000020
#define UARTCTRL_M		0x00000010
#define UARTCTRL_WAKE		0x00000008
#define UARTCTRL_ILT		0x00000004
#define UARTCTRL_PE		0x00000002
#define UARTCTRL_PT		0x00000001

#define UARTDATA_NOISY		0x00008000
#define UARTDATA_PARITYE	0x00004000
#define UARTDATA_FRETSC		0x00002000
#define UARTDATA_RXEMPT		0x00001000
#define UARTDATA_IDLINE		0x00000800
#define UARTDATA_MASK		0x3ff

#define UARTMODIR_IREN		0x00020000
#define UARTMODIR_TXCTSSRC	0x00000020
#define UARTMODIR_TXCTSC	0x00000010
#define UARTMODIR_RXRTSE	0x00000008
#define UARTMODIR_TXRTSPOL	0x00000004
#define UARTMODIR_TXRTSE	0x00000002
#define UARTMODIR_TXCTSE	0x00000001

#define UARTFIFO_TXEMPT		0x00800000
#define UARTFIFO_RXEMPT		0x00400000
#define UARTFIFO_TXOF		0x00020000
#define UARTFIFO_RXUF		0x00010000
#define UARTFIFO_TXFLUSH	0x00008000
#define UARTFIFO_RXFLUSH	0x00004000
#define UARTFIFO_TXOFE		0x00000200
#define UARTFIFO_RXUFE		0x00000100
#define UARTFIFO_TXFE		0x00000080
#define UARTFIFO_FIFOSIZE_MASK	0x7
#define UARTFIFO_TXSIZE_OFF	4
#define UARTFIFO_RXFE		0x00000008
#define UARTFIFO_RXSIZE_OFF	0
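/* Convert the 3-bit FIFOSIZE field to a depth in datawords: 0 means 1, otherwise 2^(x + 1) */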
#define UARTFIFO_DEPTH(x)	(0x1 << ((x) ? ((x) + 1) : 0))

#define UARTWATER_COUNT_MASK	0xff
#define UARTWATER_TXCNT_OFF	8
#define UARTWATER_RXCNT_OFF	24
#define UARTWATER_WATER_MASK	0xff
#define UARTWATER_TXWATER_OFF	0
#define UARTWATER_RXWATER_OFF	16

#define UART_GLOBAL_RST		0x2
#define GLOBAL_RST_MIN_US	20
#define GLOBAL_RST_MAX_US	40

/* Rx DMA timeout in ms, which is used to calculate Rx ring buffer size */
#define DMA_RX_TIMEOUT		(10)

#define DRIVER_NAME	"fsl-lpuart"
#define DEV_NAME	"ttyLP"
#define UART_NR		6

/* IMX lpuart has four extra unused regs located at the beginning */
#define IMX_REG_OFF	0x10

static DEFINE_IDA(fsl_lpuart_ida);

enum lpuart_type {
	VF610_LPUART,
	LS1021A_LPUART,
	LS1028A_LPUART,
	IMX7ULP_LPUART,
	IMX8QXP_LPUART,
};

struct lpuart_port {
	struct uart_port	port;
	enum lpuart_type	devtype;
	struct clk		*ipg_clk;
	struct clk		*baud_clk;
	unsigned int		txfifo_size;
	unsigned int		rxfifo_size;

	bool			lpuart_dma_tx_use;
	bool			lpuart_dma_rx_use;
	struct dma_chan		*dma_tx_chan;
	struct dma_chan		*dma_rx_chan;
	struct dma_async_tx_descriptor	*dma_tx_desc;
	struct dma_async_tx_descriptor	*dma_rx_desc;
	dma_cookie_t		dma_tx_cookie;
	dma_cookie_t		dma_rx_cookie;
	unsigned int		dma_tx_bytes;
	unsigned int		dma_rx_bytes;
	bool			dma_tx_in_progress;
	unsigned int		dma_rx_timeout;
	struct timer_list	lpuart_timer;
	struct scatterlist	rx_sgl, tx_sgl[2];
	struct circ_buf		rx_ring;
	int			rx_dma_rng_buf_len;
	unsigned int		dma_tx_nents;
	wait_queue_head_t	dma_wait;
	bool			id_allocated;
};

struct lpuart_soc_data {
	enum lpuart_type devtype;
	char iotype;
	u8 reg_off;
};

static const struct lpuart_soc_data vf_data = {
	.devtype = VF610_LPUART,
	.iotype = UPIO_MEM,
};

static const struct lpuart_soc_data ls1021a_data = {
	.devtype = LS1021A_LPUART,
	.iotype = UPIO_MEM32BE,
};

static const struct lpuart_soc_data ls1028a_data = {
	.devtype = LS1028A_LPUART,
	.iotype = UPIO_MEM32,
};

static struct lpuart_soc_data imx7ulp_data = {
	.devtype = IMX7ULP_LPUART,
	.iotype = UPIO_MEM32,
	.reg_off = IMX_REG_OFF,
};

static struct lpuart_soc_data imx8qxp_data = {
	.devtype = IMX8QXP_LPUART,
	.iotype = UPIO_MEM32,
	.reg_off = IMX_REG_OFF,
};

static const struct of_device_id lpuart_dt_ids[] = {
	{ .compatible = "fsl,vf610-lpuart",	.data = &vf_data, },
	{ .compatible = "fsl,ls1021a-lpuart",	.data = &ls1021a_data, },
	{ .compatible = "fsl,ls1028a-lpuart",	.data = &ls1028a_data, },
	{ .compatible = "fsl,imx7ulp-lpuart",	.data = &imx7ulp_data, },
	{ .compatible = "fsl,imx8qxp-lpuart",	.data = &imx8qxp_data, },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, lpuart_dt_ids);

/* Forward declare this for the dma callbacks */
static void lpuart_dma_tx_complete(void *arg);

static inline bool is_layerscape_lpuart(struct lpuart_port *sport)
{
	return (sport->devtype == LS1021A_LPUART ||
		sport->devtype == LS1028A_LPUART);
}

static inline bool is_imx7ulp_lpuart(struct lpuart_port *sport)
{
	return sport->devtype == IMX7ULP_LPUART;
}

static inline bool is_imx8qxp_lpuart(struct lpuart_port *sport)
{
	return sport->devtype == IMX8QXP_LPUART;
}

static inline u32 lpuart32_read(struct uart_port *port, u32 off)
{
	switch (port->iotype) {
	case UPIO_MEM32:
		return readl(port->membase + off);
	case UPIO_MEM32BE:
		return ioread32be(port->membase + off);
	default:
		return 0;
	}
}

static inline void lpuart32_write(struct uart_port *port, u32 val,
				  u32 off)
{
	switch (port->iotype) {
	case UPIO_MEM32:
		writel(val, port->membase + off);
		break;
	case UPIO_MEM32BE:
		iowrite32be(val, port->membase + off);
		break;
	}
}

static int __lpuart_enable_clks(struct lpuart_port *sport, bool is_en)
{
	int ret = 0;

	if (is_en) {
		ret = clk_prepare_enable(sport->ipg_clk);
		if (ret)
			return ret;

		ret = clk_prepare_enable(sport->baud_clk);
		if (ret) {
			clk_disable_unprepare(sport->ipg_clk);
			return ret;
		}
	} else {
		clk_disable_unprepare(sport->baud_clk);
		clk_disable_unprepare(sport->ipg_clk);
	}

	return 0;
}

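/* The i.MX8QXP derives its baud rate from a dedicated baud clock; the other variants use the ipg clock rate */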
static unsigned int lpuart_get_baud_clk_rate(struct lpuart_port *sport)
{
	if (is_imx8qxp_lpuart(sport))
		return clk_get_rate(sport->baud_clk);

	return clk_get_rate(sport->ipg_clk);
}

#define lpuart_enable_clks(x)	__lpuart_enable_clks(x, true)
#define lpuart_disable_clks(x)	__lpuart_enable_clks(x, false)

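/*
 * Soft-reset the whole LPUART block through the Global register on
 * i.MX7ULP/i.MX8x parts; the console port is skipped so its existing
 * setup is not disturbed.
 */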
static int lpuart_global_reset(struct lpuart_port *sport)
{
	struct uart_port *port = &sport->port;
	void __iomem *global_addr;
	int ret;

	if (uart_console(port))
		return 0;

	ret = clk_prepare_enable(sport->ipg_clk);
	if (ret) {
		dev_err(sport->port.dev, "failed to enable uart ipg clk: %d\n", ret);
		return ret;
	}

	if (is_imx7ulp_lpuart(sport) || is_imx8qxp_lpuart(sport)) {
		global_addr = port->membase + UART_GLOBAL - IMX_REG_OFF;
		writel(UART_GLOBAL_RST, global_addr);
		usleep_range(GLOBAL_RST_MIN_US, GLOBAL_RST_MAX_US);
		writel(0, global_addr);
		usleep_range(GLOBAL_RST_MIN_US, GLOBAL_RST_MAX_US);
	}

	clk_disable_unprepare(sport->ipg_clk);
	return 0;
}

static void lpuart_stop_tx(struct uart_port *port)
{
	unsigned char temp;

	temp = readb(port->membase + UARTCR2);
	temp &= ~(UARTCR2_TIE | UARTCR2_TCIE);
	writeb(temp, port->membase + UARTCR2);
}

static void lpuart32_stop_tx(struct uart_port *port)
{
	unsigned long temp;

	temp = lpuart32_read(port, UARTCTRL);
	temp &= ~(UARTCTRL_TIE | UARTCTRL_TCIE);
	lpuart32_write(port, temp, UARTCTRL);
}

static void lpuart_stop_rx(struct uart_port *port)
{
	unsigned char temp;

	temp = readb(port->membase + UARTCR2);
	writeb(temp & ~UARTCR2_RE, port->membase + UARTCR2);
}

static void lpuart32_stop_rx(struct uart_port *port)
{
	unsigned long temp;

	temp = lpuart32_read(port, UARTCTRL);
	lpuart32_write(port, temp & ~UARTCTRL_RE, UARTCTRL);
}

static void lpuart_dma_tx(struct lpuart_port *sport)
{
	struct circ_buf *xmit = &sport->port.state->xmit;
	struct scatterlist *sgl = sport->tx_sgl;
	struct device *dev = sport->port.dev;
	struct dma_chan *chan = sport->dma_tx_chan;
	int ret;

	if (sport->dma_tx_in_progress)
		return;

	sport->dma_tx_bytes = uart_circ_chars_pending(xmit);

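	/* A single sg entry suffices unless the data wraps around the end of the circular buffer */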
	if (xmit->tail < xmit->head || xmit->head == 0) {
		sport->dma_tx_nents = 1;
		sg_init_one(sgl, xmit->buf + xmit->tail, sport->dma_tx_bytes);
	} else {
		sport->dma_tx_nents = 2;
		sg_init_table(sgl, 2);
		sg_set_buf(sgl, xmit->buf + xmit->tail,
				UART_XMIT_SIZE - xmit->tail);
		sg_set_buf(sgl + 1, xmit->buf, xmit->head);
	}

	ret = dma_map_sg(chan->device->dev, sgl, sport->dma_tx_nents,
			 DMA_TO_DEVICE);
	if (!ret) {
		dev_err(dev, "DMA mapping error for TX.\n");
		return;
	}

	sport->dma_tx_desc = dmaengine_prep_slave_sg(chan, sgl,
					ret, DMA_MEM_TO_DEV,
					DMA_PREP_INTERRUPT);
	if (!sport->dma_tx_desc) {
		dma_unmap_sg(chan->device->dev, sgl, sport->dma_tx_nents,
			      DMA_TO_DEVICE);
		dev_err(dev, "Cannot prepare TX slave DMA!\n");
		return;
	}

	sport->dma_tx_desc->callback = lpuart_dma_tx_complete;
	sport->dma_tx_desc->callback_param = sport;
	sport->dma_tx_in_progress = true;
	sport->dma_tx_cookie = dmaengine_submit(sport->dma_tx_desc);
	dma_async_issue_pending(chan);
}

static bool lpuart_stopped_or_empty(struct uart_port *port)
{
	return uart_circ_empty(&port->state->xmit) || uart_tx_stopped(port);
}

static void lpuart_dma_tx_complete(void *arg)
{
	struct lpuart_port *sport = arg;
	struct scatterlist *sgl = &sport->tx_sgl[0];
	struct circ_buf *xmit = &sport->port.state->xmit;
	struct dma_chan *chan = sport->dma_tx_chan;
	unsigned long flags;

	spin_lock_irqsave(&sport->port.lock, flags);
	if (!sport->dma_tx_in_progress) {
		spin_unlock_irqrestore(&sport->port.lock, flags);
		return;
	}

	dma_unmap_sg(chan->device->dev, sgl, sport->dma_tx_nents,
		     DMA_TO_DEVICE);

	xmit->tail = (xmit->tail + sport->dma_tx_bytes) & (UART_XMIT_SIZE - 1);

	sport->port.icount.tx += sport->dma_tx_bytes;
	sport->dma_tx_in_progress = false;
	spin_unlock_irqrestore(&sport->port.lock, flags);

	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
		uart_write_wakeup(&sport->port);

	if (waitqueue_active(&sport->dma_wait)) {
		wake_up(&sport->dma_wait);
		return;
	}

	spin_lock_irqsave(&sport->port.lock, flags);

	if (!lpuart_stopped_or_empty(&sport->port))
		lpuart_dma_tx(sport);

	spin_unlock_irqrestore(&sport->port.lock, flags);
}

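/*
 * Physical address of the data register as seen by the DMA engine. On the
 * 32-bit big-endian layout the data byte is the last byte of the word, so
 * the one-byte-wide DMA accesses must be offset by sizeof(u32) - 1.
 */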
static dma_addr_t lpuart_dma_datareg_addr(struct lpuart_port *sport)
{
	switch (sport->port.iotype) {
	case UPIO_MEM32:
		return sport->port.mapbase + UARTDATA;
	case UPIO_MEM32BE:
		return sport->port.mapbase + UARTDATA + sizeof(u32) - 1;
	}
	return sport->port.mapbase + UARTDR;
}

static int lpuart_dma_tx_request(struct uart_port *port)
{
	struct lpuart_port *sport = container_of(port,
					struct lpuart_port, port);
	struct dma_slave_config dma_tx_sconfig = {};
	int ret;

	dma_tx_sconfig.dst_addr = lpuart_dma_datareg_addr(sport);
	dma_tx_sconfig.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
	dma_tx_sconfig.dst_maxburst = 1;
	dma_tx_sconfig.direction = DMA_MEM_TO_DEV;
	ret = dmaengine_slave_config(sport->dma_tx_chan, &dma_tx_sconfig);

	if (ret) {
		dev_err(sport->port.dev,
				"DMA slave config failed, err = %d\n", ret);
		return ret;
	}

	return 0;
}

static bool lpuart_is_32(struct lpuart_port *sport)
{
	return sport->port.iotype == UPIO_MEM32 ||
	       sport->port.iotype == UPIO_MEM32BE;
}

static void lpuart_flush_buffer(struct uart_port *port)
{
	struct lpuart_port *sport = container_of(port, struct lpuart_port, port);
	struct dma_chan *chan = sport->dma_tx_chan;
	u32 val;

	if (sport->lpuart_dma_tx_use) {
		if (sport->dma_tx_in_progress) {
			dma_unmap_sg(chan->device->dev, &sport->tx_sgl[0],
				sport->dma_tx_nents, DMA_TO_DEVICE);
			sport->dma_tx_in_progress = false;
		}
		dmaengine_terminate_all(chan);
	}

	if (lpuart_is_32(sport)) {
		val = lpuart32_read(&sport->port, UARTFIFO);
		val |= UARTFIFO_TXFLUSH | UARTFIFO_RXFLUSH;
		lpuart32_write(&sport->port, val, UARTFIFO);
	} else {
		val = readb(sport->port.membase + UARTCFIFO);
		val |= UARTCFIFO_TXFLUSH | UARTCFIFO_RXFLUSH;
		writeb(val, sport->port.membase + UARTCFIFO);
	}
}

static void lpuart_wait_bit_set(struct uart_port *port, unsigned int offset,
				u8 bit)
{
	while (!(readb(port->membase + offset) & bit))
		cpu_relax();
}

static void lpuart32_wait_bit_set(struct uart_port *port, unsigned int offset,
				  u32 bit)
{
	while (!(lpuart32_read(port, offset) & bit))
		cpu_relax();
}

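/* Polled I/O callbacks used with CONFIG_CONSOLE_POLL (e.g. by kgdboc) */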
#if defined(CONFIG_CONSOLE_POLL)

static int lpuart_poll_init(struct uart_port *port)
{
	struct lpuart_port *sport = container_of(port,
					struct lpuart_port, port);
	unsigned long flags;
	unsigned char temp;

	sport->port.fifosize = 0;

	spin_lock_irqsave(&sport->port.lock, flags);
	/* Disable Rx & Tx */
	writeb(0, sport->port.membase + UARTCR2);

	temp = readb(sport->port.membase + UARTPFIFO);
	/* Enable Rx and Tx FIFO */
	writeb(temp | UARTPFIFO_RXFE | UARTPFIFO_TXFE,
			sport->port.membase + UARTPFIFO);

	/* flush Tx and Rx FIFO */
	writeb(UARTCFIFO_TXFLUSH | UARTCFIFO_RXFLUSH,
			sport->port.membase + UARTCFIFO);

	/* explicitly clear RDRF */
	if (readb(sport->port.membase + UARTSR1) & UARTSR1_RDRF) {
		readb(sport->port.membase + UARTDR);
		writeb(UARTSFIFO_RXUF, sport->port.membase + UARTSFIFO);
	}

	writeb(0, sport->port.membase + UARTTWFIFO);
	writeb(1, sport->port.membase + UARTRWFIFO);

	/* Enable Rx and Tx */
	writeb(UARTCR2_RE | UARTCR2_TE, sport->port.membase + UARTCR2);
	spin_unlock_irqrestore(&sport->port.lock, flags);

	return 0;
}

static void lpuart_poll_put_char(struct uart_port *port, unsigned char c)
{
	/* drain */
	lpuart_wait_bit_set(port, UARTSR1, UARTSR1_TDRE);
	writeb(c, port->membase + UARTDR);
}

static int lpuart_poll_get_char(struct uart_port *port)
{
	if (!(readb(port->membase + UARTSR1) & UARTSR1_RDRF))
		return NO_POLL_CHAR;

	return readb(port->membase + UARTDR);
}

static int lpuart32_poll_init(struct uart_port *port)
{
	unsigned long flags;
	struct lpuart_port *sport = container_of(port, struct lpuart_port, port);
	u32 temp;

	sport->port.fifosize = 0;

	spin_lock_irqsave(&sport->port.lock, flags);

	/* Disable Rx & Tx */
	lpuart32_write(&sport->port, 0, UARTCTRL);

	temp = lpuart32_read(&sport->port, UARTFIFO);

	/* Enable Rx and Tx FIFO */
	lpuart32_write(&sport->port, temp | UARTFIFO_RXFE | UARTFIFO_TXFE, UARTFIFO);

	/* flush Tx and Rx FIFO */
	lpuart32_write(&sport->port, UARTFIFO_TXFLUSH | UARTFIFO_RXFLUSH, UARTFIFO);

	/* explicitly clear RDRF */
	if (lpuart32_read(&sport->port, UARTSTAT) & UARTSTAT_RDRF) {
		lpuart32_read(&sport->port, UARTDATA);
		lpuart32_write(&sport->port, UARTFIFO_RXUF, UARTFIFO);
	}

	/* Enable Rx and Tx */
	lpuart32_write(&sport->port, UARTCTRL_RE | UARTCTRL_TE, UARTCTRL);
	spin_unlock_irqrestore(&sport->port.lock, flags);

	return 0;
}

static void lpuart32_poll_put_char(struct uart_port *port, unsigned char c)
{
	lpuart32_wait_bit_set(port, UARTSTAT, UARTSTAT_TDRE);
	lpuart32_write(port, c, UARTDATA);
}

static int lpuart32_poll_get_char(struct uart_port *port)
{
	if (!(lpuart32_read(port, UARTWATER) >> UARTWATER_RXCNT_OFF))
		return NO_POLL_CHAR;

	return lpuart32_read(port, UARTDATA);
}
#endif

static inline void lpuart_transmit_buffer(struct lpuart_port *sport)
{
	struct circ_buf *xmit = &sport->port.state->xmit;

	if (sport->port.x_char) {
		writeb(sport->port.x_char, sport->port.membase + UARTDR);
		sport->port.icount.tx++;
		sport->port.x_char = 0;
		return;
	}

	if (lpuart_stopped_or_empty(&sport->port)) {
		lpuart_stop_tx(&sport->port);
		return;
	}

	while (!uart_circ_empty(xmit) &&
		(readb(sport->port.membase + UARTTCFIFO) < sport->txfifo_size)) {
		writeb(xmit->buf[xmit->tail], sport->port.membase + UARTDR);
		xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
		sport->port.icount.tx++;
	}

	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
		uart_write_wakeup(&sport->port);

	if (uart_circ_empty(xmit))
		lpuart_stop_tx(&sport->port);
}

static inline void lpuart32_transmit_buffer(struct lpuart_port *sport)
{
	struct circ_buf *xmit = &sport->port.state->xmit;
	unsigned long txcnt;

	if (sport->port.x_char) {
		lpuart32_write(&sport->port, sport->port.x_char, UARTDATA);
		sport->port.icount.tx++;
		sport->port.x_char = 0;
		return;
	}

	if (lpuart_stopped_or_empty(&sport->port)) {
		lpuart32_stop_tx(&sport->port);
		return;
	}

	txcnt = lpuart32_read(&sport->port, UARTWATER);
	txcnt = txcnt >> UARTWATER_TXCNT_OFF;
	txcnt &= UARTWATER_COUNT_MASK;
	while (!uart_circ_empty(xmit) && (txcnt < sport->txfifo_size)) {
		lpuart32_write(&sport->port, xmit->buf[xmit->tail], UARTDATA);
		xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
		sport->port.icount.tx++;
		txcnt = lpuart32_read(&sport->port, UARTWATER);
		txcnt = txcnt >> UARTWATER_TXCNT_OFF;
		txcnt &= UARTWATER_COUNT_MASK;
	}

	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
		uart_write_wakeup(&sport->port);

	if (uart_circ_empty(xmit))
		lpuart32_stop_tx(&sport->port);
}

static void lpuart_start_tx(struct uart_port *port)
{
	struct lpuart_port *sport = container_of(port,
			struct lpuart_port, port);
	unsigned char temp;

	temp = readb(port->membase + UARTCR2);
	writeb(temp | UARTCR2_TIE, port->membase + UARTCR2);

	if (sport->lpuart_dma_tx_use) {
		if (!lpuart_stopped_or_empty(port))
			lpuart_dma_tx(sport);
	} else {
		if (readb(port->membase + UARTSR1) & UARTSR1_TDRE)
			lpuart_transmit_buffer(sport);
	}
}

static void lpuart32_start_tx(struct uart_port *port)
{
	struct lpuart_port *sport = container_of(port, struct lpuart_port, port);
	unsigned long temp;

	if (sport->lpuart_dma_tx_use) {
		if (!lpuart_stopped_or_empty(port))
			lpuart_dma_tx(sport);
	} else {
		temp = lpuart32_read(port, UARTCTRL);
		lpuart32_write(port, temp | UARTCTRL_TIE, UARTCTRL);

		if (lpuart32_read(port, UARTSTAT) & UARTSTAT_TDRE)
			lpuart32_transmit_buffer(sport);
	}
}

/* return TIOCSER_TEMT when transmitter is not busy */
static unsigned int lpuart_tx_empty(struct uart_port *port)
{
	struct lpuart_port *sport = container_of(port,
			struct lpuart_port, port);
	unsigned char sr1 = readb(port->membase + UARTSR1);
	unsigned char sfifo = readb(port->membase + UARTSFIFO);

	if (sport->dma_tx_in_progress)
		return 0;

	if (sr1 & UARTSR1_TC && sfifo & UARTSFIFO_TXEMPT)
		return TIOCSER_TEMT;

	return 0;
}

static unsigned int lpuart32_tx_empty(struct uart_port *port)
{
	struct lpuart_port *sport = container_of(port,
			struct lpuart_port, port);
	unsigned long stat = lpuart32_read(port, UARTSTAT);
	unsigned long sfifo = lpuart32_read(port, UARTFIFO);

	if (sport->dma_tx_in_progress)
		return 0;

	if (stat & UARTSTAT_TC && sfifo & UARTFIFO_TXEMPT)
		return TIOCSER_TEMT;

	return 0;
}

static void lpuart_txint(struct lpuart_port *sport)
{
	spin_lock(&sport->port.lock);
	lpuart_transmit_buffer(sport);
	spin_unlock(&sport->port.lock);
}

static void lpuart_rxint(struct lpuart_port *sport)
{
	unsigned int flg, ignored = 0, overrun = 0;
	struct tty_port *port = &sport->port.state->port;
	unsigned char rx, sr;

	spin_lock(&sport->port.lock);

	while (!(readb(sport->port.membase + UARTSFIFO) & UARTSFIFO_RXEMPT)) {
		flg = TTY_NORMAL;
		sport->port.icount.rx++;
		/*
		 * to clear the FE, OR, NF and PE flags,
		 * read SR1 then read DR
		 */
		sr = readb(sport->port.membase + UARTSR1);
		rx = readb(sport->port.membase + UARTDR);

		if (uart_prepare_sysrq_char(&sport->port, rx))
			continue;

		if (sr & (UARTSR1_PE | UARTSR1_OR | UARTSR1_FE)) {
			if (sr & UARTSR1_PE)
				sport->port.icount.parity++;
			else if (sr & UARTSR1_FE)
				sport->port.icount.frame++;

			if (sr & UARTSR1_OR)
				overrun++;

			if (sr & sport->port.ignore_status_mask) {
				if (++ignored > 100)
					goto out;
				continue;
			}

			sr &= sport->port.read_status_mask;

			if (sr & UARTSR1_PE)
				flg = TTY_PARITY;
			else if (sr & UARTSR1_FE)
				flg = TTY_FRAME;

			if (sr & UARTSR1_OR)
				flg = TTY_OVERRUN;

			sport->port.sysrq = 0;
		}

		tty_insert_flip_char(port, rx, flg);
	}

out:
	if (overrun) {
		sport->port.icount.overrun += overrun;

		/*
		 * Overruns cause FIFO pointers to become misaligned.
		 * Flushing the receive FIFO reinitializes the pointers.
		 */
		writeb(UARTCFIFO_RXFLUSH, sport->port.membase + UARTCFIFO);
		writeb(UARTSFIFO_RXOF, sport->port.membase + UARTSFIFO);
	}

	uart_unlock_and_check_sysrq(&sport->port);

	tty_flip_buffer_push(port);
}

static void lpuart32_txint(struct lpuart_port *sport)
{
	spin_lock(&sport->port.lock);
	lpuart32_transmit_buffer(sport);
	spin_unlock(&sport->port.lock);
}

static void lpuart32_rxint(struct lpuart_port *sport)
{
	unsigned int flg, ignored = 0;
	struct tty_port *port = &sport->port.state->port;
	unsigned long rx, sr;
	bool is_break;

	spin_lock(&sport->port.lock);

	while (!(lpuart32_read(&sport->port, UARTFIFO) & UARTFIFO_RXEMPT)) {
		flg = TTY_NORMAL;
		sport->port.icount.rx++;
		/*
		 * to clear the FE, OR, NF and PE flags,
		 * read STAT then read DATA reg
		 */
		sr = lpuart32_read(&sport->port, UARTSTAT);
		rx = lpuart32_read(&sport->port, UARTDATA);
		rx &= UARTDATA_MASK;

		/*
		 * The LPUART can't distinguish between a break and a framing error,
		 * thus we assume it is a break if the received data is zero.
		 */
		is_break = (sr & UARTSTAT_FE) && !rx;

		if (is_break && uart_handle_break(&sport->port))
			continue;

		if (uart_prepare_sysrq_char(&sport->port, rx))
			continue;

		if (sr & (UARTSTAT_PE | UARTSTAT_OR | UARTSTAT_FE)) {
			if (sr & UARTSTAT_PE) {
				if (is_break)
					sport->port.icount.brk++;
				else
					sport->port.icount.parity++;
			} else if (sr & UARTSTAT_FE) {
				sport->port.icount.frame++;
			}

			if (sr & UARTSTAT_OR)
				sport->port.icount.overrun++;

			if (sr & sport->port.ignore_status_mask) {
				if (++ignored > 100)
					goto out;
				continue;
			}

			sr &= sport->port.read_status_mask;

			if (sr & UARTSTAT_PE) {
				if (is_break)
					flg = TTY_BREAK;
				else
					flg = TTY_PARITY;
			} else if (sr & UARTSTAT_FE) {
				flg = TTY_FRAME;
			}

			if (sr & UARTSTAT_OR)
				flg = TTY_OVERRUN;
		}

		tty_insert_flip_char(port, rx, flg);
	}

out:
	uart_unlock_and_check_sysrq(&sport->port);

	tty_flip_buffer_push(port);
}

static irqreturn_t lpuart_int(int irq, void *dev_id)
{
	struct lpuart_port *sport = dev_id;
	unsigned char sts;

	sts = readb(sport->port.membase + UARTSR1);

	/* With Rx DMA in use, detect a line break for SysRq via the framing error flag. */
	if (sts & UARTSR1_FE && sport->lpuart_dma_rx_use) {
		readb(sport->port.membase + UARTDR);
		uart_handle_break(&sport->port);
		/* a line break produces some garbage, flush it away */
		writeb(UARTCFIFO_RXFLUSH, sport->port.membase + UARTCFIFO);
		return IRQ_HANDLED;
	}

	if (sts & UARTSR1_RDRF && !sport->lpuart_dma_rx_use)
		lpuart_rxint(sport);

	if (sts & UARTSR1_TDRE && !sport->lpuart_dma_tx_use)
		lpuart_txint(sport);

	return IRQ_HANDLED;
}

static irqreturn_t lpuart32_int(int irq, void *dev_id)
{
	struct lpuart_port *sport = dev_id;
	unsigned long sts, rxcount;

	sts = lpuart32_read(&sport->port, UARTSTAT);
	rxcount = lpuart32_read(&sport->port, UARTWATER);
	rxcount = rxcount >> UARTWATER_RXCNT_OFF;

	if ((sts & UARTSTAT_RDRF || rxcount > 0) && !sport->lpuart_dma_rx_use)
		lpuart32_rxint(sport);

	if ((sts & UARTSTAT_TDRE) && !sport->lpuart_dma_tx_use)
		lpuart32_txint(sport);

	lpuart32_write(&sport->port, sts, UARTSTAT);
	return IRQ_HANDLED;
}

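/*
 * With Rx DMA in use the per-character sysrq handling in the interrupt path
 * is bypassed, so scan the characters collected in the DMA ring buffer.
 */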
static inline void lpuart_handle_sysrq_chars(struct uart_port *port,
					     unsigned char *p, int count)
{
	while (count--) {
		if (*p && uart_handle_sysrq_char(port, *p))
			return;
		p++;
	}
}

static void lpuart_handle_sysrq(struct lpuart_port *sport)
{
	struct circ_buf *ring = &sport->rx_ring;
	int count;

	if (ring->head < ring->tail) {
		count = sport->rx_sgl.length - ring->tail;
		lpuart_handle_sysrq_chars(&sport->port,
					  ring->buf + ring->tail, count);
		ring->tail = 0;
	}

	if (ring->head > ring->tail) {
		count = ring->head - ring->tail;
		lpuart_handle_sysrq_chars(&sport->port,
					  ring->buf + ring->tail, count);
		ring->tail = ring->head;
	}
}

static void lpuart_copy_rx_to_tty(struct lpuart_port *sport)
{
	struct tty_port *port = &sport->port.state->port;
	struct dma_tx_state state;
	enum dma_status dmastat;
	struct dma_chan *chan = sport->dma_rx_chan;
	struct circ_buf *ring = &sport->rx_ring;
	unsigned long flags;
	int count = 0;

	if (lpuart_is_32(sport)) {
		unsigned long sr = lpuart32_read(&sport->port, UARTSTAT);

		if (sr & (UARTSTAT_PE | UARTSTAT_FE)) {
			/* Read DR to clear the error flags */
			lpuart32_read(&sport->port, UARTDATA);

			if (sr & UARTSTAT_PE)
				sport->port.icount.parity++;
			else if (sr & UARTSTAT_FE)
				sport->port.icount.frame++;
		}
	} else {
		unsigned char sr = readb(sport->port.membase + UARTSR1);

		if (sr & (UARTSR1_PE | UARTSR1_FE)) {
			unsigned char cr2;

			/* Disable receiver during this operation... */
			cr2 = readb(sport->port.membase + UARTCR2);
			cr2 &= ~UARTCR2_RE;
			writeb(cr2, sport->port.membase + UARTCR2);

			/* Read DR to clear the error flags */
			readb(sport->port.membase + UARTDR);

			if (sr & UARTSR1_PE)
				sport->port.icount.parity++;
			else if (sr & UARTSR1_FE)
				sport->port.icount.frame++;
			/*
			 * At this point the parity/framing error is cleared.
			 * However, since the DMA already read the data register
			 * and we had to read it again after reading the status
			 * register to properly clear the flags, the FIFO actually
			 * underflowed... This requires a clearing of the FIFO...
			 */
			if (readb(sport->port.membase + UARTSFIFO) &
			    UARTSFIFO_RXUF) {
				writeb(UARTSFIFO_RXUF,
					sport->port.membase + UARTSFIFO);
				writeb(UARTCFIFO_RXFLUSH,
					sport->port.membase + UARTCFIFO);
			}

			cr2 |= UARTCR2_RE;
			writeb(cr2, sport->port.membase + UARTCR2);
		}
	}

	async_tx_ack(sport->dma_rx_desc);

	spin_lock_irqsave(&sport->port.lock, flags);

	dmastat = dmaengine_tx_status(chan, sport->dma_rx_cookie, &state);
	if (dmastat == DMA_ERROR) {
		dev_err(sport->port.dev, "Rx DMA transfer failed!\n");
		spin_unlock_irqrestore(&sport->port.lock, flags);
		return;
	}

	/* CPU claims ownership of RX DMA buffer */
	dma_sync_sg_for_cpu(chan->device->dev, &sport->rx_sgl, 1,
			    DMA_FROM_DEVICE);

	/*
	 * ring->head points to the end of data already written by the DMA.
	 * ring->tail points to the beginning of data to be read by the
	 * framework.
	 * The current transfer size should not be larger than the dma buffer
	 * length.
	 */
	ring->head = sport->rx_sgl.length - state.residue;
	BUG_ON(ring->head > sport->rx_sgl.length);

	/*
	 * Silent handling of keys pressed in the sysrq timeframe
	 */
	if (sport->port.sysrq) {
		lpuart_handle_sysrq(sport);
		goto exit;
	}

	/*
	 * At this point ring->head may point to the first byte right after the
	 * last byte of the dma buffer:
	 * 0 <= ring->head <= sport->rx_sgl.length
	 *
	 * However ring->tail must always point inside the dma buffer:
	 * 0 <= ring->tail <= sport->rx_sgl.length - 1
	 *
	 * Since we use a ring buffer, we have to handle the case
	 * where head is lower than tail. In such a case, we first read from
	 * tail to the end of the buffer then reset tail.
	 */
	if (ring->head < ring->tail) {
		count = sport->rx_sgl.length - ring->tail;

		tty_insert_flip_string(port, ring->buf + ring->tail, count);
		ring->tail = 0;
		sport->port.icount.rx += count;
	}

	/* Finally we read data from tail to head */
	if (ring->tail < ring->head) {
		count = ring->head - ring->tail;
		tty_insert_flip_string(port, ring->buf + ring->tail, count);
		/* Wrap ring->head if needed */
		if (ring->head >= sport->rx_sgl.length)
			ring->head = 0;
		ring->tail = ring->head;
		sport->port.icount.rx += count;
	}

exit:
	dma_sync_sg_for_device(chan->device->dev, &sport->rx_sgl, 1,
			       DMA_FROM_DEVICE);

	spin_unlock_irqrestore(&sport->port.lock, flags);

	tty_flip_buffer_push(port);
	mod_timer(&sport->lpuart_timer, jiffies + sport->dma_rx_timeout);
}

static void lpuart_dma_rx_complete(void *arg)
{
	struct lpuart_port *sport = arg;

	lpuart_copy_rx_to_tty(sport);
}

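/* Rx DMA idle timer: periodically push whatever the cyclic DMA has collected to the tty layer */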
Kees Cooke99e88a2017-10-16 14:43:17 -07001247static void lpuart_timer_func(struct timer_list *t)
Bhuvanchandra DV5887ad42016-07-19 13:13:07 +05301248{
Kees Cooke99e88a2017-10-16 14:43:17 -07001249 struct lpuart_port *sport = from_timer(sport, t, lpuart_timer);
Bhuvanchandra DV5887ad42016-07-19 13:13:07 +05301250
1251 lpuart_copy_rx_to_tty(sport);
1252}
1253
1254static inline int lpuart_start_rx_dma(struct lpuart_port *sport)
1255{
1256 struct dma_slave_config dma_rx_sconfig = {};
1257 struct circ_buf *ring = &sport->rx_ring;
1258 int ret, nent;
1259 int bits, baud;
Stefan Agner3216c622018-08-28 12:44:24 +02001260 struct tty_port *port = &sport->port.state->port;
1261 struct tty_struct *tty = port->tty;
Bhuvanchandra DV5887ad42016-07-19 13:13:07 +05301262 struct ktermios *termios = &tty->termios;
Michael Wallea092ab22020-03-06 22:44:31 +01001263 struct dma_chan *chan = sport->dma_rx_chan;
Bhuvanchandra DV5887ad42016-07-19 13:13:07 +05301264
1265 baud = tty_get_baud_rate(tty);
1266
1267 bits = (termios->c_cflag & CSIZE) == CS7 ? 9 : 10;
1268 if (termios->c_cflag & PARENB)
1269 bits++;
1270
1271 /*
1272 * Calculate length of one DMA buffer size to keep latency below
1273 * 10ms at any baud rate.
1274 */
1275 sport->rx_dma_rng_buf_len = (DMA_RX_TIMEOUT * baud / bits / 1000) * 2;
1276 sport->rx_dma_rng_buf_len = (1 << (fls(sport->rx_dma_rng_buf_len) - 1));
1277 if (sport->rx_dma_rng_buf_len < 16)
1278 sport->rx_dma_rng_buf_len = 16;
1279
Fugang Duanca8d92f2019-07-17 13:19:28 +08001280 ring->buf = kzalloc(sport->rx_dma_rng_buf_len, GFP_ATOMIC);
Fabio Estevam099f79c2019-06-11 10:03:39 -03001281 if (!ring->buf)
Bhuvanchandra DV5887ad42016-07-19 13:13:07 +05301282 return -ENOMEM;
Bhuvanchandra DV5887ad42016-07-19 13:13:07 +05301283
1284 sg_init_one(&sport->rx_sgl, ring->buf, sport->rx_dma_rng_buf_len);
Michael Wallea092ab22020-03-06 22:44:31 +01001285 nent = dma_map_sg(chan->device->dev, &sport->rx_sgl, 1,
1286 DMA_FROM_DEVICE);
Bhuvanchandra DV5887ad42016-07-19 13:13:07 +05301287
1288 if (!nent) {
1289 dev_err(sport->port.dev, "DMA Rx mapping error\n");
1290 return -EINVAL;
1291 }
1292
Atsushi Nemoto42b68762019-01-23 12:20:17 +09001293 dma_rx_sconfig.src_addr = lpuart_dma_datareg_addr(sport);
Bhuvanchandra DV5887ad42016-07-19 13:13:07 +05301294 dma_rx_sconfig.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
1295 dma_rx_sconfig.src_maxburst = 1;
1296 dma_rx_sconfig.direction = DMA_DEV_TO_MEM;
Michael Wallea092ab22020-03-06 22:44:31 +01001297 ret = dmaengine_slave_config(chan, &dma_rx_sconfig);
Bhuvanchandra DV5887ad42016-07-19 13:13:07 +05301298
1299 if (ret < 0) {
1300 dev_err(sport->port.dev,
1301 "DMA Rx slave config failed, err = %d\n", ret);
1302 return ret;
1303 }
1304
Michael Wallea092ab22020-03-06 22:44:31 +01001305 sport->dma_rx_desc = dmaengine_prep_dma_cyclic(chan,
Bhuvanchandra DV5887ad42016-07-19 13:13:07 +05301306 sg_dma_address(&sport->rx_sgl),
1307 sport->rx_sgl.length,
1308 sport->rx_sgl.length / 2,
1309 DMA_DEV_TO_MEM,
1310 DMA_PREP_INTERRUPT);
1311 if (!sport->dma_rx_desc) {
1312 dev_err(sport->port.dev, "Cannot prepare cyclic DMA\n");
1313 return -EFAULT;
1314 }
1315
1316 sport->dma_rx_desc->callback = lpuart_dma_rx_complete;
1317 sport->dma_rx_desc->callback_param = sport;
1318 sport->dma_rx_cookie = dmaengine_submit(sport->dma_rx_desc);
Michael Wallea092ab22020-03-06 22:44:31 +01001319 dma_async_issue_pending(chan);
Bhuvanchandra DV5887ad42016-07-19 13:13:07 +05301320
Atsushi Nemoto42b68762019-01-23 12:20:17 +09001321 if (lpuart_is_32(sport)) {
1322 unsigned long temp = lpuart32_read(&sport->port, UARTBAUD);
1323
1324 lpuart32_write(&sport->port, temp | UARTBAUD_RDMAE, UARTBAUD);
1325 } else {
1326 writeb(readb(sport->port.membase + UARTCR5) | UARTCR5_RDMAS,
1327 sport->port.membase + UARTCR5);
1328 }
Bhuvanchandra DV5887ad42016-07-19 13:13:07 +05301329
1330 return 0;
1331}
1332
Bhuvanchandra DV5887ad42016-07-19 13:13:07 +05301333static void lpuart_dma_rx_free(struct uart_port *port)
1334{
1335 struct lpuart_port *sport = container_of(port,
1336 struct lpuart_port, port);
Michael Wallea092ab22020-03-06 22:44:31 +01001337 struct dma_chan *chan = sport->dma_rx_chan;
Bhuvanchandra DV5887ad42016-07-19 13:13:07 +05301338
Michael Walle810bc0a2020-04-03 19:49:42 +02001339 dmaengine_terminate_all(chan);
Michael Wallea092ab22020-03-06 22:44:31 +01001340 dma_unmap_sg(chan->device->dev, &sport->rx_sgl, 1, DMA_FROM_DEVICE);
Bhuvanchandra DV5887ad42016-07-19 13:13:07 +05301341 kfree(sport->rx_ring.buf);
1342 sport->rx_ring.tail = 0;
1343 sport->rx_ring.head = 0;
1344 sport->dma_rx_desc = NULL;
1345 sport->dma_rx_cookie = -EINVAL;
1346}
1347
Bhuvanchandra DV03895cf2016-07-19 13:13:10 +05301348static int lpuart_config_rs485(struct uart_port *port,
1349 struct serial_rs485 *rs485)
1350{
1351 struct lpuart_port *sport = container_of(port,
1352 struct lpuart_port, port);
1353
1354 u8 modem = readb(sport->port.membase + UARTMODEM) &
1355 ~(UARTMODEM_TXRTSPOL | UARTMODEM_TXRTSE);
1356 writeb(modem, sport->port.membase + UARTMODEM);
1357
Uwe Kleine-König68c338e2017-07-18 12:59:40 +02001358 /* clear unsupported configurations */
1359 rs485->delay_rts_before_send = 0;
1360 rs485->delay_rts_after_send = 0;
1361 rs485->flags &= ~SER_RS485_RX_DURING_TX;
1362
Bhuvanchandra DV03895cf2016-07-19 13:13:10 +05301363 if (rs485->flags & SER_RS485_ENABLED) {
1364 /* Enable auto RS-485 RTS mode */
1365 modem |= UARTMODEM_TXRTSE;
1366
1367 /*
Fabio Estevamc9fe14a2020-08-18 19:44:57 -03001368 * RTS needs to be logic HIGH either during transfer _or_ after
Bhuvanchandra DV03895cf2016-07-19 13:13:10 +05301369 * transfer, other variants are not supported by the hardware.
1370 */
1371
1372 if (!(rs485->flags & (SER_RS485_RTS_ON_SEND |
1373 SER_RS485_RTS_AFTER_SEND)))
1374 rs485->flags |= SER_RS485_RTS_ON_SEND;
1375
1376 if (rs485->flags & SER_RS485_RTS_ON_SEND &&
1377 rs485->flags & SER_RS485_RTS_AFTER_SEND)
1378 rs485->flags &= ~SER_RS485_RTS_AFTER_SEND;
1379
1380 /*
1381 * The hardware defaults to RTS logic HIGH while transfer.
1382 * Switch polarity in case RTS shall be logic HIGH
1383 * after transfer.
1384 * Note: UART is assumed to be active high.
1385 */
1386 if (rs485->flags & SER_RS485_RTS_ON_SEND)
1387 modem &= ~UARTMODEM_TXRTSPOL;
1388 else if (rs485->flags & SER_RS485_RTS_AFTER_SEND)
1389 modem |= UARTMODEM_TXRTSPOL;
1390 }
1391
1392 /* Store the new configuration */
1393 sport->port.rs485 = *rs485;
1394
1395 writeb(modem, sport->port.membase + UARTMODEM);
1396 return 0;
1397}
1398
Philippe Schenker67b01832019-10-17 14:14:42 +00001399static int lpuart32_config_rs485(struct uart_port *port,
1400 struct serial_rs485 *rs485)
1401{
1402 struct lpuart_port *sport = container_of(port,
1403 struct lpuart_port, port);
1404
1405 unsigned long modem = lpuart32_read(&sport->port, UARTMODIR)
1406 & ~(UARTMODEM_TXRTSPOL | UARTMODEM_TXRTSE);
1407 lpuart32_write(&sport->port, modem, UARTMODIR);
1408
1409 /* clear unsupported configurations */
1410 rs485->delay_rts_before_send = 0;
1411 rs485->delay_rts_after_send = 0;
1412 rs485->flags &= ~SER_RS485_RX_DURING_TX;
1413
1414 if (rs485->flags & SER_RS485_ENABLED) {
1415 /* Enable auto RS-485 RTS mode */
1416 modem |= UARTMODEM_TXRTSE;
1417
1418 /*
Fabio Estevamc9fe14a2020-08-18 19:44:57 -03001419 * RTS needs to be logic HIGH either during transfer _or_ after
Philippe Schenker67b01832019-10-17 14:14:42 +00001420 * transfer, other variants are not supported by the hardware.
1421 */
1422
1423 if (!(rs485->flags & (SER_RS485_RTS_ON_SEND |
1424 SER_RS485_RTS_AFTER_SEND)))
1425 rs485->flags |= SER_RS485_RTS_ON_SEND;
1426
1427 if (rs485->flags & SER_RS485_RTS_ON_SEND &&
1428 rs485->flags & SER_RS485_RTS_AFTER_SEND)
1429 rs485->flags &= ~SER_RS485_RTS_AFTER_SEND;
1430
1431 /*
1432		 * The hardware defaults to RTS logic HIGH during the transfer.
1433		 * Switch the polarity in case RTS shall be logic HIGH
1434		 * after the transfer.
1435 * Note: UART is assumed to be active high.
1436 */
1437 if (rs485->flags & SER_RS485_RTS_ON_SEND)
1438 modem &= ~UARTMODEM_TXRTSPOL;
1439 else if (rs485->flags & SER_RS485_RTS_AFTER_SEND)
1440 modem |= UARTMODEM_TXRTSPOL;
1441 }
1442
1443 /* Store the new configuration */
1444 sport->port.rs485 = *rs485;
1445
1446 lpuart32_write(&sport->port, modem, UARTMODIR);
1447 return 0;
1448}
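/*
 * Editorial example, not part of the original driver: a minimal userspace
 * sketch of how the two rs485_config callbacks above are reached through the
 * standard TIOCSRS485 ioctl. The device node name is only illustrative.
 *
 *	#include <fcntl.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/serial.h>
 *
 *	int fd = open("/dev/ttyLP1", O_RDWR | O_NOCTTY);
 *	struct serial_rs485 rs485 = { 0 };
 *
 *	rs485.flags = SER_RS485_ENABLED | SER_RS485_RTS_ON_SEND;
 *	if (ioctl(fd, TIOCSRS485, &rs485) < 0)
 *		perror("TIOCSRS485");
 *
 * With SER_RS485_RTS_ON_SEND the handlers above leave UARTMODEM_TXRTSPOL
 * cleared, so RTS is asserted only for the duration of the transfer.
 */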
1449
Jingchang Luc9e2e942013-06-07 09:20:40 +08001450static unsigned int lpuart_get_mctrl(struct uart_port *port)
1451{
Michael Walle8a0c8102021-05-12 16:12:54 +02001452 unsigned int mctrl = 0;
1453 u8 reg;
1454
1455 reg = readb(port->membase + UARTCR1);
1456 if (reg & UARTCR1_LOOPS)
1457 mctrl |= TIOCM_LOOP;
1458
1459 return mctrl;
Jingchang Luc9e2e942013-06-07 09:20:40 +08001460}
1461
Jingchang Lu380c9662014-07-14 17:41:11 +08001462static unsigned int lpuart32_get_mctrl(struct uart_port *port)
1463{
Sherry Sun06e91df2021-07-29 16:31:09 +08001464 unsigned int mctrl = TIOCM_CAR | TIOCM_DSR | TIOCM_CTS;
Michael Walle8a0c8102021-05-12 16:12:54 +02001465 u32 reg;
1466
1467 reg = lpuart32_read(port, UARTCTRL);
1468 if (reg & UARTCTRL_LOOPS)
1469 mctrl |= TIOCM_LOOP;
1470
1471 return mctrl;
Jingchang Lu380c9662014-07-14 17:41:11 +08001472}
1473
Jingchang Luc9e2e942013-06-07 09:20:40 +08001474static void lpuart_set_mctrl(struct uart_port *port, unsigned int mctrl)
1475{
Michael Walle8a0c8102021-05-12 16:12:54 +02001476 u8 reg;
Jingchang Luc9e2e942013-06-07 09:20:40 +08001477
Michael Walle8a0c8102021-05-12 16:12:54 +02001478 reg = readb(port->membase + UARTCR1);
1479
1480 /* for internal loopback we need LOOPS=1 and RSRC=0 */
1481 reg &= ~(UARTCR1_LOOPS | UARTCR1_RSRC);
1482 if (mctrl & TIOCM_LOOP)
1483 reg |= UARTCR1_LOOPS;
1484
1485 writeb(reg, port->membase + UARTCR1);
Jingchang Luc9e2e942013-06-07 09:20:40 +08001486}
1487
Jingchang Lu380c9662014-07-14 17:41:11 +08001488static void lpuart32_set_mctrl(struct uart_port *port, unsigned int mctrl)
1489{
Michael Walle8a0c8102021-05-12 16:12:54 +02001490 u32 reg;
Jingchang Lu380c9662014-07-14 17:41:11 +08001491
Michael Walle8a0c8102021-05-12 16:12:54 +02001492 reg = lpuart32_read(port, UARTCTRL);
1493
1494 /* for internal loopback we need LOOPS=1 and RSRC=0 */
1495 reg &= ~(UARTCTRL_LOOPS | UARTCTRL_RSRC);
1496 if (mctrl & TIOCM_LOOP)
1497 reg |= UARTCTRL_LOOPS;
1498
1499 lpuart32_write(port, reg, UARTCTRL);
Jingchang Lu380c9662014-07-14 17:41:11 +08001500}
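/*
 * Editorial note, not from the original source: the loopback handling in the
 * get/set_mctrl callbacks above is normally exercised via the modem-control
 * ioctls. A hedged sketch; TIOCM_LOOP comes from the kernel termios ABI and
 * may be missing from some libc headers, hence the fallback define:
 *
 *	#include <sys/ioctl.h>
 *
 *	#ifndef TIOCM_LOOP
 *	#define TIOCM_LOOP 0x8000	// value from the kernel termios ABI
 *	#endif
 *
 *	int bits = TIOCM_LOOP;
 *
 *	ioctl(fd, TIOCMBIS, &bits);	// LOOPS=1, RSRC=0: internal loopback
 *	ioctl(fd, TIOCMBIC, &bits);	// back to normal operation
 */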
1501
Jingchang Luc9e2e942013-06-07 09:20:40 +08001502static void lpuart_break_ctl(struct uart_port *port, int break_state)
1503{
1504 unsigned char temp;
1505
1506 temp = readb(port->membase + UARTCR2) & ~UARTCR2_SBK;
1507
1508 if (break_state != 0)
1509 temp |= UARTCR2_SBK;
1510
1511 writeb(temp, port->membase + UARTCR2);
1512}
1513
Jingchang Lu380c9662014-07-14 17:41:11 +08001514static void lpuart32_break_ctl(struct uart_port *port, int break_state)
1515{
1516 unsigned long temp;
1517
Dong Aishenga0204f22017-06-13 10:55:49 +08001518 temp = lpuart32_read(port, UARTCTRL) & ~UARTCTRL_SBK;
Jingchang Lu380c9662014-07-14 17:41:11 +08001519
1520 if (break_state != 0)
1521 temp |= UARTCTRL_SBK;
1522
Dong Aishenga0204f22017-06-13 10:55:49 +08001523 lpuart32_write(port, temp, UARTCTRL);
Jingchang Lu380c9662014-07-14 17:41:11 +08001524}
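/*
 * Editorial note, not from the original source: the break_ctl hooks above are
 * driven by the tty core, for example when userspace issues a break with
 * tcsendbreak(), which asserts SBK for the break duration:
 *
 *	#include <termios.h>
 *
 *	tcsendbreak(fd, 0);	// transmit a break of the default length
 */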
1525
Jingchang Luc9e2e942013-06-07 09:20:40 +08001526static void lpuart_setup_watermark(struct lpuart_port *sport)
1527{
1528 unsigned char val, cr2;
Shawn Guobc764b82013-07-08 15:53:38 +08001529 unsigned char cr2_saved;
Jingchang Luc9e2e942013-06-07 09:20:40 +08001530
1531 cr2 = readb(sport->port.membase + UARTCR2);
Shawn Guobc764b82013-07-08 15:53:38 +08001532 cr2_saved = cr2;
Jingchang Luc9e2e942013-06-07 09:20:40 +08001533 cr2 &= ~(UARTCR2_TIE | UARTCR2_TCIE | UARTCR2_TE |
1534 UARTCR2_RIE | UARTCR2_RE);
1535 writeb(cr2, sport->port.membase + UARTCR2);
1536
Jingchang Luc9e2e942013-06-07 09:20:40 +08001537 val = readb(sport->port.membase + UARTPFIFO);
Jingchang Luc9e2e942013-06-07 09:20:40 +08001538 writeb(val | UARTPFIFO_TXFE | UARTPFIFO_RXFE,
1539 sport->port.membase + UARTPFIFO);
1540
1541 /* flush Tx and Rx FIFO */
1542 writeb(UARTCFIFO_TXFLUSH | UARTCFIFO_RXFLUSH,
1543 sport->port.membase + UARTCFIFO);
1544
Stefan Agnerd68827c2016-07-19 13:13:05 +05301545 /* explicitly clear RDRF */
1546 if (readb(sport->port.membase + UARTSR1) & UARTSR1_RDRF) {
1547 readb(sport->port.membase + UARTDR);
1548 writeb(UARTSFIFO_RXUF, sport->port.membase + UARTSFIFO);
1549 }
1550
Yuan Yaof1cd8c82014-02-17 13:28:07 +08001551 writeb(0, sport->port.membase + UARTTWFIFO);
Jingchang Luc9e2e942013-06-07 09:20:40 +08001552 writeb(1, sport->port.membase + UARTRWFIFO);
Shawn Guobc764b82013-07-08 15:53:38 +08001553
1554 /* Restore cr2 */
1555 writeb(cr2_saved, sport->port.membase + UARTCR2);
Jingchang Luc9e2e942013-06-07 09:20:40 +08001556}
1557
Andrey Smirnov352bd552019-08-05 11:56:59 -07001558static void lpuart_setup_watermark_enable(struct lpuart_port *sport)
1559{
1560 unsigned char cr2;
1561
1562 lpuart_setup_watermark(sport);
1563
1564 cr2 = readb(sport->port.membase + UARTCR2);
Andrey Smirnovf7ec1722019-08-05 11:57:00 -07001565 cr2 |= UARTCR2_RIE | UARTCR2_RE | UARTCR2_TE;
Andrey Smirnov352bd552019-08-05 11:56:59 -07001566 writeb(cr2, sport->port.membase + UARTCR2);
1567}
1568
Jingchang Lu380c9662014-07-14 17:41:11 +08001569static void lpuart32_setup_watermark(struct lpuart_port *sport)
1570{
1571 unsigned long val, ctrl;
1572 unsigned long ctrl_saved;
1573
Dong Aishenga0204f22017-06-13 10:55:49 +08001574 ctrl = lpuart32_read(&sport->port, UARTCTRL);
Jingchang Lu380c9662014-07-14 17:41:11 +08001575 ctrl_saved = ctrl;
1576 ctrl &= ~(UARTCTRL_TIE | UARTCTRL_TCIE | UARTCTRL_TE |
1577 UARTCTRL_RIE | UARTCTRL_RE);
Dong Aishenga0204f22017-06-13 10:55:49 +08001578 lpuart32_write(&sport->port, ctrl, UARTCTRL);
Jingchang Lu380c9662014-07-14 17:41:11 +08001579
1580 /* enable FIFO mode */
Dong Aishenga0204f22017-06-13 10:55:49 +08001581 val = lpuart32_read(&sport->port, UARTFIFO);
Jingchang Lu380c9662014-07-14 17:41:11 +08001582 val |= UARTFIFO_TXFE | UARTFIFO_RXFE;
1583 val |= UARTFIFO_TXFLUSH | UARTFIFO_RXFLUSH;
Dong Aishenga0204f22017-06-13 10:55:49 +08001584 lpuart32_write(&sport->port, val, UARTFIFO);
Jingchang Lu380c9662014-07-14 17:41:11 +08001585
1586 /* set the watermark */
1587 val = (0x1 << UARTWATER_RXWATER_OFF) | (0x0 << UARTWATER_TXWATER_OFF);
Dong Aishenga0204f22017-06-13 10:55:49 +08001588 lpuart32_write(&sport->port, val, UARTWATER);
Jingchang Lu380c9662014-07-14 17:41:11 +08001589
1590	/* Restore ctrl */
Dong Aishenga0204f22017-06-13 10:55:49 +08001591 lpuart32_write(&sport->port, ctrl_saved, UARTCTRL);
Jingchang Lu380c9662014-07-14 17:41:11 +08001592}
1593
Andrey Smirnov352bd552019-08-05 11:56:59 -07001594static void lpuart32_setup_watermark_enable(struct lpuart_port *sport)
1595{
1596 u32 temp;
1597
1598 lpuart32_setup_watermark(sport);
1599
1600 temp = lpuart32_read(&sport->port, UARTCTRL);
1601 temp |= UARTCTRL_RE | UARTCTRL_TE | UARTCTRL_ILIE;
1602 lpuart32_write(&sport->port, temp, UARTCTRL);
1603}
1604
Bhuvanchandra DV5887ad42016-07-19 13:13:07 +05301605static void rx_dma_timer_init(struct lpuart_port *sport)
Yuan Yaof1cd8c82014-02-17 13:28:07 +08001606{
Andrey Smirnov834a9742019-07-29 12:52:07 -07001607 timer_setup(&sport->lpuart_timer, lpuart_timer_func, 0);
1608 sport->lpuart_timer.expires = jiffies + sport->dma_rx_timeout;
1609 add_timer(&sport->lpuart_timer);
Yuan Yaof1cd8c82014-02-17 13:28:07 +08001610}
1611
Michael Walled0e76002020-03-25 10:06:57 +01001612static void lpuart_request_dma(struct lpuart_port *sport)
Andrey Smirnov59821992019-08-05 11:56:56 -07001613{
Michael Walle159381d2020-03-06 22:44:30 +01001614 sport->dma_tx_chan = dma_request_chan(sport->port.dev, "tx");
1615 if (IS_ERR(sport->dma_tx_chan)) {
Fabio Estevam44da0362020-04-16 12:34:53 -03001616 dev_dbg_once(sport->port.dev,
1617 "DMA tx channel request failed, operating without tx DMA (%ld)\n",
1618 PTR_ERR(sport->dma_tx_chan));
Michael Walle159381d2020-03-06 22:44:30 +01001619 sport->dma_tx_chan = NULL;
Andrey Smirnov59821992019-08-05 11:56:56 -07001620 }
Michael Walle159381d2020-03-06 22:44:30 +01001621
Michael Walled0e76002020-03-25 10:06:57 +01001622 sport->dma_rx_chan = dma_request_chan(sport->port.dev, "rx");
1623 if (IS_ERR(sport->dma_rx_chan)) {
Fabio Estevam44da0362020-04-16 12:34:53 -03001624 dev_dbg_once(sport->port.dev,
1625 "DMA rx channel request failed, operating without rx DMA (%ld)\n",
1626 PTR_ERR(sport->dma_rx_chan));
Michael Walled0e76002020-03-25 10:06:57 +01001627 sport->dma_rx_chan = NULL;
1628 }
1629}
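/*
 * Editorial sketch, not part of the original file: the dma_request_chan()
 * calls above look the channels up by name, so a device tree node wired for
 * DMA needs "rx"/"tx" entries along these lines (the eDMA specifiers below
 * are purely illustrative and SoC specific):
 *
 *	&lpuart0 {
 *		dmas = <&edma0 0 2>, <&edma0 0 3>;
 *		dma-names = "rx", "tx";
 *	};
 */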
1630
1631static void lpuart_tx_dma_startup(struct lpuart_port *sport)
1632{
1633 u32 uartbaud;
1634 int ret;
1635
Michael Walle8cac2f62021-05-12 16:12:55 +02001636 if (uart_console(&sport->port))
1637 goto err;
1638
Michael Walled0e76002020-03-25 10:06:57 +01001639 if (!sport->dma_tx_chan)
1640 goto err;
1641
Michael Walle159381d2020-03-06 22:44:30 +01001642 ret = lpuart_dma_tx_request(&sport->port);
Michael Walled7c53fb2020-03-25 10:06:58 +01001643 if (ret)
Michael Walle159381d2020-03-06 22:44:30 +01001644 goto err;
1645
1646 init_waitqueue_head(&sport->dma_wait);
1647 sport->lpuart_dma_tx_use = true;
1648 if (lpuart_is_32(sport)) {
1649 uartbaud = lpuart32_read(&sport->port, UARTBAUD);
1650 lpuart32_write(&sport->port,
1651 uartbaud | UARTBAUD_TDMAE, UARTBAUD);
1652 } else {
1653 writeb(readb(sport->port.membase + UARTCR5) |
1654 UARTCR5_TDMAS, sport->port.membase + UARTCR5);
1655 }
1656
1657 return;
1658
1659err:
1660 sport->lpuart_dma_tx_use = false;
Andrey Smirnov59821992019-08-05 11:56:56 -07001661}
1662
Andrey Smirnovfd60e8e2019-08-05 11:56:57 -07001663static void lpuart_rx_dma_startup(struct lpuart_port *sport)
1664{
Michael Walle159381d2020-03-06 22:44:30 +01001665 int ret;
Angelo Dureghellof4eef222020-10-04 18:11:44 +02001666 unsigned char cr3;
Andrey Smirnovfd60e8e2019-08-05 11:56:57 -07001667
Michael Walle8cac2f62021-05-12 16:12:55 +02001668 if (uart_console(&sport->port))
1669 goto err;
1670
Michael Walled0e76002020-03-25 10:06:57 +01001671 if (!sport->dma_rx_chan)
Michael Walle159381d2020-03-06 22:44:30 +01001672 goto err;
Michael Walle159381d2020-03-06 22:44:30 +01001673
1674 ret = lpuart_start_rx_dma(sport);
1675 if (ret)
1676 goto err;
1677
1678 /* set Rx DMA timeout */
1679 sport->dma_rx_timeout = msecs_to_jiffies(DMA_RX_TIMEOUT);
1680 if (!sport->dma_rx_timeout)
1681 sport->dma_rx_timeout = 1;
1682
1683 sport->lpuart_dma_rx_use = true;
1684 rx_dma_timer_init(sport);
1685
Michael Walleccf08fd2021-05-12 16:12:47 +02001686 if (sport->port.has_sysrq && !lpuart_is_32(sport)) {
Angelo Dureghellof4eef222020-10-04 18:11:44 +02001687 cr3 = readb(sport->port.membase + UARTCR3);
1688 cr3 |= UARTCR3_FEIE;
1689 writeb(cr3, sport->port.membase + UARTCR3);
1690 }
1691
Michael Walle159381d2020-03-06 22:44:30 +01001692 return;
1693
1694err:
1695 sport->lpuart_dma_rx_use = false;
Andrey Smirnovfd60e8e2019-08-05 11:56:57 -07001696}
1697
Jingchang Luc9e2e942013-06-07 09:20:40 +08001698static int lpuart_startup(struct uart_port *port)
1699{
1700 struct lpuart_port *sport = container_of(port, struct lpuart_port, port);
Jingchang Luc9e2e942013-06-07 09:20:40 +08001701 unsigned long flags;
1702 unsigned char temp;
1703
Stefan Agnered9891b2014-07-02 18:02:57 +02001704 /* determine FIFO size and enable FIFO mode */
1705 temp = readb(sport->port.membase + UARTPFIFO);
1706
Fugang Duanf77ebb22019-07-17 13:19:30 +08001707 sport->txfifo_size = UARTFIFO_DEPTH((temp >> UARTPFIFO_TXSIZE_OFF) &
1708 UARTPFIFO_FIFOSIZE_MASK);
Stefan Agner4e8f2452015-03-13 14:51:50 +01001709 sport->port.fifosize = sport->txfifo_size;
1710
Fugang Duanf77ebb22019-07-17 13:19:30 +08001711 sport->rxfifo_size = UARTFIFO_DEPTH((temp >> UARTPFIFO_RXSIZE_OFF) &
1712 UARTPFIFO_FIFOSIZE_MASK);
Stefan Agnered9891b2014-07-02 18:02:57 +02001713
Michael Walled0e76002020-03-25 10:06:57 +01001714 lpuart_request_dma(sport);
1715
Jingchang Luc9e2e942013-06-07 09:20:40 +08001716 spin_lock_irqsave(&sport->port.lock, flags);
1717
Andrey Smirnov352bd552019-08-05 11:56:59 -07001718 lpuart_setup_watermark_enable(sport);
Jingchang Luc9e2e942013-06-07 09:20:40 +08001719
Andrey Smirnovfd60e8e2019-08-05 11:56:57 -07001720 lpuart_rx_dma_startup(sport);
Andrey Smirnov59821992019-08-05 11:56:56 -07001721 lpuart_tx_dma_startup(sport);
Bhuvanchandra DV5887ad42016-07-19 13:13:07 +05301722
Jingchang Luc9e2e942013-06-07 09:20:40 +08001723 spin_unlock_irqrestore(&sport->port.lock, flags);
Bhuvanchandra DV5887ad42016-07-19 13:13:07 +05301724
Jingchang Luc9e2e942013-06-07 09:20:40 +08001725 return 0;
1726}
1727
Andrey Smirnov4ff69042019-08-05 11:56:58 -07001728static void lpuart32_configure(struct lpuart_port *sport)
1729{
1730 unsigned long temp;
1731
1732 if (sport->lpuart_dma_rx_use) {
1733 /* RXWATER must be 0 */
1734 temp = lpuart32_read(&sport->port, UARTWATER);
1735 temp &= ~(UARTWATER_WATER_MASK << UARTWATER_RXWATER_OFF);
1736 lpuart32_write(&sport->port, temp, UARTWATER);
1737 }
1738 temp = lpuart32_read(&sport->port, UARTCTRL);
1739 if (!sport->lpuart_dma_rx_use)
1740 temp |= UARTCTRL_RIE;
1741 if (!sport->lpuart_dma_tx_use)
1742 temp |= UARTCTRL_TIE;
1743 lpuart32_write(&sport->port, temp, UARTCTRL);
1744}
1745
Jingchang Lu380c9662014-07-14 17:41:11 +08001746static int lpuart32_startup(struct uart_port *port)
1747{
1748 struct lpuart_port *sport = container_of(port, struct lpuart_port, port);
Jingchang Lu380c9662014-07-14 17:41:11 +08001749 unsigned long flags;
1750 unsigned long temp;
1751
1752 /* determine FIFO size */
Dong Aishenga0204f22017-06-13 10:55:49 +08001753 temp = lpuart32_read(&sport->port, UARTFIFO);
Jingchang Lu380c9662014-07-14 17:41:11 +08001754
Fugang Duanf77ebb22019-07-17 13:19:30 +08001755 sport->txfifo_size = UARTFIFO_DEPTH((temp >> UARTFIFO_TXSIZE_OFF) &
1756 UARTFIFO_FIFOSIZE_MASK);
Atsushi Nemotob0b2735a2019-01-21 17:36:21 +09001757 sport->port.fifosize = sport->txfifo_size;
1758
Fugang Duanf77ebb22019-07-17 13:19:30 +08001759 sport->rxfifo_size = UARTFIFO_DEPTH((temp >> UARTFIFO_RXSIZE_OFF) &
1760 UARTFIFO_FIFOSIZE_MASK);
Jingchang Lu380c9662014-07-14 17:41:11 +08001761
Michael Wallec2f448c2020-03-06 22:44:32 +01001762 /*
Vladimir Olteanc97f2a62020-10-23 04:34:29 +03001763 * The LS1021A and LS1028A have a fixed FIFO depth of 16 words.
1764 * Although they support the RX/TXSIZE fields, their encoding is
1765	 * different. E.g. the reference manual states that 0b101 means 16 words.
Michael Wallec2f448c2020-03-06 22:44:32 +01001766 */
Vladimir Olteanc97f2a62020-10-23 04:34:29 +03001767 if (is_layerscape_lpuart(sport)) {
Michael Wallec2f448c2020-03-06 22:44:32 +01001768 sport->rxfifo_size = 16;
1769 sport->txfifo_size = 16;
1770 sport->port.fifosize = sport->txfifo_size;
1771 }
1772
Michael Walled0e76002020-03-25 10:06:57 +01001773 lpuart_request_dma(sport);
1774
Jingchang Lu380c9662014-07-14 17:41:11 +08001775 spin_lock_irqsave(&sport->port.lock, flags);
1776
Andrey Smirnov352bd552019-08-05 11:56:59 -07001777 lpuart32_setup_watermark_enable(sport);
Atsushi Nemoto42b68762019-01-23 12:20:17 +09001778
Andrey Smirnovfd60e8e2019-08-05 11:56:57 -07001779 lpuart_rx_dma_startup(sport);
Andrey Smirnov59821992019-08-05 11:56:56 -07001780 lpuart_tx_dma_startup(sport);
Atsushi Nemoto42b68762019-01-23 12:20:17 +09001781
Andrey Smirnov4ff69042019-08-05 11:56:58 -07001782 lpuart32_configure(sport);
Jingchang Lu380c9662014-07-14 17:41:11 +08001783
1784 spin_unlock_irqrestore(&sport->port.lock, flags);
1785 return 0;
1786}
1787
Andrey Smirnov769d55c2019-07-29 12:52:20 -07001788static void lpuart_dma_shutdown(struct lpuart_port *sport)
1789{
1790 if (sport->lpuart_dma_rx_use) {
1791 del_timer_sync(&sport->lpuart_timer);
1792 lpuart_dma_rx_free(&sport->port);
1793 }
1794
1795 if (sport->lpuart_dma_tx_use) {
1796 if (wait_event_interruptible(sport->dma_wait,
1797 !sport->dma_tx_in_progress) != false) {
1798 sport->dma_tx_in_progress = false;
1799 dmaengine_terminate_all(sport->dma_tx_chan);
1800 }
1801 }
Michael Walle159381d2020-03-06 22:44:30 +01001802
1803 if (sport->dma_tx_chan)
1804 dma_release_channel(sport->dma_tx_chan);
1805 if (sport->dma_rx_chan)
1806 dma_release_channel(sport->dma_rx_chan);
Andrey Smirnov769d55c2019-07-29 12:52:20 -07001807}
1808
Jingchang Luc9e2e942013-06-07 09:20:40 +08001809static void lpuart_shutdown(struct uart_port *port)
1810{
1811 struct lpuart_port *sport = container_of(port, struct lpuart_port, port);
1812 unsigned char temp;
1813 unsigned long flags;
1814
1815 spin_lock_irqsave(&port->lock, flags);
1816
1817 /* disable Rx/Tx and interrupts */
1818 temp = readb(port->membase + UARTCR2);
1819 temp &= ~(UARTCR2_TE | UARTCR2_RE |
1820 UARTCR2_TIE | UARTCR2_TCIE | UARTCR2_RIE);
1821 writeb(temp, port->membase + UARTCR2);
1822
1823 spin_unlock_irqrestore(&port->lock, flags);
1824
Andrey Smirnov769d55c2019-07-29 12:52:20 -07001825 lpuart_dma_shutdown(sport);
Jingchang Luc9e2e942013-06-07 09:20:40 +08001826}
1827
Jingchang Lu380c9662014-07-14 17:41:11 +08001828static void lpuart32_shutdown(struct uart_port *port)
1829{
Atsushi Nemoto42b68762019-01-23 12:20:17 +09001830 struct lpuart_port *sport =
1831 container_of(port, struct lpuart_port, port);
Jingchang Lu380c9662014-07-14 17:41:11 +08001832 unsigned long temp;
1833 unsigned long flags;
1834
1835 spin_lock_irqsave(&port->lock, flags);
1836
1837 /* disable Rx/Tx and interrupts */
Dong Aishenga0204f22017-06-13 10:55:49 +08001838 temp = lpuart32_read(port, UARTCTRL);
Jingchang Lu380c9662014-07-14 17:41:11 +08001839 temp &= ~(UARTCTRL_TE | UARTCTRL_RE |
1840 UARTCTRL_TIE | UARTCTRL_TCIE | UARTCTRL_RIE);
Dong Aishenga0204f22017-06-13 10:55:49 +08001841 lpuart32_write(port, temp, UARTCTRL);
Jingchang Lu380c9662014-07-14 17:41:11 +08001842
1843 spin_unlock_irqrestore(&port->lock, flags);
Atsushi Nemoto42b68762019-01-23 12:20:17 +09001844
Andrey Smirnov769d55c2019-07-29 12:52:20 -07001845 lpuart_dma_shutdown(sport);
Jingchang Lu380c9662014-07-14 17:41:11 +08001846}
1847
Jingchang Luc9e2e942013-06-07 09:20:40 +08001848static void
1849lpuart_set_termios(struct uart_port *port, struct ktermios *termios,
1850 struct ktermios *old)
1851{
1852 struct lpuart_port *sport = container_of(port, struct lpuart_port, port);
1853 unsigned long flags;
Bhuvanchandra DVaa9e7d72016-07-19 13:13:06 +05301854 unsigned char cr1, old_cr1, old_cr2, cr3, cr4, bdh, modem;
Jingchang Luc9e2e942013-06-07 09:20:40 +08001855 unsigned int baud;
1856 unsigned int old_csize = old ? old->c_cflag & CSIZE : CS8;
1857 unsigned int sbr, brfa;
1858
1859 cr1 = old_cr1 = readb(sport->port.membase + UARTCR1);
1860 old_cr2 = readb(sport->port.membase + UARTCR2);
Bhuvanchandra DVaa9e7d72016-07-19 13:13:06 +05301861 cr3 = readb(sport->port.membase + UARTCR3);
Jingchang Luc9e2e942013-06-07 09:20:40 +08001862 cr4 = readb(sport->port.membase + UARTCR4);
1863 bdh = readb(sport->port.membase + UARTBDH);
1864 modem = readb(sport->port.membase + UARTMODEM);
1865 /*
1866	 * Only CS8 and CS7 are supported, and CS7 requires PE to be enabled.
1867	 * Supported modes:
1868 * - (7,e/o,1)
1869 * - (8,n,1)
1870 * - (8,m/s,1)
1871 * - (8,e/o,1)
1872 */
1873 while ((termios->c_cflag & CSIZE) != CS8 &&
1874 (termios->c_cflag & CSIZE) != CS7) {
1875 termios->c_cflag &= ~CSIZE;
1876 termios->c_cflag |= old_csize;
1877 old_csize = CS8;
1878 }
1879
1880 if ((termios->c_cflag & CSIZE) == CS8 ||
1881 (termios->c_cflag & CSIZE) == CS7)
1882 cr1 = old_cr1 & ~UARTCR1_M;
1883
1884 if (termios->c_cflag & CMSPAR) {
1885 if ((termios->c_cflag & CSIZE) != CS8) {
1886 termios->c_cflag &= ~CSIZE;
1887 termios->c_cflag |= CS8;
1888 }
1889 cr1 |= UARTCR1_M;
1890 }
1891
Bhuvanchandra DV03895cf2016-07-19 13:13:10 +05301892 /*
1893 * When auto RS-485 RTS mode is enabled,
1894	 * hardware flow control needs to be disabled.
1895 */
1896 if (sport->port.rs485.flags & SER_RS485_ENABLED)
1897 termios->c_cflag &= ~CRTSCTS;
1898
Andrey Smirnovd26454e2019-07-29 12:52:12 -07001899 if (termios->c_cflag & CRTSCTS)
Andrey Smirnovbcfa46b2019-07-29 12:52:13 -07001900 modem |= UARTMODEM_RXRTSE | UARTMODEM_TXCTSE;
Andrey Smirnovd26454e2019-07-29 12:52:12 -07001901 else
Jingchang Luc9e2e942013-06-07 09:20:40 +08001902 modem &= ~(UARTMODEM_RXRTSE | UARTMODEM_TXCTSE);
Jingchang Luc9e2e942013-06-07 09:20:40 +08001903
Andrey Smirnov76e3f2a2019-07-29 12:52:14 -07001904 termios->c_cflag &= ~CSTOPB;
Jingchang Luc9e2e942013-06-07 09:20:40 +08001905
1906	/* parity must be enabled when using CS7 to match the 8-bit format */
1907 if ((termios->c_cflag & CSIZE) == CS7)
1908 termios->c_cflag |= PARENB;
1909
Andrey Smirnovbcfa46b2019-07-29 12:52:13 -07001910 if (termios->c_cflag & PARENB) {
Jingchang Luc9e2e942013-06-07 09:20:40 +08001911 if (termios->c_cflag & CMSPAR) {
1912 cr1 &= ~UARTCR1_PE;
Bhuvanchandra DVaa9e7d72016-07-19 13:13:06 +05301913 if (termios->c_cflag & PARODD)
1914 cr3 |= UARTCR3_T8;
1915 else
1916 cr3 &= ~UARTCR3_T8;
Jingchang Luc9e2e942013-06-07 09:20:40 +08001917 } else {
1918 cr1 |= UARTCR1_PE;
1919 if ((termios->c_cflag & CSIZE) == CS8)
1920 cr1 |= UARTCR1_M;
1921 if (termios->c_cflag & PARODD)
1922 cr1 |= UARTCR1_PT;
1923 else
1924 cr1 &= ~UARTCR1_PT;
1925 }
Andy Duan397bd922018-10-16 07:32:22 +00001926 } else {
1927 cr1 &= ~UARTCR1_PE;
Jingchang Luc9e2e942013-06-07 09:20:40 +08001928 }
1929
1930 /* ask the core to calculate the divisor */
1931 baud = uart_get_baud_rate(port, termios, old, 50, port->uartclk / 16);
1932
Nikita Yushchenko54a44d52016-12-04 18:49:28 +03001933 /*
1934 * Need to update the Ring buffer length according to the selected
1935 * baud rate and restart Rx DMA path.
1936 *
1937	 * Since the timer function acquires sport->port.lock, stop the timer
1938	 * before acquiring the same lock; otherwise del_timer_sync() can deadlock.
1939 */
1940 if (old && sport->lpuart_dma_rx_use) {
1941 del_timer_sync(&sport->lpuart_timer);
1942 lpuart_dma_rx_free(&sport->port);
1943 }
1944
Jingchang Luc9e2e942013-06-07 09:20:40 +08001945 spin_lock_irqsave(&sport->port.lock, flags);
1946
1947 sport->port.read_status_mask = 0;
1948 if (termios->c_iflag & INPCK)
Andrey Smirnovbcfa46b2019-07-29 12:52:13 -07001949 sport->port.read_status_mask |= UARTSR1_FE | UARTSR1_PE;
Peter Hurleyef8b9dd2014-06-16 08:10:41 -04001950 if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
Jingchang Luc9e2e942013-06-07 09:20:40 +08001951 sport->port.read_status_mask |= UARTSR1_FE;
1952
1953 /* characters to ignore */
1954 sport->port.ignore_status_mask = 0;
1955 if (termios->c_iflag & IGNPAR)
1956 sport->port.ignore_status_mask |= UARTSR1_PE;
1957 if (termios->c_iflag & IGNBRK) {
1958 sport->port.ignore_status_mask |= UARTSR1_FE;
1959 /*
1960 * if we're ignoring parity and break indicators,
1961 * ignore overruns too (for real raw support).
1962 */
1963 if (termios->c_iflag & IGNPAR)
1964 sport->port.ignore_status_mask |= UARTSR1_OR;
1965 }
1966
1967 /* update the per-port timeout */
1968 uart_update_timeout(port, termios->c_cflag, baud);
1969
1970	/* wait for the transmit engine to complete */
Andrey Smirnov56dd6272019-07-29 12:52:16 -07001971 lpuart_wait_bit_set(&sport->port, UARTSR1, UARTSR1_TC);
Jingchang Luc9e2e942013-06-07 09:20:40 +08001972
1973 /* disable transmit and receive */
1974 writeb(old_cr2 & ~(UARTCR2_TE | UARTCR2_RE),
1975 sport->port.membase + UARTCR2);
1976
1977 sbr = sport->port.uartclk / (16 * baud);
1978 brfa = ((sport->port.uartclk - (16 * sbr * baud)) * 2) / baud;
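	/*
	 * Editorial worked example, assuming uartclk = 66 MHz and baud =
	 * 115200: sbr = 66000000 / 1843200 = 35 and
	 * brfa = ((66000000 - 64512000) * 2) / 115200 = 25, so the line
	 * runs at 66000000 / (16 * (35 + 25/32)) ~= 115284, i.e. well
	 * within 0.1% of the requested rate.
	 */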
1979 bdh &= ~UARTBDH_SBR_MASK;
1980 bdh |= (sbr >> 8) & 0x1F;
1981 cr4 &= ~UARTCR4_BRFA_MASK;
1982 brfa &= UARTCR4_BRFA_MASK;
1983 writeb(cr4 | brfa, sport->port.membase + UARTCR4);
1984 writeb(bdh, sport->port.membase + UARTBDH);
1985 writeb(sbr & 0xFF, sport->port.membase + UARTBDL);
Bhuvanchandra DVaa9e7d72016-07-19 13:13:06 +05301986 writeb(cr3, sport->port.membase + UARTCR3);
Jingchang Luc9e2e942013-06-07 09:20:40 +08001987 writeb(cr1, sport->port.membase + UARTCR1);
1988 writeb(modem, sport->port.membase + UARTMODEM);
1989
1990 /* restore control register */
1991 writeb(old_cr2, sport->port.membase + UARTCR2);
1992
Nikita Yushchenko54a44d52016-12-04 18:49:28 +03001993 if (old && sport->lpuart_dma_rx_use) {
1994 if (!lpuart_start_rx_dma(sport))
Bhuvanchandra DV5887ad42016-07-19 13:13:07 +05301995 rx_dma_timer_init(sport);
Nikita Yushchenko54a44d52016-12-04 18:49:28 +03001996 else
Bhuvanchandra DV5887ad42016-07-19 13:13:07 +05301997 sport->lpuart_dma_rx_use = false;
Bhuvanchandra DV5887ad42016-07-19 13:13:07 +05301998 }
1999
Jingchang Luc9e2e942013-06-07 09:20:40 +08002000 spin_unlock_irqrestore(&sport->port.lock, flags);
2001}
2002
Michael Wallee33253f2020-03-06 22:44:33 +01002003static void __lpuart32_serial_setbrg(struct uart_port *port,
2004 unsigned int baudrate, bool use_rx_dma,
2005 bool use_tx_dma)
Dong Aishenga6d75142017-06-13 10:55:54 +08002006{
2007 u32 sbr, osr, baud_diff, tmp_osr, tmp_sbr, tmp_diff, tmp;
Michael Wallee33253f2020-03-06 22:44:33 +01002008 u32 clk = port->uartclk;
Dong Aishenga6d75142017-06-13 10:55:54 +08002009
2010 /*
2011 * The idea is to use the best OSR (over-sampling rate) possible.
2012 * Note, OSR is typically hard-set to 16 in other LPUART instantiations.
2013	 * Loop through the supported OSR values to find the one that, together
2014	 * with SBR, yields the minimum baud_diff.
2015 *
2016 * Calculation Formula:
2017 * Baud Rate = baud clock / ((OSR+1) × SBR)
2018 */
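	/*
	 * Editorial worked example, assuming a 24 MHz baud clock and a
	 * 115200 baud request: the loop below settles on OSR = 26 and
	 * SBR = 8, i.e. 24000000 / (26 * 8) = 115384, about 0.16% high;
	 * the OSR field is later programmed as osr - 1 and SBR as sbr.
	 */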
2019 baud_diff = baudrate;
2020 osr = 0;
2021 sbr = 0;
2022
2023 for (tmp_osr = 4; tmp_osr <= 32; tmp_osr++) {
2024 /* calculate the temporary sbr value */
2025 tmp_sbr = (clk / (baudrate * tmp_osr));
2026 if (tmp_sbr == 0)
2027 tmp_sbr = 1;
2028
2029 /*
2030 * calculate the baud rate difference based on the temporary
2031 * osr and sbr values
2032 */
2033 tmp_diff = clk / (tmp_osr * tmp_sbr) - baudrate;
2034
2035 /* select best values between sbr and sbr+1 */
2036 tmp = clk / (tmp_osr * (tmp_sbr + 1));
2037 if (tmp_diff > (baudrate - tmp)) {
2038 tmp_diff = baudrate - tmp;
2039 tmp_sbr++;
2040 }
2041
Vabhav Sharmad10ee1d2020-06-26 16:50:34 +05302042 if (tmp_sbr > UARTBAUD_SBR_MASK)
2043 continue;
2044
Dong Aishenga6d75142017-06-13 10:55:54 +08002045 if (tmp_diff <= baud_diff) {
2046 baud_diff = tmp_diff;
2047 osr = tmp_osr;
2048 sbr = tmp_sbr;
2049
2050 if (!baud_diff)
2051 break;
2052 }
2053 }
2054
2055	/* warn if the requested baud rate cannot be met within 3% */
2056 if (baud_diff > ((baudrate / 100) * 3))
Michael Wallee33253f2020-03-06 22:44:33 +01002057 dev_warn(port->dev,
Dong Aishenga6d75142017-06-13 10:55:54 +08002058 "unacceptable baud rate difference of more than 3%%\n");
2059
Michael Wallee33253f2020-03-06 22:44:33 +01002060 tmp = lpuart32_read(port, UARTBAUD);
Dong Aishenga6d75142017-06-13 10:55:54 +08002061
2062 if ((osr > 3) && (osr < 8))
2063 tmp |= UARTBAUD_BOTHEDGE;
2064
2065 tmp &= ~(UARTBAUD_OSR_MASK << UARTBAUD_OSR_SHIFT);
Andrey Smirnovbcfa46b2019-07-29 12:52:13 -07002066 tmp |= ((osr-1) & UARTBAUD_OSR_MASK) << UARTBAUD_OSR_SHIFT;
Dong Aishenga6d75142017-06-13 10:55:54 +08002067
2068 tmp &= ~UARTBAUD_SBR_MASK;
2069 tmp |= sbr & UARTBAUD_SBR_MASK;
2070
Michael Wallee33253f2020-03-06 22:44:33 +01002071 if (!use_rx_dma)
Atsushi Nemoto42b68762019-01-23 12:20:17 +09002072 tmp &= ~UARTBAUD_RDMAE;
Michael Wallee33253f2020-03-06 22:44:33 +01002073 if (!use_tx_dma)
Atsushi Nemoto42b68762019-01-23 12:20:17 +09002074 tmp &= ~UARTBAUD_TDMAE;
Dong Aishenga6d75142017-06-13 10:55:54 +08002075
Michael Wallee33253f2020-03-06 22:44:33 +01002076 lpuart32_write(port, tmp, UARTBAUD);
Dong Aishenga6d75142017-06-13 10:55:54 +08002077}
2078
Michael Wallee33253f2020-03-06 22:44:33 +01002079static void lpuart32_serial_setbrg(struct lpuart_port *sport,
2080 unsigned int baudrate)
2081{
2082 __lpuart32_serial_setbrg(&sport->port, baudrate,
2083 sport->lpuart_dma_rx_use,
2084 sport->lpuart_dma_tx_use);
2085}
2086
2087
Dong Aishenga6d75142017-06-13 10:55:54 +08002088static void
Jingchang Lu380c9662014-07-14 17:41:11 +08002089lpuart32_set_termios(struct uart_port *port, struct ktermios *termios,
2090 struct ktermios *old)
2091{
2092 struct lpuart_port *sport = container_of(port, struct lpuart_port, port);
2093 unsigned long flags;
Fugang Duan48422152021-08-23 16:17:33 +08002094 unsigned long ctrl, old_ctrl, bd, modem;
Jingchang Lu380c9662014-07-14 17:41:11 +08002095 unsigned int baud;
2096 unsigned int old_csize = old ? old->c_cflag & CSIZE : CS8;
Jingchang Lu380c9662014-07-14 17:41:11 +08002097
Dong Aishenga0204f22017-06-13 10:55:49 +08002098 ctrl = old_ctrl = lpuart32_read(&sport->port, UARTCTRL);
Fugang Duan48422152021-08-23 16:17:33 +08002099 bd = lpuart32_read(&sport->port, UARTBAUD);
Dong Aishenga0204f22017-06-13 10:55:49 +08002100 modem = lpuart32_read(&sport->port, UARTMODIR);
Jingchang Lu380c9662014-07-14 17:41:11 +08002101 /*
2102	 * Only CS8 and CS7 are supported, and CS7 requires PE to be enabled.
2103	 * Supported modes:
2104 * - (7,e/o,1)
2105 * - (8,n,1)
2106 * - (8,m/s,1)
2107 * - (8,e/o,1)
2108 */
2109 while ((termios->c_cflag & CSIZE) != CS8 &&
2110 (termios->c_cflag & CSIZE) != CS7) {
2111 termios->c_cflag &= ~CSIZE;
2112 termios->c_cflag |= old_csize;
2113 old_csize = CS8;
2114 }
2115
2116 if ((termios->c_cflag & CSIZE) == CS8 ||
2117 (termios->c_cflag & CSIZE) == CS7)
2118 ctrl = old_ctrl & ~UARTCTRL_M;
2119
2120 if (termios->c_cflag & CMSPAR) {
2121 if ((termios->c_cflag & CSIZE) != CS8) {
2122 termios->c_cflag &= ~CSIZE;
2123 termios->c_cflag |= CS8;
2124 }
2125 ctrl |= UARTCTRL_M;
2126 }
2127
Philippe Schenker67b01832019-10-17 14:14:42 +00002128 /*
2129 * When auto RS-485 RTS mode is enabled,
2130	 * hardware flow control needs to be disabled.
2131 */
2132 if (sport->port.rs485.flags & SER_RS485_ENABLED)
2133 termios->c_cflag &= ~CRTSCTS;
2134
Jingchang Lu380c9662014-07-14 17:41:11 +08002135 if (termios->c_cflag & CRTSCTS) {
Philippe Schenkere3553fe2019-10-17 14:14:40 +00002136 modem |= (UARTMODIR_RXRTSE | UARTMODIR_TXCTSE);
Jingchang Lu380c9662014-07-14 17:41:11 +08002137 } else {
2138 termios->c_cflag &= ~CRTSCTS;
Philippe Schenkere3553fe2019-10-17 14:14:40 +00002139 modem &= ~(UARTMODIR_RXRTSE | UARTMODIR_TXCTSE);
Jingchang Lu380c9662014-07-14 17:41:11 +08002140 }
2141
2142 if (termios->c_cflag & CSTOPB)
Fugang Duan48422152021-08-23 16:17:33 +08002143 bd |= UARTBAUD_SBNS;
2144 else
2145 bd &= ~UARTBAUD_SBNS;
Jingchang Lu380c9662014-07-14 17:41:11 +08002146
2147	/* parity must be enabled when using CS7 to match the 8-bit format */
2148 if ((termios->c_cflag & CSIZE) == CS7)
2149 termios->c_cflag |= PARENB;
2150
2151 if ((termios->c_cflag & PARENB)) {
2152 if (termios->c_cflag & CMSPAR) {
2153 ctrl &= ~UARTCTRL_PE;
2154 ctrl |= UARTCTRL_M;
2155 } else {
Andy Duan61e169e2018-10-16 07:32:19 +00002156 ctrl |= UARTCTRL_PE;
Jingchang Lu380c9662014-07-14 17:41:11 +08002157 if ((termios->c_cflag & CSIZE) == CS8)
2158 ctrl |= UARTCTRL_M;
2159 if (termios->c_cflag & PARODD)
2160 ctrl |= UARTCTRL_PT;
2161 else
2162 ctrl &= ~UARTCTRL_PT;
2163 }
Andy Duan397bd922018-10-16 07:32:22 +00002164 } else {
2165 ctrl &= ~UARTCTRL_PE;
Jingchang Lu380c9662014-07-14 17:41:11 +08002166 }
2167
2168 /* ask the core to calculate the divisor */
Tomonori Sakita815d8352019-01-21 17:34:16 +09002169 baud = uart_get_baud_rate(port, termios, old, 50, port->uartclk / 4);
Jingchang Lu380c9662014-07-14 17:41:11 +08002170
Atsushi Nemoto42b68762019-01-23 12:20:17 +09002171 /*
2172 * Need to update the Ring buffer length according to the selected
2173 * baud rate and restart Rx DMA path.
2174 *
2175	 * Since the timer function acquires sport->port.lock, stop the timer
2176	 * before acquiring the same lock; otherwise del_timer_sync() can deadlock.
2177 */
2178 if (old && sport->lpuart_dma_rx_use) {
2179 del_timer_sync(&sport->lpuart_timer);
2180 lpuart_dma_rx_free(&sport->port);
2181 }
2182
Jingchang Lu380c9662014-07-14 17:41:11 +08002183 spin_lock_irqsave(&sport->port.lock, flags);
2184
2185 sport->port.read_status_mask = 0;
2186 if (termios->c_iflag & INPCK)
Andrey Smirnovbcfa46b2019-07-29 12:52:13 -07002187 sport->port.read_status_mask |= UARTSTAT_FE | UARTSTAT_PE;
Jingchang Lu380c9662014-07-14 17:41:11 +08002188 if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
2189 sport->port.read_status_mask |= UARTSTAT_FE;
2190
2191 /* characters to ignore */
2192 sport->port.ignore_status_mask = 0;
2193 if (termios->c_iflag & IGNPAR)
2194 sport->port.ignore_status_mask |= UARTSTAT_PE;
2195 if (termios->c_iflag & IGNBRK) {
2196 sport->port.ignore_status_mask |= UARTSTAT_FE;
2197 /*
2198 * if we're ignoring parity and break indicators,
2199 * ignore overruns too (for real raw support).
2200 */
2201 if (termios->c_iflag & IGNPAR)
2202 sport->port.ignore_status_mask |= UARTSTAT_OR;
2203 }
2204
2205 /* update the per-port timeout */
2206 uart_update_timeout(port, termios->c_cflag, baud);
2207
2208	/* wait for the transmit engine to complete */
Andrey Smirnov56dd6272019-07-29 12:52:16 -07002209 lpuart32_wait_bit_set(&sport->port, UARTSTAT, UARTSTAT_TC);
Jingchang Lu380c9662014-07-14 17:41:11 +08002210
2211 /* disable transmit and receive */
Dong Aishenga0204f22017-06-13 10:55:49 +08002212 lpuart32_write(&sport->port, old_ctrl & ~(UARTCTRL_TE | UARTCTRL_RE),
2213 UARTCTRL);
Jingchang Lu380c9662014-07-14 17:41:11 +08002214
Fugang Duan48422152021-08-23 16:17:33 +08002215 lpuart32_write(&sport->port, bd, UARTBAUD);
Dong Aishenga6d75142017-06-13 10:55:54 +08002216 lpuart32_serial_setbrg(sport, baud);
Dong Aishenga0204f22017-06-13 10:55:49 +08002217 lpuart32_write(&sport->port, modem, UARTMODIR);
2218 lpuart32_write(&sport->port, ctrl, UARTCTRL);
Jingchang Lu380c9662014-07-14 17:41:11 +08002219 /* restore control register */
2220
Atsushi Nemoto42b68762019-01-23 12:20:17 +09002221 if (old && sport->lpuart_dma_rx_use) {
2222 if (!lpuart_start_rx_dma(sport))
2223 rx_dma_timer_init(sport);
2224 else
2225 sport->lpuart_dma_rx_use = false;
2226 }
2227
Jingchang Lu380c9662014-07-14 17:41:11 +08002228 spin_unlock_irqrestore(&sport->port.lock, flags);
2229}
2230
Jingchang Luc9e2e942013-06-07 09:20:40 +08002231static const char *lpuart_type(struct uart_port *port)
2232{
2233 return "FSL_LPUART";
2234}
2235
2236static void lpuart_release_port(struct uart_port *port)
2237{
2238 /* nothing to do */
2239}
2240
2241static int lpuart_request_port(struct uart_port *port)
2242{
2243 return 0;
2244}
2245
2246/* configure/autoconfigure the port */
2247static void lpuart_config_port(struct uart_port *port, int flags)
2248{
2249 if (flags & UART_CONFIG_TYPE)
2250 port->type = PORT_LPUART;
2251}
2252
2253static int lpuart_verify_port(struct uart_port *port, struct serial_struct *ser)
2254{
2255 int ret = 0;
2256
2257 if (ser->type != PORT_UNKNOWN && ser->type != PORT_LPUART)
2258 ret = -EINVAL;
2259 if (port->irq != ser->irq)
2260 ret = -EINVAL;
2261 if (ser->io_type != UPIO_MEM)
2262 ret = -EINVAL;
2263 if (port->uartclk / 16 != ser->baud_base)
2264 ret = -EINVAL;
2265 if (port->iobase != ser->port)
2266 ret = -EINVAL;
2267 if (ser->hub6 != 0)
2268 ret = -EINVAL;
2269 return ret;
2270}
2271
Julia Lawall069a47e2016-09-01 19:51:35 +02002272static const struct uart_ops lpuart_pops = {
Jingchang Luc9e2e942013-06-07 09:20:40 +08002273 .tx_empty = lpuart_tx_empty,
2274 .set_mctrl = lpuart_set_mctrl,
2275 .get_mctrl = lpuart_get_mctrl,
2276 .stop_tx = lpuart_stop_tx,
2277 .start_tx = lpuart_start_tx,
2278 .stop_rx = lpuart_stop_rx,
Jingchang Luc9e2e942013-06-07 09:20:40 +08002279 .break_ctl = lpuart_break_ctl,
2280 .startup = lpuart_startup,
2281 .shutdown = lpuart_shutdown,
2282 .set_termios = lpuart_set_termios,
2283 .type = lpuart_type,
2284 .request_port = lpuart_request_port,
2285 .release_port = lpuart_release_port,
2286 .config_port = lpuart_config_port,
2287 .verify_port = lpuart_verify_port,
Stefan Agnerbfc2e072015-01-26 01:10:16 +01002288 .flush_buffer = lpuart_flush_buffer,
Nicolae Rosia2a41bc22016-10-04 15:46:16 +03002289#if defined(CONFIG_CONSOLE_POLL)
2290 .poll_init = lpuart_poll_init,
2291 .poll_get_char = lpuart_poll_get_char,
2292 .poll_put_char = lpuart_poll_put_char,
2293#endif
Jingchang Luc9e2e942013-06-07 09:20:40 +08002294};
2295
Julia Lawall069a47e2016-09-01 19:51:35 +02002296static const struct uart_ops lpuart32_pops = {
Jingchang Lu380c9662014-07-14 17:41:11 +08002297 .tx_empty = lpuart32_tx_empty,
2298 .set_mctrl = lpuart32_set_mctrl,
2299 .get_mctrl = lpuart32_get_mctrl,
2300 .stop_tx = lpuart32_stop_tx,
2301 .start_tx = lpuart32_start_tx,
2302 .stop_rx = lpuart32_stop_rx,
2303 .break_ctl = lpuart32_break_ctl,
2304 .startup = lpuart32_startup,
2305 .shutdown = lpuart32_shutdown,
2306 .set_termios = lpuart32_set_termios,
2307 .type = lpuart_type,
2308 .request_port = lpuart_request_port,
2309 .release_port = lpuart_release_port,
2310 .config_port = lpuart_config_port,
2311 .verify_port = lpuart_verify_port,
Stefan Agnerbfc2e072015-01-26 01:10:16 +01002312 .flush_buffer = lpuart_flush_buffer,
Marius Vlada5fa2662017-07-16 01:00:58 +03002313#if defined(CONFIG_CONSOLE_POLL)
2314 .poll_init = lpuart32_poll_init,
2315 .poll_get_char = lpuart32_poll_get_char,
2316 .poll_put_char = lpuart32_poll_put_char,
2317#endif
Jingchang Lu380c9662014-07-14 17:41:11 +08002318};
2319
Jingchang Luc9e2e942013-06-07 09:20:40 +08002320static struct lpuart_port *lpuart_ports[UART_NR];
2321
2322#ifdef CONFIG_SERIAL_FSL_LPUART_CONSOLE
2323static void lpuart_console_putchar(struct uart_port *port, int ch)
2324{
Andrey Smirnov56dd6272019-07-29 12:52:16 -07002325 lpuart_wait_bit_set(port, UARTSR1, UARTSR1_TDRE);
Jingchang Luc9e2e942013-06-07 09:20:40 +08002326 writeb(ch, port->membase + UARTDR);
2327}
2328
Jingchang Lu380c9662014-07-14 17:41:11 +08002329static void lpuart32_console_putchar(struct uart_port *port, int ch)
2330{
Andrey Smirnov56dd6272019-07-29 12:52:16 -07002331 lpuart32_wait_bit_set(port, UARTSTAT, UARTSTAT_TDRE);
Dong Aishenga0204f22017-06-13 10:55:49 +08002332 lpuart32_write(port, ch, UARTDATA);
Jingchang Lu380c9662014-07-14 17:41:11 +08002333}
2334
Jingchang Luc9e2e942013-06-07 09:20:40 +08002335static void
2336lpuart_console_write(struct console *co, const char *s, unsigned int count)
2337{
2338 struct lpuart_port *sport = lpuart_ports[co->index];
2339 unsigned char old_cr2, cr2;
Stefan Agnerabf1e0a2017-03-24 11:33:46 -07002340 unsigned long flags;
2341 int locked = 1;
2342
Michael Walle5697df72021-05-12 16:12:50 +02002343 if (oops_in_progress)
Stefan Agnerabf1e0a2017-03-24 11:33:46 -07002344 locked = spin_trylock_irqsave(&sport->port.lock, flags);
2345 else
2346 spin_lock_irqsave(&sport->port.lock, flags);
Jingchang Luc9e2e942013-06-07 09:20:40 +08002347
2348 /* first save CR2 and then disable interrupts */
2349 cr2 = old_cr2 = readb(sport->port.membase + UARTCR2);
Andrey Smirnovbcfa46b2019-07-29 12:52:13 -07002350 cr2 |= UARTCR2_TE | UARTCR2_RE;
Jingchang Luc9e2e942013-06-07 09:20:40 +08002351 cr2 &= ~(UARTCR2_TIE | UARTCR2_TCIE | UARTCR2_RIE);
2352 writeb(cr2, sport->port.membase + UARTCR2);
2353
2354 uart_console_write(&sport->port, s, count, lpuart_console_putchar);
2355
2356	/* wait for the transmitter to finish and restore CR2 */
Andrey Smirnov56dd6272019-07-29 12:52:16 -07002357 lpuart_wait_bit_set(&sport->port, UARTSR1, UARTSR1_TC);
Jingchang Luc9e2e942013-06-07 09:20:40 +08002358
2359 writeb(old_cr2, sport->port.membase + UARTCR2);
Stefan Agnerabf1e0a2017-03-24 11:33:46 -07002360
2361 if (locked)
2362 spin_unlock_irqrestore(&sport->port.lock, flags);
Jingchang Luc9e2e942013-06-07 09:20:40 +08002363}
2364
Jingchang Lu380c9662014-07-14 17:41:11 +08002365static void
2366lpuart32_console_write(struct console *co, const char *s, unsigned int count)
2367{
2368 struct lpuart_port *sport = lpuart_ports[co->index];
2369 unsigned long old_cr, cr;
Stefan Agnerabf1e0a2017-03-24 11:33:46 -07002370 unsigned long flags;
2371 int locked = 1;
2372
Michael Walle5697df72021-05-12 16:12:50 +02002373 if (oops_in_progress)
Stefan Agnerabf1e0a2017-03-24 11:33:46 -07002374 locked = spin_trylock_irqsave(&sport->port.lock, flags);
2375 else
2376 spin_lock_irqsave(&sport->port.lock, flags);
Jingchang Lu380c9662014-07-14 17:41:11 +08002377
2378	/* first save CTRL and then disable interrupts */
Dong Aishenga0204f22017-06-13 10:55:49 +08002379 cr = old_cr = lpuart32_read(&sport->port, UARTCTRL);
Andrey Smirnovbcfa46b2019-07-29 12:52:13 -07002380 cr |= UARTCTRL_TE | UARTCTRL_RE;
Jingchang Lu380c9662014-07-14 17:41:11 +08002381 cr &= ~(UARTCTRL_TIE | UARTCTRL_TCIE | UARTCTRL_RIE);
Dong Aishenga0204f22017-06-13 10:55:49 +08002382 lpuart32_write(&sport->port, cr, UARTCTRL);
Jingchang Lu380c9662014-07-14 17:41:11 +08002383
2384 uart_console_write(&sport->port, s, count, lpuart32_console_putchar);
2385
2386	/* wait for the transmitter to finish and restore CTRL */
Andrey Smirnov56dd6272019-07-29 12:52:16 -07002387 lpuart32_wait_bit_set(&sport->port, UARTSTAT, UARTSTAT_TC);
Jingchang Lu380c9662014-07-14 17:41:11 +08002388
Dong Aishenga0204f22017-06-13 10:55:49 +08002389 lpuart32_write(&sport->port, old_cr, UARTCTRL);
Stefan Agnerabf1e0a2017-03-24 11:33:46 -07002390
2391 if (locked)
2392 spin_unlock_irqrestore(&sport->port.lock, flags);
Jingchang Lu380c9662014-07-14 17:41:11 +08002393}
2394
Jingchang Luc9e2e942013-06-07 09:20:40 +08002395/*
2396 * if the port was already initialised (e.g. by a boot loader),
2397 * try to determine the current setup.
2398 */
2399static void __init
2400lpuart_console_get_options(struct lpuart_port *sport, int *baud,
2401 int *parity, int *bits)
2402{
2403 unsigned char cr, bdh, bdl, brfa;
2404 unsigned int sbr, uartclk, baud_raw;
2405
2406 cr = readb(sport->port.membase + UARTCR2);
2407 cr &= UARTCR2_TE | UARTCR2_RE;
2408 if (!cr)
2409 return;
2410
2411 /* ok, the port was enabled */
2412
2413 cr = readb(sport->port.membase + UARTCR1);
2414
2415 *parity = 'n';
2416 if (cr & UARTCR1_PE) {
2417 if (cr & UARTCR1_PT)
2418 *parity = 'o';
2419 else
2420 *parity = 'e';
2421 }
2422
2423 if (cr & UARTCR1_M)
2424 *bits = 9;
2425 else
2426 *bits = 8;
2427
2428 bdh = readb(sport->port.membase + UARTBDH);
2429 bdh &= UARTBDH_SBR_MASK;
2430 bdl = readb(sport->port.membase + UARTBDL);
2431 sbr = bdh;
2432 sbr <<= 8;
2433 sbr |= bdl;
2434 brfa = readb(sport->port.membase + UARTCR4);
2435 brfa &= UARTCR4_BRFA_MASK;
2436
Fugang Duan35a4ed02019-07-04 21:40:07 +08002437 uartclk = lpuart_get_baud_clk_rate(sport);
Jingchang Luc9e2e942013-06-07 09:20:40 +08002438 /*
2439	 * baud = mod_clk / (16 * (sbr + brfa / 32))
2440 */
2441 baud_raw = uartclk / (16 * (sbr + brfa / 32));
2442
2443 if (*baud != baud_raw)
Fabio Estevam9edaf502019-06-04 00:31:38 -03002444		dev_info(sport->port.dev, "Serial: Console lpuart rounded baud rate "
Jingchang Luc9e2e942013-06-07 09:20:40 +08002445 "from %d to %d\n", baud_raw, *baud);
2446}
2447
Jingchang Lu380c9662014-07-14 17:41:11 +08002448static void __init
2449lpuart32_console_get_options(struct lpuart_port *sport, int *baud,
2450 int *parity, int *bits)
2451{
2452 unsigned long cr, bd;
2453 unsigned int sbr, uartclk, baud_raw;
2454
Dong Aishenga0204f22017-06-13 10:55:49 +08002455 cr = lpuart32_read(&sport->port, UARTCTRL);
Jingchang Lu380c9662014-07-14 17:41:11 +08002456 cr &= UARTCTRL_TE | UARTCTRL_RE;
2457 if (!cr)
2458 return;
2459
2460 /* ok, the port was enabled */
2461
Dong Aishenga0204f22017-06-13 10:55:49 +08002462 cr = lpuart32_read(&sport->port, UARTCTRL);
Jingchang Lu380c9662014-07-14 17:41:11 +08002463
2464 *parity = 'n';
2465 if (cr & UARTCTRL_PE) {
2466 if (cr & UARTCTRL_PT)
2467 *parity = 'o';
2468 else
2469 *parity = 'e';
2470 }
2471
2472 if (cr & UARTCTRL_M)
2473 *bits = 9;
2474 else
2475 *bits = 8;
2476
Dong Aishenga0204f22017-06-13 10:55:49 +08002477 bd = lpuart32_read(&sport->port, UARTBAUD);
Jingchang Lu380c9662014-07-14 17:41:11 +08002478 bd &= UARTBAUD_SBR_MASK;
Sherry Sunfcb10ee2021-04-27 10:12:26 +08002479 if (!bd)
2480 return;
2481
Jingchang Lu380c9662014-07-14 17:41:11 +08002482 sbr = bd;
Fugang Duan35a4ed02019-07-04 21:40:07 +08002483 uartclk = lpuart_get_baud_clk_rate(sport);
Jingchang Lu380c9662014-07-14 17:41:11 +08002484 /*
2485	 * baud = mod_clk / (16 * sbr)
2486 */
2487 baud_raw = uartclk / (16 * sbr);
2488
2489 if (*baud != baud_raw)
Fabio Estevam9edaf502019-06-04 00:31:38 -03002490		dev_info(sport->port.dev, "Serial: Console lpuart rounded baud rate "
Jingchang Lu380c9662014-07-14 17:41:11 +08002491 "from %d to %d\n", baud_raw, *baud);
2492}
2493
Jingchang Luc9e2e942013-06-07 09:20:40 +08002494static int __init lpuart_console_setup(struct console *co, char *options)
2495{
2496 struct lpuart_port *sport;
2497 int baud = 115200;
2498 int bits = 8;
2499 int parity = 'n';
2500 int flow = 'n';
2501
2502 /*
2503 * check whether an invalid uart number has been specified, and
2504 * if so, search for the first available port that does have
2505 * console support.
2506 */
2507 if (co->index == -1 || co->index >= ARRAY_SIZE(lpuart_ports))
2508 co->index = 0;
2509
2510 sport = lpuart_ports[co->index];
2511 if (sport == NULL)
2512 return -ENODEV;
2513
2514 if (options)
2515 uart_parse_options(options, &baud, &parity, &bits, &flow);
2516 else
Fabio Estevam3ee54472017-07-11 08:03:43 -03002517 if (lpuart_is_32(sport))
Jingchang Lu380c9662014-07-14 17:41:11 +08002518 lpuart32_console_get_options(sport, &baud, &parity, &bits);
2519 else
2520 lpuart_console_get_options(sport, &baud, &parity, &bits);
Jingchang Luc9e2e942013-06-07 09:20:40 +08002521
Fabio Estevam3ee54472017-07-11 08:03:43 -03002522 if (lpuart_is_32(sport))
Jingchang Lu380c9662014-07-14 17:41:11 +08002523 lpuart32_setup_watermark(sport);
2524 else
2525 lpuart_setup_watermark(sport);
Jingchang Luc9e2e942013-06-07 09:20:40 +08002526
2527 return uart_set_options(&sport->port, co, baud, parity, bits, flow);
2528}
2529
2530static struct uart_driver lpuart_reg;
2531static struct console lpuart_console = {
2532 .name = DEV_NAME,
2533 .write = lpuart_console_write,
2534 .device = uart_console_device,
2535 .setup = lpuart_console_setup,
2536 .flags = CON_PRINTBUFFER,
2537 .index = -1,
2538 .data = &lpuart_reg,
2539};
2540
Jingchang Lu380c9662014-07-14 17:41:11 +08002541static struct console lpuart32_console = {
2542 .name = DEV_NAME,
2543 .write = lpuart32_console_write,
2544 .device = uart_console_device,
2545 .setup = lpuart_console_setup,
2546 .flags = CON_PRINTBUFFER,
2547 .index = -1,
2548 .data = &lpuart_reg,
2549};
2550
Stefan Agner1d59b382015-10-17 00:45:55 -07002551static void lpuart_early_write(struct console *con, const char *s, unsigned n)
2552{
2553 struct earlycon_device *dev = con->data;
2554
2555 uart_console_write(&dev->port, s, n, lpuart_console_putchar);
2556}
2557
2558static void lpuart32_early_write(struct console *con, const char *s, unsigned n)
2559{
2560 struct earlycon_device *dev = con->data;
2561
2562 uart_console_write(&dev->port, s, n, lpuart32_console_putchar);
2563}
2564
2565static int __init lpuart_early_console_setup(struct earlycon_device *device,
2566 const char *opt)
2567{
2568 if (!device->port.membase)
2569 return -ENODEV;
2570
2571 device->con->write = lpuart_early_write;
2572 return 0;
2573}
2574
2575static int __init lpuart32_early_console_setup(struct earlycon_device *device,
2576 const char *opt)
2577{
2578 if (!device->port.membase)
2579 return -ENODEV;
2580
Peng Fan3966f082019-12-20 06:13:36 +00002581 if (device->port.iotype != UPIO_MEM32)
2582 device->port.iotype = UPIO_MEM32BE;
2583
Stefan Agner1d59b382015-10-17 00:45:55 -07002584 device->con->write = lpuart32_early_write;
2585 return 0;
2586}
2587
Michael Wallee33253f2020-03-06 22:44:33 +01002588static int __init ls1028a_early_console_setup(struct earlycon_device *device,
2589 const char *opt)
2590{
2591 u32 cr;
2592
2593 if (!device->port.membase)
2594 return -ENODEV;
2595
2596 device->port.iotype = UPIO_MEM32;
2597 device->con->write = lpuart32_early_write;
2598
2599 /* set the baudrate */
2600 if (device->port.uartclk && device->baud)
2601 __lpuart32_serial_setbrg(&device->port, device->baud,
2602 false, false);
2603
2604 /* enable transmitter */
2605 cr = lpuart32_read(&device->port, UARTCTRL);
2606 cr |= UARTCTRL_TE;
2607 lpuart32_write(&device->port, cr, UARTCTRL);
2608
2609 return 0;
2610}
2611
Dong Aisheng97d6f352017-06-13 10:55:53 +08002612static int __init lpuart32_imx_early_console_setup(struct earlycon_device *device,
2613 const char *opt)
2614{
2615 if (!device->port.membase)
2616 return -ENODEV;
2617
2618 device->port.iotype = UPIO_MEM32;
2619 device->port.membase += IMX_REG_OFF;
2620 device->con->write = lpuart32_early_write;
2621
2622 return 0;
2623}
Stefan Agner1d59b382015-10-17 00:45:55 -07002624OF_EARLYCON_DECLARE(lpuart, "fsl,vf610-lpuart", lpuart_early_console_setup);
2625OF_EARLYCON_DECLARE(lpuart32, "fsl,ls1021a-lpuart", lpuart32_early_console_setup);
Michael Wallee33253f2020-03-06 22:44:33 +01002626OF_EARLYCON_DECLARE(lpuart32, "fsl,ls1028a-lpuart", ls1028a_early_console_setup);
Dong Aisheng97d6f352017-06-13 10:55:53 +08002627OF_EARLYCON_DECLARE(lpuart32, "fsl,imx7ulp-lpuart", lpuart32_imx_early_console_setup);
Alexander Stein4e967972021-11-24 08:31:09 +01002628OF_EARLYCON_DECLARE(lpuart32, "fsl,imx8qxp-lpuart", lpuart32_imx_early_console_setup);
Michael Walle0e28ed62020-03-03 18:42:58 +01002629EARLYCON_DECLARE(lpuart, lpuart_early_console_setup);
2630EARLYCON_DECLARE(lpuart32, lpuart32_early_console_setup);
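/*
 * Editorial note, not in the original source: besides being selected via
 * stdout-path through the OF_EARLYCON_DECLARE() entries above, the early
 * console can be requested explicitly on the kernel command line; the MMIO
 * address below is only an illustration and is board specific:
 *
 *	earlycon=lpuart32,mmio32be,0x2950000,115200
 */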
Stefan Agner1d59b382015-10-17 00:45:55 -07002631
Jingchang Luc9e2e942013-06-07 09:20:40 +08002632#define LPUART_CONSOLE (&lpuart_console)
Jingchang Lu380c9662014-07-14 17:41:11 +08002633#define LPUART32_CONSOLE (&lpuart32_console)
Jingchang Luc9e2e942013-06-07 09:20:40 +08002634#else
2635#define LPUART_CONSOLE NULL
Jingchang Lu380c9662014-07-14 17:41:11 +08002636#define LPUART32_CONSOLE NULL
Jingchang Luc9e2e942013-06-07 09:20:40 +08002637#endif
2638
2639static struct uart_driver lpuart_reg = {
2640 .owner = THIS_MODULE,
2641 .driver_name = DRIVER_NAME,
2642 .dev_name = DEV_NAME,
2643 .nr = ARRAY_SIZE(lpuart_ports),
2644 .cons = LPUART_CONSOLE,
2645};
2646
2647static int lpuart_probe(struct platform_device *pdev)
2648{
Fabio Estevame8372c42021-01-18 09:44:47 -03002649 const struct lpuart_soc_data *sdata = of_device_get_match_data(&pdev->dev);
Jingchang Luc9e2e942013-06-07 09:20:40 +08002650 struct device_node *np = pdev->dev.of_node;
2651 struct lpuart_port *sport;
2652 struct resource *res;
2653 int ret;
2654
2655 sport = devm_kzalloc(&pdev->dev, sizeof(*sport), GFP_KERNEL);
2656 if (!sport)
2657 return -ENOMEM;
2658
Fabio Estevam4ae612a2014-11-07 00:23:13 -02002659 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
Jingchang Luc9e2e942013-06-07 09:20:40 +08002660 sport->port.membase = devm_ioremap_resource(&pdev->dev, res);
2661 if (IS_ERR(sport->port.membase))
2662 return PTR_ERR(sport->port.membase);
2663
Dong Aisheng24b1e5f2017-06-13 10:55:52 +08002664 sport->port.membase += sdata->reg_off;
Andy Duand5c389482021-08-19 10:10:33 +08002665 sport->port.mapbase = res->start + sdata->reg_off;
Jingchang Luc9e2e942013-06-07 09:20:40 +08002666 sport->port.dev = &pdev->dev;
2667 sport->port.type = PORT_LPUART;
Fugang Duan35a4ed02019-07-04 21:40:07 +08002668 sport->devtype = sdata->devtype;
Jiri Slaby394a9e22016-05-09 09:23:35 +02002669 ret = platform_get_irq(pdev, 0);
Stephen Boyd1df21782019-07-30 11:15:44 -07002670 if (ret < 0)
Jiri Slaby394a9e22016-05-09 09:23:35 +02002671 return ret;
Jiri Slaby394a9e22016-05-09 09:23:35 +02002672 sport->port.irq = ret;
Dong Aisheng0d6fce92017-06-13 10:55:48 +08002673 sport->port.iotype = sdata->iotype;
Fabio Estevam3ee54472017-07-11 08:03:43 -03002674 if (lpuart_is_32(sport))
Jingchang Lu380c9662014-07-14 17:41:11 +08002675 sport->port.ops = &lpuart32_pops;
2676 else
2677 sport->port.ops = &lpuart_pops;
Dmitry Safonov4d9ec1c2019-12-13 00:06:17 +00002678 sport->port.has_sysrq = IS_ENABLED(CONFIG_SERIAL_FSL_LPUART_CONSOLE);
Jingchang Luc9e2e942013-06-07 09:20:40 +08002679 sport->port.flags = UPF_BOOT_AUTOCONF;
2680
Philippe Schenker67b01832019-10-17 14:14:42 +00002681 if (lpuart_is_32(sport))
2682 sport->port.rs485_config = lpuart32_config_rs485;
2683 else
2684 sport->port.rs485_config = lpuart_config_rs485;
Bhuvanchandra DV03895cf2016-07-19 13:13:10 +05302685
Fugang Duan35a4ed02019-07-04 21:40:07 +08002686 sport->ipg_clk = devm_clk_get(&pdev->dev, "ipg");
2687 if (IS_ERR(sport->ipg_clk)) {
2688 ret = PTR_ERR(sport->ipg_clk);
2689 dev_err(&pdev->dev, "failed to get uart ipg clk: %d\n", ret);
Jingchang Luc9e2e942013-06-07 09:20:40 +08002690 return ret;
2691 }
2692
Fugang Duan35a4ed02019-07-04 21:40:07 +08002693 sport->baud_clk = NULL;
2694 if (is_imx8qxp_lpuart(sport)) {
2695 sport->baud_clk = devm_clk_get(&pdev->dev, "baud");
2696 if (IS_ERR(sport->baud_clk)) {
2697 ret = PTR_ERR(sport->baud_clk);
2698 dev_err(&pdev->dev, "failed to get uart baud clk: %d\n", ret);
2699 return ret;
2700 }
Jingchang Luc9e2e942013-06-07 09:20:40 +08002701 }
2702
Michael Walle2b2e71f2020-03-03 18:42:59 +01002703 ret = of_alias_get_id(np, "serial");
2704 if (ret < 0) {
2705 ret = ida_simple_get(&fsl_lpuart_ida, 0, UART_NR, GFP_KERNEL);
2706 if (ret < 0) {
2707 dev_err(&pdev->dev, "port line is full, add device failed\n");
2708 return ret;
2709 }
2710 sport->id_allocated = true;
2711 }
2712 if (ret >= ARRAY_SIZE(lpuart_ports)) {
2713 dev_err(&pdev->dev, "serial%d out of range\n", ret);
2714 ret = -EINVAL;
2715 goto failed_out_of_range;
2716 }
2717 sport->port.line = ret;
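	/*
	 * Editorial note, not from the original source: the of_alias_get_id()
	 * lookup above is what pins a port to a stable /dev/ttyLPn index; a
	 * device tree typically provides it along the lines of
	 *
	 *	aliases {
	 *		serial0 = &lpuart0;
	 *	};
	 *
	 * and only without such an alias does the ida allocator pick a line.
	 */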
2718
Fugang Duan35a4ed02019-07-04 21:40:07 +08002719 ret = lpuart_enable_clks(sport);
2720 if (ret)
Michael Walle2b2e71f2020-03-03 18:42:59 +01002721 goto failed_clock_enable;
Fugang Duan35a4ed02019-07-04 21:40:07 +08002722 sport->port.uartclk = lpuart_get_baud_clk_rate(sport);
Jingchang Luc9e2e942013-06-07 09:20:40 +08002723
2724 lpuart_ports[sport->port.line] = sport;
2725
2726 platform_set_drvdata(pdev, &sport->port);
2727
Fugang Duan9d7ee0e2017-09-04 19:20:24 +08002728 if (lpuart_is_32(sport)) {
Jingchang Lu380c9662014-07-14 17:41:11 +08002729 lpuart_reg.cons = LPUART32_CONSOLE;
Fugang Duan9d7ee0e2017-09-04 19:20:24 +08002730 ret = devm_request_irq(&pdev->dev, sport->port.irq, lpuart32_int, 0,
2731 DRIVER_NAME, sport);
2732 } else {
Jingchang Lu380c9662014-07-14 17:41:11 +08002733 lpuart_reg.cons = LPUART_CONSOLE;
Fugang Duan9d7ee0e2017-09-04 19:20:24 +08002734 ret = devm_request_irq(&pdev->dev, sport->port.irq, lpuart_int, 0,
2735 DRIVER_NAME, sport);
2736 }
2737
2738 if (ret)
2739 goto failed_irq_request;
Jingchang Lu380c9662014-07-14 17:41:11 +08002740
Jingchang Luc9e2e942013-06-07 09:20:40 +08002741 ret = uart_add_one_port(&lpuart_reg, &sport->port);
Fugang Duan9d7ee0e2017-09-04 19:20:24 +08002742 if (ret)
2743 goto failed_attach_port;
Jingchang Luc9e2e942013-06-07 09:20:40 +08002744
Fugang Duanbd5305d2021-08-23 17:18:01 +08002745 ret = lpuart_global_reset(sport);
2746 if (ret)
2747 goto failed_reset;
2748
Lukas Wunnerc150c0f2020-05-12 14:40:02 +02002749 ret = uart_get_rs485_mode(&sport->port);
2750 if (ret)
2751 goto failed_get_rs485;
Sascha Hauerdde18d52017-09-13 10:18:29 +02002752
Lukas Wunner01d84532017-11-24 23:26:40 +01002753 if (sport->port.rs485.flags & SER_RS485_RX_DURING_TX)
Sascha Hauerdde18d52017-09-13 10:18:29 +02002754 dev_err(&pdev->dev, "driver doesn't support RX during TX\n");
Sascha Hauerdde18d52017-09-13 10:18:29 +02002755
2756 if (sport->port.rs485.delay_rts_before_send ||
Lukas Wunner01d84532017-11-24 23:26:40 +01002757 sport->port.rs485.delay_rts_after_send)
Sascha Hauerdde18d52017-09-13 10:18:29 +02002758 dev_err(&pdev->dev, "driver doesn't support RTS delays\n");
Sascha Hauerdde18d52017-09-13 10:18:29 +02002759
Philippe Schenker67b01832019-10-17 14:14:42 +00002760 sport->port.rs485_config(&sport->port, &sport->port.rs485);
Sascha Hauerdde18d52017-09-13 10:18:29 +02002761
Jingchang Luc9e2e942013-06-07 09:20:40 +08002762 return 0;
Fugang Duan9d7ee0e2017-09-04 19:20:24 +08002763
Lukas Wunnerc150c0f2020-05-12 14:40:02 +02002764failed_get_rs485:
Fugang Duanbd5305d2021-08-23 17:18:01 +08002765failed_reset:
2766 uart_remove_one_port(&lpuart_reg, &sport->port);
Fugang Duan9d7ee0e2017-09-04 19:20:24 +08002767failed_attach_port:
2768failed_irq_request:
Fugang Duan35a4ed02019-07-04 21:40:07 +08002769 lpuart_disable_clks(sport);
Michael Walle2b2e71f2020-03-03 18:42:59 +01002770failed_clock_enable:
2771failed_out_of_range:
2772 if (sport->id_allocated)
2773 ida_simple_remove(&fsl_lpuart_ida, sport->port.line);
Fugang Duan9d7ee0e2017-09-04 19:20:24 +08002774 return ret;
Jingchang Luc9e2e942013-06-07 09:20:40 +08002775}
2776
2777static int lpuart_remove(struct platform_device *pdev)
2778{
2779 struct lpuart_port *sport = platform_get_drvdata(pdev);
2780
2781 uart_remove_one_port(&lpuart_reg, &sport->port);
2782
Michael Walle2b2e71f2020-03-03 18:42:59 +01002783 if (sport->id_allocated)
2784 ida_simple_remove(&fsl_lpuart_ida, sport->port.line);
Vabhav Sharma3bc32062018-10-10 03:56:16 +05302785
Fugang Duan35a4ed02019-07-04 21:40:07 +08002786 lpuart_disable_clks(sport);
Jingchang Luc9e2e942013-06-07 09:20:40 +08002787
Stefan Agner4a818c42015-01-10 09:33:45 +01002788 if (sport->dma_tx_chan)
2789 dma_release_channel(sport->dma_tx_chan);
2790
2791 if (sport->dma_rx_chan)
2792 dma_release_channel(sport->dma_rx_chan);
2793
Jingchang Luc9e2e942013-06-07 09:20:40 +08002794 return 0;
2795}
2796
Anson Huangb14109f2020-05-15 20:58:01 +08002797static int __maybe_unused lpuart_suspend(struct device *dev)
Jingchang Luc9e2e942013-06-07 09:20:40 +08002798{
2799 struct lpuart_port *sport = dev_get_drvdata(dev);
Yuan Yao2fe605d2015-01-23 17:48:54 +08002800 unsigned long temp;
Andy Shevchenko3d6bcdd2017-08-13 17:47:39 +03002801 bool irq_wake;
Yuan Yao2fe605d2015-01-23 17:48:54 +08002802
Fabio Estevam3ee54472017-07-11 08:03:43 -03002803 if (lpuart_is_32(sport)) {
Yuan Yao2fe605d2015-01-23 17:48:54 +08002804		/* disable Tx and Tx interrupts */
Dong Aishenga0204f22017-06-13 10:55:49 +08002805 temp = lpuart32_read(&sport->port, UARTCTRL);
Yuan Yao2fe605d2015-01-23 17:48:54 +08002806 temp &= ~(UARTCTRL_TE | UARTCTRL_TIE | UARTCTRL_TCIE);
Dong Aishenga0204f22017-06-13 10:55:49 +08002807 lpuart32_write(&sport->port, temp, UARTCTRL);
Yuan Yao2fe605d2015-01-23 17:48:54 +08002808 } else {
2809		/* disable Tx and Tx interrupts */
2810 temp = readb(sport->port.membase + UARTCR2);
2811 temp &= ~(UARTCR2_TE | UARTCR2_TIE | UARTCR2_TCIE);
2812 writeb(temp, sport->port.membase + UARTCR2);
2813 }
Jingchang Luc9e2e942013-06-07 09:20:40 +08002814
2815 uart_suspend_port(&lpuart_reg, &sport->port);
Bhuvanchandra DVc05efd62016-07-19 13:13:09 +05302816
Andy Shevchenko3d6bcdd2017-08-13 17:47:39 +03002817 /* uart_suspend_port() might set wakeup flag */
2818 irq_wake = irqd_is_wakeup_set(irq_get_irq_data(sport->port.irq));
2819
Bhuvanchandra DVc05efd62016-07-19 13:13:09 +05302820 if (sport->lpuart_dma_rx_use) {
2821 /*
2822 * EDMA driver during suspend will forcefully release any
2823		 * non-idle DMA channels. If port wakeup is enabled, the port
2824		 * is a console port, or 'no_console_suspend' is set, the Rx DMA
2825		 * cannot resume as expected, hence gracefully release the
2826 * Rx DMA path before suspend and start Rx DMA path on resume.
2827 */
Andy Shevchenko3d6bcdd2017-08-13 17:47:39 +03002828 if (irq_wake) {
Bhuvanchandra DVc05efd62016-07-19 13:13:09 +05302829 del_timer_sync(&sport->lpuart_timer);
2830 lpuart_dma_rx_free(&sport->port);
2831 }
2832
2833 /* Disable Rx DMA to use UART port as wakeup source */
Atsushi Nemoto42b68762019-01-23 12:20:17 +09002834 if (lpuart_is_32(sport)) {
2835 temp = lpuart32_read(&sport->port, UARTBAUD);
2836 lpuart32_write(&sport->port, temp & ~UARTBAUD_RDMAE,
2837 UARTBAUD);
2838 } else {
2839 writeb(readb(sport->port.membase + UARTCR5) &
2840 ~UARTCR5_RDMAS, sport->port.membase + UARTCR5);
2841 }
Bhuvanchandra DVc05efd62016-07-19 13:13:09 +05302842 }
2843
2844 if (sport->lpuart_dma_tx_use) {
2845 sport->dma_tx_in_progress = false;
2846 dmaengine_terminate_all(sport->dma_tx_chan);
2847 }
2848
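	/*
	 * Only gate the clocks if the port actually went into suspend and is
	 * not configured as a wakeup source.
	 */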
Andy Shevchenko3d6bcdd2017-08-13 17:47:39 +03002849 if (sport->port.suspended && !irq_wake)
Fugang Duan35a4ed02019-07-04 21:40:07 +08002850 lpuart_disable_clks(sport);
Jingchang Luc9e2e942013-06-07 09:20:40 +08002851
2852 return 0;
2853}
2854
Anson Huangb14109f2020-05-15 20:58:01 +08002855static int __maybe_unused lpuart_resume(struct device *dev)
Jingchang Luc9e2e942013-06-07 09:20:40 +08002856{
2857 struct lpuart_port *sport = dev_get_drvdata(dev);
Andy Shevchenko3d6bcdd2017-08-13 17:47:39 +03002858 bool irq_wake = irqd_is_wakeup_set(irq_get_irq_data(sport->port.irq));
Jingchang Lu08de1012014-10-24 17:20:49 +08002859
Andy Shevchenko3d6bcdd2017-08-13 17:47:39 +03002860 if (sport->port.suspended && !irq_wake)
Fugang Duan35a4ed02019-07-04 21:40:07 +08002861 lpuart_enable_clks(sport);
Stefan Agnerd6b0d2f2016-07-19 13:13:04 +05302862
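	/*
	 * Reprogram the FIFO watermarks and re-enable the UART; register
	 * contents may have been lost while the clocks were off.
	 */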
Andrey Smirnov352bd552019-08-05 11:56:59 -07002863 if (lpuart_is_32(sport))
2864 lpuart32_setup_watermark_enable(sport);
2865 else
2866 lpuart_setup_watermark_enable(sport);
Jingchang Luc9e2e942013-06-07 09:20:40 +08002867
Bhuvanchandra DVc05efd62016-07-19 13:13:09 +05302868 if (sport->lpuart_dma_rx_use) {
Andy Shevchenko3d6bcdd2017-08-13 17:47:39 +03002869 if (irq_wake) {
Nikita Yushchenko54a44d52016-12-04 18:49:28 +03002870 if (!lpuart_start_rx_dma(sport))
Bhuvanchandra DVc05efd62016-07-19 13:13:09 +05302871 rx_dma_timer_init(sport);
Nikita Yushchenko54a44d52016-12-04 18:49:28 +03002872 else
Bhuvanchandra DVc05efd62016-07-19 13:13:09 +05302873 sport->lpuart_dma_rx_use = false;
Bhuvanchandra DVc05efd62016-07-19 13:13:09 +05302874 }
2875 }
2876
Andrey Smirnov59821992019-08-05 11:56:56 -07002877 lpuart_tx_dma_startup(sport);
Bhuvanchandra DVc05efd62016-07-19 13:13:09 +05302878
Andrey Smirnov4ff69042019-08-05 11:56:58 -07002879 if (lpuart_is_32(sport))
2880 lpuart32_configure(sport);
Atsushi Nemoto42b68762019-01-23 12:20:17 +09002881
Jingchang Luc9e2e942013-06-07 09:20:40 +08002882 uart_resume_port(&lpuart_reg, &sport->port);
2883
2884 return 0;
2885}
Jingchang Luc9e2e942013-06-07 09:20:40 +08002886
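/*
 * SIMPLE_DEV_PM_OPS only hooks up lpuart_suspend/lpuart_resume when
 * CONFIG_PM_SLEEP is enabled; the __maybe_unused annotations above keep the
 * compiler quiet otherwise.
 */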
2887static SIMPLE_DEV_PM_OPS(lpuart_pm_ops, lpuart_suspend, lpuart_resume);
2888
2889static struct platform_driver lpuart_driver = {
2890 .probe = lpuart_probe,
2891 .remove = lpuart_remove,
2892 .driver = {
2893 .name = "fsl-lpuart",
Jingchang Luc9e2e942013-06-07 09:20:40 +08002894 .of_match_table = lpuart_dt_ids,
2895 .pm = &lpuart_pm_ops,
2896 },
2897};
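/*
 * For reference, a minimal sketch of a device-tree node this driver could
 * bind against (values are illustrative; consult the fsl-lpuart binding and
 * the SoC dtsi for the real ones):
 *
 *	lpuart0: serial@40027000 {
 *		compatible = "fsl,vf610-lpuart";
 *		reg = <0x40027000 0x1000>;
 *		interrupts = <61 IRQ_TYPE_LEVEL_HIGH>;
 *		clocks = <&clks VF610_CLK_UART0>;
 *		clock-names = "ipg";
 *	};
 */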
2898
2899static int __init lpuart_serial_init(void)
2900{
Fabio Estevam144c29e2014-11-07 00:23:14 -02002901 int ret = uart_register_driver(&lpuart_reg);
Jingchang Luc9e2e942013-06-07 09:20:40 +08002902
Jingchang Luc9e2e942013-06-07 09:20:40 +08002903 if (ret)
2904 return ret;
2905
2906 ret = platform_driver_register(&lpuart_driver);
2907 if (ret)
2908 uart_unregister_driver(&lpuart_reg);
2909
Axel Lin39c34b02013-07-22 09:12:36 +08002910 return ret;
Jingchang Luc9e2e942013-06-07 09:20:40 +08002911}
2912
2913static void __exit lpuart_serial_exit(void)
2914{
Vabhav Sharma3bc32062018-10-10 03:56:16 +05302915 ida_destroy(&fsl_lpuart_ida);
Jingchang Luc9e2e942013-06-07 09:20:40 +08002916 platform_driver_unregister(&lpuart_driver);
2917 uart_unregister_driver(&lpuart_reg);
2918}
2919
2920module_init(lpuart_serial_init);
2921module_exit(lpuart_serial_exit);
2922
2923MODULE_DESCRIPTION("Freescale lpuart serial port driver");
2924MODULE_LICENSE("GPL v2");