// SPDX-License-Identifier: GPL-2.0
/*
 * i2c Support for Atmel's AT91 Two-Wire Interface (TWI)
 *
 * Copyright (C) 2011 Weinmann Medical GmbH
 * Author: Nikolaus Voss <n.voss@weinmann.de>
 *
 * Evolved from original work by:
 * Copyright (C) 2004 Rick Bronson
 * Converted to 2.6 by Andrew Victor <andrew@sanpeople.com>
 *
 * Borrowed heavily from original work by:
 * Copyright (C) 2000 Philip Edelbrock <phil@stimpy.netroedge.com>
 */

#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/gpio/consumer.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

#include "i2c-at91.h"

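/*
 * Configure the controller for master mode: enable the FIFO if present,
 * enable master mode, disable slave mode, program the clock waveform
 * generator register and, where the IP supports it, the digital and
 * analog input filters.
 */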
void at91_init_twi_bus_master(struct at91_twi_dev *dev)
{
	struct at91_twi_pdata *pdata = dev->pdata;
	u32 filtr = 0;

	/* FIFO should be enabled immediately after the software reset */
	if (dev->fifo_size)
		at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_FIFOEN);
	at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_MSEN);
	at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_SVDIS);
	at91_twi_write(dev, AT91_TWI_CWGR, dev->twi_cwgr_reg);

	/* enable digital filter */
	if (pdata->has_dig_filtr && dev->enable_dig_filt)
		filtr |= AT91_TWI_FILTR_FILT;

	/* enable advanced digital filter */
	if (pdata->has_adv_dig_filtr && dev->enable_dig_filt)
		filtr |= AT91_TWI_FILTR_FILT |
			 (AT91_TWI_FILTR_THRES(dev->filter_width) &
			 AT91_TWI_FILTR_THRES_MASK);

	/* enable analog filter */
	if (pdata->has_ana_filtr && dev->enable_ana_filt)
		filtr |= AT91_TWI_FILTR_PADFEN;

	if (filtr)
		at91_twi_write(dev, AT91_TWI_FILTR, filtr);
}

/*
 * Calculate symmetric clock as stated in datasheet:
 * twi_clk = F_MAIN / (2 * (cdiv * (1 << ckdiv) + offset))
 */
static void at91_calc_twi_clock(struct at91_twi_dev *dev)
{
	int ckdiv, cdiv, div, hold = 0, filter_width = 0;
	struct at91_twi_pdata *pdata = dev->pdata;
	int offset = pdata->clk_offset;
	int max_ckdiv = pdata->clk_max_div;
	struct i2c_timings timings, *t = &timings;

	i2c_parse_fw_timings(dev->dev, t, true);

	div = max(0, (int)DIV_ROUND_UP(clk_get_rate(dev->clk),
				       2 * t->bus_freq_hz) - offset);
	ckdiv = fls(div >> 8);
	cdiv = div >> ckdiv;

	if (ckdiv > max_ckdiv) {
		dev_warn(dev->dev, "%d exceeds ckdiv max value which is %d.\n",
			 ckdiv, max_ckdiv);
		ckdiv = max_ckdiv;
		cdiv = 255;
	}

	if (pdata->has_hold_field) {
		/*
		 * hold time = HOLD + 3 x T_peripheral_clock
		 * Use clk rate in kHz to prevent overflows when computing
		 * hold.
		 */
		hold = DIV_ROUND_UP(t->sda_hold_ns
				    * (clk_get_rate(dev->clk) / 1000), 1000000);
		hold -= 3;
		if (hold < 0)
			hold = 0;
		if (hold > AT91_TWI_CWGR_HOLD_MAX) {
			dev_warn(dev->dev,
				 "HOLD field set to its maximum value (%d instead of %d)\n",
				 AT91_TWI_CWGR_HOLD_MAX, hold);
			hold = AT91_TWI_CWGR_HOLD_MAX;
		}
	}

	if (pdata->has_adv_dig_filtr) {
		/*
		 * filter width = 0 to AT91_TWI_FILTR_THRES_MAX
		 * peripheral clocks
		 */
		filter_width = DIV_ROUND_UP(t->digital_filter_width_ns
				* (clk_get_rate(dev->clk) / 1000), 1000000);
		if (filter_width > AT91_TWI_FILTR_THRES_MAX) {
			dev_warn(dev->dev,
				 "Filter threshold set to its maximum value (%d instead of %d)\n",
				 AT91_TWI_FILTR_THRES_MAX, filter_width);
			filter_width = AT91_TWI_FILTR_THRES_MAX;
		}
	}

	dev->twi_cwgr_reg = (ckdiv << 16) | (cdiv << 8) | cdiv
			    | AT91_TWI_CWGR_HOLD(hold);

	dev->filter_width = filter_width;

	dev_dbg(dev->dev, "cdiv %d ckdiv %d hold %d (%d ns), filter_width %d (%d ns)\n",
		cdiv, ckdiv, hold, t->sda_hold_ns, filter_width,
		t->digital_filter_width_ns);
}

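/*
 * Terminate any DMA transfer still in flight and unmap the transfer
 * buffer, with the controller interrupts masked while doing so.
 */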
static void at91_twi_dma_cleanup(struct at91_twi_dev *dev)
{
	struct at91_twi_dma *dma = &dev->dma;

	at91_twi_irq_save(dev);

	if (dma->xfer_in_progress) {
		if (dma->direction == DMA_FROM_DEVICE)
			dmaengine_terminate_sync(dma->chan_rx);
		else
			dmaengine_terminate_sync(dma->chan_tx);
		dma->xfer_in_progress = false;
	}
	if (dma->buf_mapped) {
		dma_unmap_single(dev->dev, sg_dma_address(&dma->sg[0]),
				 dev->buf_len, dma->direction);
		dma->buf_mapped = false;
	}

	at91_twi_irq_restore(dev);
}

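/*
 * PIO write path: push the next byte into THR; after the last byte,
 * send STOP (unless the alternative command mode does it) and mask TXRDY.
 */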
static void at91_twi_write_next_byte(struct at91_twi_dev *dev)
{
	if (!dev->buf_len)
		return;

	/* 8bit write works with and without FIFO */
	writeb_relaxed(*dev->buf, dev->base + AT91_TWI_THR);

	/* send stop when last byte has been written */
	if (--dev->buf_len == 0) {
		if (!dev->use_alt_cmd)
			at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_STOP);
		at91_twi_write(dev, AT91_TWI_IDR, AT91_TWI_TXRDY);
	}

	dev_dbg(dev->dev, "wrote 0x%x, to go %zu\n", *dev->buf, dev->buf_len);

	++dev->buf;
}

static void at91_twi_write_data_dma_callback(void *data)
{
	struct at91_twi_dev *dev = (struct at91_twi_dev *)data;

	dma_unmap_single(dev->dev, sg_dma_address(&dev->dma.sg[0]),
			 dev->buf_len, DMA_TO_DEVICE);

	/*
	 * When this callback is called, the THR/TX FIFO is likely not to be
	 * empty yet. So we have to wait for the TXCOMP or NACK bits to be set
	 * in the Status Register to be sure that the STOP bit has been sent
	 * and the transfer is completed. The NACK interrupt has already been
	 * enabled, we just have to enable the TXCOMP one.
	 */
	at91_twi_write(dev, AT91_TWI_IER, AT91_TWI_TXCOMP);
	if (!dev->use_alt_cmd)
		at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_STOP);
}

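/*
 * Map the message buffer and start a DMA write to THR. With a FIFO, the
 * buffer is split into a 4-byte-aligned part and a remainder so the DMA
 * controller can be triggered whenever at least 4 data fit into the TX FIFO.
 */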
static void at91_twi_write_data_dma(struct at91_twi_dev *dev)
{
	dma_addr_t dma_addr;
	struct dma_async_tx_descriptor *txdesc;
	struct at91_twi_dma *dma = &dev->dma;
	struct dma_chan *chan_tx = dma->chan_tx;
	unsigned int sg_len = 1;

	if (!dev->buf_len)
		return;

	dma->direction = DMA_TO_DEVICE;

	at91_twi_irq_save(dev);
	dma_addr = dma_map_single(dev->dev, dev->buf, dev->buf_len,
				  DMA_TO_DEVICE);
	if (dma_mapping_error(dev->dev, dma_addr)) {
		dev_err(dev->dev, "dma map failed\n");
		return;
	}
	dma->buf_mapped = true;
	at91_twi_irq_restore(dev);

	if (dev->fifo_size) {
		size_t part1_len, part2_len;
		struct scatterlist *sg;
		unsigned fifo_mr;

		sg_len = 0;

		part1_len = dev->buf_len & ~0x3;
		if (part1_len) {
			sg = &dma->sg[sg_len++];
			sg_dma_len(sg) = part1_len;
			sg_dma_address(sg) = dma_addr;
		}

		part2_len = dev->buf_len & 0x3;
		if (part2_len) {
			sg = &dma->sg[sg_len++];
			sg_dma_len(sg) = part2_len;
			sg_dma_address(sg) = dma_addr + part1_len;
		}

		/*
		 * DMA controller is triggered when at least 4 data can be
		 * written into the TX FIFO
		 */
		fifo_mr = at91_twi_read(dev, AT91_TWI_FMR);
		fifo_mr &= ~AT91_TWI_FMR_TXRDYM_MASK;
		fifo_mr |= AT91_TWI_FMR_TXRDYM(AT91_TWI_FOUR_DATA);
		at91_twi_write(dev, AT91_TWI_FMR, fifo_mr);
	} else {
		sg_dma_len(&dma->sg[0]) = dev->buf_len;
		sg_dma_address(&dma->sg[0]) = dma_addr;
	}

	txdesc = dmaengine_prep_slave_sg(chan_tx, dma->sg, sg_len,
					 DMA_MEM_TO_DEV,
					 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!txdesc) {
		dev_err(dev->dev, "dma prep slave sg failed\n");
		goto error;
	}

	txdesc->callback = at91_twi_write_data_dma_callback;
	txdesc->callback_param = dev;

	dma->xfer_in_progress = true;
	dmaengine_submit(txdesc);
	dma_async_issue_pending(chan_tx);

	return;

error:
	at91_twi_dma_cleanup(dev);
}

static void at91_twi_read_next_byte(struct at91_twi_dev *dev)
{
	/*
	 * If we are in this case, it means there is garbage data in RHR, so
	 * discard it.
	 */
	if (!dev->buf_len) {
		at91_twi_read(dev, AT91_TWI_RHR);
		return;
	}

	/* 8bit read works with and without FIFO */
	*dev->buf = readb_relaxed(dev->base + AT91_TWI_RHR);
	--dev->buf_len;

	/* return if aborting, we only needed to read RHR to clear RXRDY */
	if (dev->recv_len_abort)
		return;

	/* handle I2C_SMBUS_BLOCK_DATA */
	if (unlikely(dev->msg->flags & I2C_M_RECV_LEN)) {
		/* ensure length byte is a valid value */
		if (*dev->buf <= I2C_SMBUS_BLOCK_MAX && *dev->buf > 0) {
			dev->msg->flags &= ~I2C_M_RECV_LEN;
			dev->buf_len += *dev->buf;
			dev->msg->len = dev->buf_len + 1;
			dev_dbg(dev->dev, "received block length %zu\n",
				dev->buf_len);
		} else {
			/* abort and send the stop by reading one more byte */
			dev->recv_len_abort = true;
			dev->buf_len = 1;
		}
	}

	/* send stop if the second to last byte has been read */
	if (!dev->use_alt_cmd && dev->buf_len == 1)
		at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_STOP);

	dev_dbg(dev->dev, "read 0x%x, to go %zu\n", *dev->buf, dev->buf_len);

	++dev->buf;
}

static void at91_twi_read_data_dma_callback(void *data)
{
	struct at91_twi_dev *dev = (struct at91_twi_dev *)data;
	unsigned ier = AT91_TWI_TXCOMP;

	dma_unmap_single(dev->dev, sg_dma_address(&dev->dma.sg[0]),
			 dev->buf_len, DMA_FROM_DEVICE);

	if (!dev->use_alt_cmd) {
		/* The last two bytes have to be read without using dma */
		dev->buf += dev->buf_len - 2;
		dev->buf_len = 2;
		ier |= AT91_TWI_RXRDY;
	}
	at91_twi_write(dev, AT91_TWI_IER, ier);
}

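/*
 * Map the message buffer and start a DMA read from RHR. Without the
 * alternative command mode, the last two bytes are excluded from the DMA
 * transfer and read by the CPU so that STOP can be sent at the right time.
 */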
static void at91_twi_read_data_dma(struct at91_twi_dev *dev)
{
	dma_addr_t dma_addr;
	struct dma_async_tx_descriptor *rxdesc;
	struct at91_twi_dma *dma = &dev->dma;
	struct dma_chan *chan_rx = dma->chan_rx;
	size_t buf_len;

	buf_len = (dev->use_alt_cmd) ? dev->buf_len : dev->buf_len - 2;
	dma->direction = DMA_FROM_DEVICE;

	/* Keep in mind that we won't use dma to read the last two bytes */
	at91_twi_irq_save(dev);
	dma_addr = dma_map_single(dev->dev, dev->buf, buf_len, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev->dev, dma_addr)) {
		dev_err(dev->dev, "dma map failed\n");
		return;
	}
	dma->buf_mapped = true;
	at91_twi_irq_restore(dev);

	if (dev->fifo_size && IS_ALIGNED(buf_len, 4)) {
		unsigned fifo_mr;

		/*
		 * DMA controller is triggered when at least 4 data can be
		 * read from the RX FIFO
		 */
		fifo_mr = at91_twi_read(dev, AT91_TWI_FMR);
		fifo_mr &= ~AT91_TWI_FMR_RXRDYM_MASK;
		fifo_mr |= AT91_TWI_FMR_RXRDYM(AT91_TWI_FOUR_DATA);
		at91_twi_write(dev, AT91_TWI_FMR, fifo_mr);
	}

	sg_dma_len(&dma->sg[0]) = buf_len;
	sg_dma_address(&dma->sg[0]) = dma_addr;

	rxdesc = dmaengine_prep_slave_sg(chan_rx, dma->sg, 1, DMA_DEV_TO_MEM,
					 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!rxdesc) {
		dev_err(dev->dev, "dma prep slave sg failed\n");
		goto error;
	}

	rxdesc->callback = at91_twi_read_data_dma_callback;
	rxdesc->callback_param = dev;

	dma->xfer_in_progress = true;
	dmaengine_submit(rxdesc);
	dma_async_issue_pending(dma->chan_rx);

	return;

error:
	at91_twi_dma_cleanup(dev);
}

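/*
 * Interrupt handler: drain RHR first, then signal completion on TXCOMP or
 * NACK, feed THR on TXRDY, and accumulate the status bits for the caller.
 */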
static irqreturn_t atmel_twi_interrupt(int irq, void *dev_id)
{
	struct at91_twi_dev *dev = dev_id;
	const unsigned status = at91_twi_read(dev, AT91_TWI_SR);
	const unsigned irqstatus = status & at91_twi_read(dev, AT91_TWI_IMR);

	if (!irqstatus)
		return IRQ_NONE;
	/*
	 * In reception, the behavior of the twi device (before sama5d2) is
	 * weird. There is some magic about the RXRDY flag! When a byte has
	 * almost been received, the reception of a new one is anticipated if
	 * there is no stop command to send. That is the reason why we ask for
	 * sending the stop command not on the last data but on the second to
	 * last one.
	 *
	 * Unfortunately, we could still have the RXRDY flag set even if the
	 * transfer is done and we have read the last data. It might happen
	 * when the i2c slave device sends data too quickly after receiving the
	 * ack from the master. The data has been almost received before having
	 * the order to send stop. In this case, sending the stop command could
	 * cause a RXRDY interrupt together with a TXCOMP one. It is better to
	 * manage the RXRDY interrupt first in order to not keep garbage data
	 * in the Receive Holding Register for the next transfer.
	 */
	if (irqstatus & AT91_TWI_RXRDY) {
		/*
		 * Read all available bytes at once by polling RXRDY; this
		 * works with and without the FIFO. With the FIFO enabled we
		 * could also read RXFL and avoid polling RXRDY.
		 */
		do {
			at91_twi_read_next_byte(dev);
		} while (at91_twi_read(dev, AT91_TWI_SR) & AT91_TWI_RXRDY);
	}

	/*
	 * When a NACK condition is detected, the I2C controller sets the NACK,
	 * TXCOMP and TXRDY bits all together in the Status Register (SR).
	 *
	 * 1 - Handling NACK errors with CPU write transfer.
	 *
	 * In such a case, we should not write the next byte into the Transmit
	 * Holding Register (THR), otherwise the I2C controller would start a
	 * new transfer and the I2C slave is likely to reply by another NACK.
	 *
	 * 2 - Handling NACK errors with DMA write transfer.
	 *
	 * By setting the TXRDY bit in the SR, the I2C controller also triggers
	 * the DMA controller to write the next data into the THR. Then the
	 * result depends on the hardware version of the I2C controller.
	 *
	 * 2a - Without support of the Alternative Command mode.
	 *
	 * This is the worst case: the DMA controller is triggered to write the
	 * next data into the THR, hence starting a new transfer: the I2C slave
	 * is likely to reply by another NACK.
	 * Concurrently, this interrupt handler is likely to be called to manage
	 * the first NACK before the I2C controller detects the second NACK and
	 * sets once again the NACK bit in the SR.
	 * When handling the first NACK, this interrupt handler disables the I2C
	 * controller interruptions, especially the NACK interrupt.
	 * Hence, the NACK bit is left pending in the SR. This is why we should
	 * read the SR to clear all pending interrupts at the beginning of
	 * at91_do_twi_transfer() before actually starting a new transfer.
	 *
	 * 2b - With support of the Alternative Command mode.
	 *
	 * When a NACK condition is detected, the I2C controller also locks the
	 * THR (and sets the LOCK bit in the SR): even though the DMA controller
	 * is triggered by the TXRDY bit to write the next data into the THR,
	 * this data actually won't go on the I2C bus hence a second NACK is not
	 * generated.
	 */
	if (irqstatus & (AT91_TWI_TXCOMP | AT91_TWI_NACK)) {
		at91_disable_twi_interrupts(dev);
		complete(&dev->cmd_complete);
	} else if (irqstatus & AT91_TWI_TXRDY) {
		at91_twi_write_next_byte(dev);
	}

	/* catch error flags */
	dev->transfer_status |= status;

	return IRQ_HANDLED;
}

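/*
 * Run the transfer previously set up in dev->msg/dev->buf/dev->buf_len and
 * wait for completion, choosing between the PIO and DMA paths and mapping
 * NACK, overrun, underrun, TX lock and timeout conditions to error codes.
 */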
static int at91_do_twi_transfer(struct at91_twi_dev *dev)
{
	int ret;
	unsigned long time_left;
	bool has_unre_flag = dev->pdata->has_unre_flag;
	bool has_alt_cmd = dev->pdata->has_alt_cmd;

	/*
	 * WARNING: the TXCOMP bit in the Status Register is NOT a clear on
	 * read flag but shows the state of the transmission at the time the
	 * Status Register is read. According to the programmer's datasheet,
	 * TXCOMP is set when both the holding register and the internal
	 * shifter are empty and the STOP condition has been sent.
	 * Consequently, we should enable the NACK interrupt rather than TXCOMP
	 * to detect transmission failure.
	 * Indeed let's take the case of an i2c write command using DMA.
	 * Whenever the slave doesn't acknowledge a byte, the LOCK, NACK and
	 * TXCOMP bits are set together in the Status Register.
	 * LOCK is a clear on write bit, which is set to prevent the DMA
	 * controller from sending new data on the i2c bus after a NACK
	 * condition has happened. Once locked, this i2c peripheral stops
	 * triggering the DMA controller for new data but it is more than
	 * likely that a new DMA transaction is already in progress, writing
	 * into the Transmit Holding Register. Since the peripheral is locked,
	 * these new data won't be sent to the i2c bus but they will remain
	 * in the Transmit Holding Register, so the TXCOMP bit is cleared.
	 * Then when the interrupt handler is called, the Status Register is
	 * read: the TXCOMP bit is clear but the NACK bit is still set. The
	 * driver manages the error properly, without waiting for a timeout.
	 * This case can be reproduced easily when writing into an at24 eeprom.
	 *
	 * Besides, the TXCOMP bit is already set before the i2c transaction
	 * has been started. For read transactions, this bit is cleared when
	 * writing the START bit into the Control Register. So the
	 * corresponding interrupt can safely be enabled just after.
	 * However for write transactions managed by the CPU, we first write
	 * into THR, so TXCOMP is cleared. Then we can safely enable the TXCOMP
	 * interrupt. If the TXCOMP interrupt were enabled before writing into
	 * THR, the interrupt handler would be called immediately and the i2c
	 * command would be reported as completed.
	 * Also when a write transaction is managed by the DMA controller,
	 * enabling the TXCOMP interrupt in this function may lead to a race
	 * condition since we don't know whether the TXCOMP interrupt is enabled
	 * before or after the DMA has started to write into THR. So the TXCOMP
	 * interrupt is enabled later by at91_twi_write_data_dma_callback().
	 * Immediately after in that DMA callback, if the alternative command
	 * mode is not used, we still need to send the STOP condition manually
	 * by writing the corresponding bit into the Control Register.
	 */

	dev_dbg(dev->dev, "transfer: %s %zu bytes.\n",
		(dev->msg->flags & I2C_M_RD) ? "read" : "write", dev->buf_len);

	reinit_completion(&dev->cmd_complete);
	dev->transfer_status = 0;

	/* Clear pending interrupts, such as NACK. */
	at91_twi_read(dev, AT91_TWI_SR);

	if (dev->fifo_size) {
		unsigned fifo_mr = at91_twi_read(dev, AT91_TWI_FMR);

		/* Reset FIFO mode register */
		fifo_mr &= ~(AT91_TWI_FMR_TXRDYM_MASK |
			     AT91_TWI_FMR_RXRDYM_MASK);
		fifo_mr |= AT91_TWI_FMR_TXRDYM(AT91_TWI_ONE_DATA);
		fifo_mr |= AT91_TWI_FMR_RXRDYM(AT91_TWI_ONE_DATA);
		at91_twi_write(dev, AT91_TWI_FMR, fifo_mr);

		/* Flush FIFOs */
		at91_twi_write(dev, AT91_TWI_CR,
			       AT91_TWI_THRCLR | AT91_TWI_RHRCLR);
	}

	if (!dev->buf_len) {
		at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_QUICK);
		at91_twi_write(dev, AT91_TWI_IER, AT91_TWI_TXCOMP);
	} else if (dev->msg->flags & I2C_M_RD) {
		unsigned start_flags = AT91_TWI_START;

		/* if only one byte is to be read, immediately stop transfer */
		if (!dev->use_alt_cmd && dev->buf_len <= 1 &&
		    !(dev->msg->flags & I2C_M_RECV_LEN))
			start_flags |= AT91_TWI_STOP;
		at91_twi_write(dev, AT91_TWI_CR, start_flags);
		/*
		 * When using dma without alternative command mode, the last
		 * byte has to be read manually in order to not send the stop
		 * command too late and then to receive extra data.
		 * In practice, there are some issues if you use the dma to
		 * read n-1 bytes because of latency.
		 * Reading n-2 bytes with dma and the two last ones manually
		 * seems to be the best solution.
		 */
		if (dev->use_dma && (dev->buf_len > AT91_I2C_DMA_THRESHOLD)) {
			at91_twi_write(dev, AT91_TWI_IER, AT91_TWI_NACK);
			at91_twi_read_data_dma(dev);
		} else {
			at91_twi_write(dev, AT91_TWI_IER,
				       AT91_TWI_TXCOMP |
				       AT91_TWI_NACK |
				       AT91_TWI_RXRDY);
		}
	} else {
		if (dev->use_dma && (dev->buf_len > AT91_I2C_DMA_THRESHOLD)) {
			at91_twi_write(dev, AT91_TWI_IER, AT91_TWI_NACK);
			at91_twi_write_data_dma(dev);
		} else {
			at91_twi_write_next_byte(dev);
			at91_twi_write(dev, AT91_TWI_IER,
				       AT91_TWI_TXCOMP | AT91_TWI_NACK |
				       (dev->buf_len ? AT91_TWI_TXRDY : 0));
		}
	}

	time_left = wait_for_completion_timeout(&dev->cmd_complete,
						dev->adapter.timeout);
	if (time_left == 0) {
		dev->transfer_status |= at91_twi_read(dev, AT91_TWI_SR);
		dev_err(dev->dev, "controller timed out\n");
		at91_init_twi_bus(dev);
		ret = -ETIMEDOUT;
		goto error;
	}
	if (dev->transfer_status & AT91_TWI_NACK) {
		dev_dbg(dev->dev, "received nack\n");
		ret = -EREMOTEIO;
		goto error;
	}
	if (dev->transfer_status & AT91_TWI_OVRE) {
		dev_err(dev->dev, "overrun while reading\n");
		ret = -EIO;
		goto error;
	}
	if (has_unre_flag && dev->transfer_status & AT91_TWI_UNRE) {
		dev_err(dev->dev, "underrun while writing\n");
		ret = -EIO;
		goto error;
	}
	if ((has_alt_cmd || dev->fifo_size) &&
	    (dev->transfer_status & AT91_TWI_LOCK)) {
		dev_err(dev->dev, "tx locked\n");
		ret = -EIO;
		goto error;
	}
	if (dev->recv_len_abort) {
		dev_err(dev->dev, "invalid smbus block length recvd\n");
		ret = -EPROTO;
		goto error;
	}

	dev_dbg(dev->dev, "transfer complete\n");

	return 0;

error:
	/* first stop DMA transfer if still in progress */
	at91_twi_dma_cleanup(dev);
	/* then flush THR/FIFO and unlock TX if locked */
	if ((has_alt_cmd || dev->fifo_size) &&
	    (dev->transfer_status & AT91_TWI_LOCK)) {
		dev_dbg(dev->dev, "unlock tx\n");
		at91_twi_write(dev, AT91_TWI_CR,
			       AT91_TWI_THRCLR | AT91_TWI_LOCKCLR);
	}

	/*
	 * Some faulty I2C slave devices might hold SDA down;
	 * we can send a bus clear command, hoping that the pins will be
	 * released.
	 */
	i2c_recover_bus(&dev->adapter);

	return ret;
}

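/*
 * master_xfer callback. At most two messages are accepted (see
 * at91_twi_quirks below): with two messages, the first one is written to
 * the internal address register and only the second one is transferred.
 */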
static int at91_twi_xfer(struct i2c_adapter *adap, struct i2c_msg *msg, int num)
{
	struct at91_twi_dev *dev = i2c_get_adapdata(adap);
	int ret;
	unsigned int_addr_flag = 0;
	struct i2c_msg *m_start = msg;
	bool is_read;

	dev_dbg(&adap->dev, "at91_xfer: processing %d messages:\n", num);

	ret = pm_runtime_get_sync(dev->dev);
	if (ret < 0)
		goto out;

	if (num == 2) {
		int internal_address = 0;
		int i;

		/* 1st msg is put into the internal address, start with 2nd */
		m_start = &msg[1];
		for (i = 0; i < msg->len; ++i) {
			const unsigned addr = msg->buf[msg->len - 1 - i];

			internal_address |= addr << (8 * i);
			int_addr_flag += AT91_TWI_IADRSZ_1;
		}
		at91_twi_write(dev, AT91_TWI_IADR, internal_address);
	}

	dev->use_alt_cmd = false;
	is_read = (m_start->flags & I2C_M_RD);
	if (dev->pdata->has_alt_cmd) {
		if (m_start->len > 0 &&
		    m_start->len < AT91_I2C_MAX_ALT_CMD_DATA_SIZE) {
			at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_ACMEN);
			at91_twi_write(dev, AT91_TWI_ACR,
				       AT91_TWI_ACR_DATAL(m_start->len) |
				       ((is_read) ? AT91_TWI_ACR_DIR : 0));
			dev->use_alt_cmd = true;
		} else {
			at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_ACMDIS);
		}
	}

	at91_twi_write(dev, AT91_TWI_MMR,
		       (m_start->addr << 16) |
		       int_addr_flag |
		       ((!dev->use_alt_cmd && is_read) ? AT91_TWI_MREAD : 0));

	dev->buf_len = m_start->len;
	dev->buf = m_start->buf;
	dev->msg = m_start;
	dev->recv_len_abort = false;

	ret = at91_do_twi_transfer(dev);

	ret = (ret < 0) ? ret : num;
out:
	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);

	return ret;
}

/*
 * The hardware can handle at most two messages concatenated by a
 * repeated start via its internal address feature.
 */
static const struct i2c_adapter_quirks at91_twi_quirks = {
	.flags = I2C_AQ_COMB | I2C_AQ_COMB_WRITE_FIRST | I2C_AQ_COMB_SAME_ADDR,
	.max_comb_1st_msg_len = 3,
};

static u32 at91_twi_func(struct i2c_adapter *adapter)
{
	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL
		| I2C_FUNC_SMBUS_READ_BLOCK_DATA;
}

static const struct i2c_algorithm at91_twi_algorithm = {
	.master_xfer = at91_twi_xfer,
	.functionality = at91_twi_func,
};

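/*
 * Request and configure the TX and RX DMA channels. The slave bus width is
 * 4 bytes when the controller has FIFOs and a single byte otherwise.
 */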
static int at91_twi_configure_dma(struct at91_twi_dev *dev, u32 phy_addr)
{
	int ret = 0;
	struct dma_slave_config slave_config;
	struct at91_twi_dma *dma = &dev->dma;
	enum dma_slave_buswidth addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;

	/*
	 * The actual width of the access will be chosen in
	 * dmaengine_prep_slave_sg():
	 * for each buffer in the scatter-gather list, if its size is aligned
	 * to addr_width then addr_width accesses will be performed to transfer
	 * the buffer. On the other hand, if the buffer size is not aligned to
	 * addr_width then the buffer is transferred using single byte accesses.
	 * Please refer to the Atmel eXtended DMA controller driver.
	 * When FIFOs are used, the TXRDYM threshold can always be set to
	 * trigger the XDMAC when at least 4 data can be written into the TX
	 * FIFO, even if single byte accesses are performed.
	 * However the RXRDYM threshold must be set to fit the access width,
	 * deduced from buffer length, so the XDMAC is triggered properly to
	 * read data from the RX FIFO.
	 */
	if (dev->fifo_size)
		addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;

	memset(&slave_config, 0, sizeof(slave_config));
	slave_config.src_addr = (dma_addr_t)phy_addr + AT91_TWI_RHR;
	slave_config.src_addr_width = addr_width;
	slave_config.src_maxburst = 1;
	slave_config.dst_addr = (dma_addr_t)phy_addr + AT91_TWI_THR;
	slave_config.dst_addr_width = addr_width;
	slave_config.dst_maxburst = 1;
	slave_config.device_fc = false;

	dma->chan_tx = dma_request_chan(dev->dev, "tx");
	if (IS_ERR(dma->chan_tx)) {
		ret = PTR_ERR(dma->chan_tx);
		dma->chan_tx = NULL;
		goto error;
	}

	dma->chan_rx = dma_request_chan(dev->dev, "rx");
	if (IS_ERR(dma->chan_rx)) {
		ret = PTR_ERR(dma->chan_rx);
		dma->chan_rx = NULL;
		goto error;
	}

	slave_config.direction = DMA_MEM_TO_DEV;
	if (dmaengine_slave_config(dma->chan_tx, &slave_config)) {
		dev_err(dev->dev, "failed to configure tx channel\n");
		ret = -EINVAL;
		goto error;
	}

	slave_config.direction = DMA_DEV_TO_MEM;
	if (dmaengine_slave_config(dma->chan_rx, &slave_config)) {
		dev_err(dev->dev, "failed to configure rx channel\n");
		ret = -EINVAL;
		goto error;
	}

	sg_init_table(dma->sg, 2);
	dma->buf_mapped = false;
	dma->xfer_in_progress = false;
	dev->use_dma = true;

	dev_info(dev->dev, "using %s (tx) and %s (rx) for DMA transfers\n",
		 dma_chan_name(dma->chan_tx), dma_chan_name(dma->chan_rx));

	return ret;

error:
	if (ret != -EPROBE_DEFER)
		dev_info(dev->dev, "can't get DMA channel, continue without DMA support\n");
	if (dma->chan_rx)
		dma_release_channel(dma->chan_rx);
	if (dma->chan_tx)
		dma_release_channel(dma->chan_tx);
	return ret;
}

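/*
 * Fallback recovery for controllers without a CLEAR command: hand the
 * pinctrl handle to the i2c core so it can switch the pins and toggle them
 * as GPIOs during bus recovery.
 */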
static int at91_init_twi_recovery_gpio(struct platform_device *pdev,
				       struct at91_twi_dev *dev)
{
	struct i2c_bus_recovery_info *rinfo = &dev->rinfo;

	rinfo->pinctrl = devm_pinctrl_get(&pdev->dev);
	if (!rinfo->pinctrl || IS_ERR(rinfo->pinctrl)) {
		dev_info(dev->dev, "can't get pinctrl, bus recovery not supported\n");
		return PTR_ERR(rinfo->pinctrl);
	}
	dev->adapter.bus_recovery_info = rinfo;

	return 0;
}

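/*
 * recover_bus callback for controllers with a CLEAR command: if SDA is
 * stuck low, issue the bus clear command (clearing the alternative command
 * byte count first if it is in use).
 */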
Codrin Ciubotariu73371d52020-02-25 17:50:09 +0200833static int at91_twi_recover_bus_cmd(struct i2c_adapter *adap)
834{
835 struct at91_twi_dev *dev = i2c_get_adapdata(adap);
836
837 dev->transfer_status |= at91_twi_read(dev, AT91_TWI_SR);
838 if (!(dev->transfer_status & AT91_TWI_SDA)) {
839 dev_dbg(dev->dev, "SDA is down; sending bus clear command\n");
840 if (dev->use_alt_cmd) {
841 unsigned int acr;
842
843 acr = at91_twi_read(dev, AT91_TWI_ACR);
844 acr &= ~AT91_TWI_ACR_DATAL_MASK;
845 at91_twi_write(dev, AT91_TWI_ACR, acr);
846 }
847 at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_CLEAR);
848 }
849
850 return 0;
851}
852
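/*
 * Pick the bus recovery method: use the controller's CLEAR command when
 * available, otherwise fall back to pinctrl/GPIO based recovery.
 */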
static int at91_init_twi_recovery_info(struct platform_device *pdev,
				       struct at91_twi_dev *dev)
{
	struct i2c_bus_recovery_info *rinfo = &dev->rinfo;
	bool has_clear_cmd = dev->pdata->has_clear_cmd;

	if (!has_clear_cmd)
		return at91_init_twi_recovery_gpio(pdev, dev);

	rinfo->recover_bus = at91_twi_recover_bus_cmd;
	dev->adapter.bus_recovery_info = rinfo;

	return 0;
}

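/*
 * Master-mode part of the probe: request the interrupt, set up DMA when the
 * device is described in the device tree, read the FIFO size and filter
 * properties, compute the clock configuration and register the recovery
 * method, algorithm and adapter quirks.
 */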
int at91_twi_probe_master(struct platform_device *pdev,
			  u32 phy_addr, struct at91_twi_dev *dev)
{
	int rc;

	init_completion(&dev->cmd_complete);

	rc = devm_request_irq(&pdev->dev, dev->irq, atmel_twi_interrupt, 0,
			      dev_name(dev->dev), dev);
	if (rc) {
		dev_err(dev->dev, "Cannot get irq %d: %d\n", dev->irq, rc);
		return rc;
	}

	if (dev->dev->of_node) {
		rc = at91_twi_configure_dma(dev, phy_addr);
		if (rc == -EPROBE_DEFER)
			return rc;
	}

	if (!of_property_read_u32(pdev->dev.of_node, "atmel,fifo-size",
				  &dev->fifo_size)) {
		dev_info(dev->dev, "Using FIFO (%u data)\n", dev->fifo_size);
	}

	dev->enable_dig_filt = of_property_read_bool(pdev->dev.of_node,
						     "i2c-digital-filter");

	dev->enable_ana_filt = of_property_read_bool(pdev->dev.of_node,
						     "i2c-analog-filter");
	at91_calc_twi_clock(dev);

	rc = at91_init_twi_recovery_info(pdev, dev);
	if (rc == -EPROBE_DEFER)
		return rc;

	dev->adapter.algo = &at91_twi_algorithm;
	dev->adapter.quirks = &at91_twi_quirks;

	return 0;
}