// SPDX-License-Identifier: GPL-2.0+
/*
 * Freescale GPMI NAND Flash Driver
 *
 * Copyright (C) 2010-2015 Freescale Semiconductor, Inc.
 * Copyright (C) 2008 Embedded Alley Solutions, Inc.
 */
#include <linux/clk.h>
#include <linux/slab.h>
#include <linux/sched/task_stack.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/mtd/partitions.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include "gpmi-nand.h"
#include "bch-regs.h"

/* Resource names for the GPMI NAND driver. */
#define GPMI_NAND_GPMI_REGS_ADDR_RES_NAME  "gpmi-nand"
#define GPMI_NAND_BCH_REGS_ADDR_RES_NAME   "bch"
#define GPMI_NAND_BCH_INTERRUPT_RES_NAME   "bch"

/* Add our own bad block table (bbt) descriptor. */
static uint8_t scan_ff_pattern[] = { 0xff };
static struct nand_bbt_descr gpmi_bbt_descr = {
        .options = 0,
        .offs    = 0,
        .len     = 1,
        .pattern = scan_ff_pattern
};

/*
 * We may change the layout if we can get the ECC info from the datasheet;
 * otherwise we use all of the (page + OOB) area.
 */
static int gpmi_ooblayout_ecc(struct mtd_info *mtd, int section,
                              struct mtd_oob_region *oobregion)
{
        struct nand_chip *chip = mtd_to_nand(mtd);
        struct gpmi_nand_data *this = nand_get_controller_data(chip);
        struct bch_geometry *geo = &this->bch_geometry;

        if (section)
                return -ERANGE;

        oobregion->offset = 0;
        oobregion->length = geo->page_size - mtd->writesize;

        return 0;
}

static int gpmi_ooblayout_free(struct mtd_info *mtd, int section,
                               struct mtd_oob_region *oobregion)
{
        struct nand_chip *chip = mtd_to_nand(mtd);
        struct gpmi_nand_data *this = nand_get_controller_data(chip);
        struct bch_geometry *geo = &this->bch_geometry;

        if (section)
                return -ERANGE;

        /* The available OOB size we have. */
        if (geo->page_size < mtd->writesize + mtd->oobsize) {
                oobregion->offset = geo->page_size - mtd->writesize;
                oobregion->length = mtd->oobsize - oobregion->offset;
        }

        return 0;
}
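
/*
 * Worked example for the two callbacks above (illustrative numbers, not from
 * a specific datasheet): for a 2KiB page with 64 bytes of OOB and a BCH
 * page_size of 2110 bytes, gpmi_ooblayout_ecc() reports an ECC region of
 * 2110 - 2048 = 62 bytes at offset 0 of the OOB, and gpmi_ooblayout_free()
 * reports the remaining 64 - 62 = 2 bytes as free. If page_size consumed the
 * whole (page + OOB) area, no free OOB would be reported at all.
 */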

static const char * const gpmi_clks_for_mx2x[] = {
        "gpmi_io",
};

static const struct mtd_ooblayout_ops gpmi_ooblayout_ops = {
        .ecc = gpmi_ooblayout_ecc,
        .free = gpmi_ooblayout_free,
};

static const struct gpmi_devdata gpmi_devdata_imx23 = {
        .type = IS_MX23,
        .bch_max_ecc_strength = 20,
        .max_chain_delay = 16000,
        .clks = gpmi_clks_for_mx2x,
        .clks_count = ARRAY_SIZE(gpmi_clks_for_mx2x),
};

static const struct gpmi_devdata gpmi_devdata_imx28 = {
        .type = IS_MX28,
        .bch_max_ecc_strength = 20,
        .max_chain_delay = 16000,
        .clks = gpmi_clks_for_mx2x,
        .clks_count = ARRAY_SIZE(gpmi_clks_for_mx2x),
};

static const char * const gpmi_clks_for_mx6[] = {
        "gpmi_io", "gpmi_apb", "gpmi_bch", "gpmi_bch_apb", "per1_bch",
};

static const struct gpmi_devdata gpmi_devdata_imx6q = {
        .type = IS_MX6Q,
        .bch_max_ecc_strength = 40,
        .max_chain_delay = 12000,
        .clks = gpmi_clks_for_mx6,
        .clks_count = ARRAY_SIZE(gpmi_clks_for_mx6),
};

static const struct gpmi_devdata gpmi_devdata_imx6sx = {
        .type = IS_MX6SX,
        .bch_max_ecc_strength = 62,
        .max_chain_delay = 12000,
        .clks = gpmi_clks_for_mx6,
        .clks_count = ARRAY_SIZE(gpmi_clks_for_mx6),
};

static const char * const gpmi_clks_for_mx7d[] = {
        "gpmi_io", "gpmi_bch_apb",
};

static const struct gpmi_devdata gpmi_devdata_imx7d = {
        .type = IS_MX7D,
        .bch_max_ecc_strength = 62,
        .max_chain_delay = 12000,
        .clks = gpmi_clks_for_mx7d,
        .clks_count = ARRAY_SIZE(gpmi_clks_for_mx7d),
};

static irqreturn_t bch_irq(int irq, void *cookie)
{
        struct gpmi_nand_data *this = cookie;

        gpmi_clear_bch(this);
        complete(&this->bch_done);
        return IRQ_HANDLED;
}

/*
 * Calculate the ECC strength by hand:
 *      E : The ECC strength.
 *      G : the length of the Galois Field.
 *      N : The chunk count per page.
 *      O : the oobsize of the NAND chip.
 *      M : the metadata size per page.
 *
 * The formula is:
 *          E * G * N
 *         ----------- <= (O - M)
 *              8
 *
 * So, we get E by:
 *              (O - M) * 8
 *         E <= -----------
 *                 G * N
 */
static inline int get_ecc_strength(struct gpmi_nand_data *this)
{
        struct bch_geometry *geo = &this->bch_geometry;
        struct mtd_info *mtd = nand_to_mtd(&this->nand);
        int ecc_strength;

        ecc_strength = ((mtd->oobsize - geo->metadata_size) * 8)
                        / (geo->gf_len * geo->ecc_chunk_count);

        /* We need the nearest lower even number. */
        return round_down(ecc_strength, 2);
}
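
/*
 * Worked example for the formula above (illustrative geometry, not tied to a
 * particular part): a 4KiB page with 224 bytes of OOB, M = 10, G = 13 and
 * N = 4096 / 512 = 8 chunks gives
 *
 *         (224 - 10) * 8
 *    E <= -------------- = 16.46...,
 *             13 * 8
 *
 * which round_down(..., 2) turns into an ECC strength of 16 bits per chunk.
 */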

static inline bool gpmi_check_ecc(struct gpmi_nand_data *this)
{
        struct bch_geometry *geo = &this->bch_geometry;

        /* Do the sanity check. */
        if (GPMI_IS_MX23(this) || GPMI_IS_MX28(this)) {
                /* The mx23/mx28 only support GF13. */
                if (geo->gf_len == 14)
                        return false;
        }
        return geo->ecc_strength <= this->devdata->bch_max_ecc_strength;
}

/*
 * If we can get the ECC information from the NAND chip, we do not
 * need to calculate it ourselves.
 *
 * We may have free OOB space in this case.
 */
static int set_geometry_by_ecc_info(struct gpmi_nand_data *this,
                                    unsigned int ecc_strength,
                                    unsigned int ecc_step)
{
        struct bch_geometry *geo = &this->bch_geometry;
        struct nand_chip *chip = &this->nand;
        struct mtd_info *mtd = nand_to_mtd(chip);
        unsigned int block_mark_bit_offset;

        switch (ecc_step) {
        case SZ_512:
                geo->gf_len = 13;
                break;
        case SZ_1K:
                geo->gf_len = 14;
                break;
        default:
                dev_err(this->dev,
                        "unsupported nand chip. ecc bits : %d, ecc size : %d\n",
                        chip->ecc_strength_ds, chip->ecc_step_ds);
                return -EINVAL;
        }
        geo->ecc_chunk_size = ecc_step;
        geo->ecc_strength = round_up(ecc_strength, 2);
        if (!gpmi_check_ecc(this))
                return -EINVAL;

        /* Keep C >= O */
        if (geo->ecc_chunk_size < mtd->oobsize) {
                dev_err(this->dev,
                        "unsupported nand chip. ecc size: %d, oob size : %d\n",
                        ecc_step, mtd->oobsize);
                return -EINVAL;
        }

        /* The default value, see the comment in legacy_set_geometry(). */
        geo->metadata_size = 10;

        geo->ecc_chunk_count = mtd->writesize / geo->ecc_chunk_size;

        /*
         * A NAND chip with a 2K page (512-byte data chunks) looks like this:
         *
         *    |                              P                               |
         *    |<------------------------------------------------------------>|
         *    |                                                               |
         *    |                                       (Block Mark)            |
         *    |                     P'                      |     |           |
         *    |<------------------------------------------->|  D  |     | O'  |
         *    |                                              |<--->|     |<--->|
         *    V                                              V     V     V     V
         *    +---+----------+-+----------+-+----------+-+----------+-+-----+
         *    | M |   data   |E|   data   |E|   data   |E|   data   |E|     |
         *    +---+----------+-+----------+-+----------+-+----------+-+-----+
         *                                                 ^               ^
         *                                                 |       O       |
         *                                                 |<------------->|
         *                                                 |               |
         *
         *      P : the page size for the BCH module.
         *      E : The ECC strength.
         *      G : the length of the Galois Field.
         *      N : The chunk count per page.
         *      M : the metadata size per page.
         *      C : the ECC chunk size, aka the "data" above.
         *      P': the nand chip's page size.
         *      O : the nand chip's oob size.
         *      O': the free oob.
         *
         * The formula for P is:
         *
         *                  E * G * N
         *             P = ----------- + P' + M
         *                      8
         *
         * The position of the block mark moves forward in the ECC-based view
         * of the page, and the delta is:
         *
         *                  E * G * (N - 1)
         *           D = (----------------- + M)
         *                         8
         *
         * Please see the comment in legacy_set_geometry().
         * With the condition C >= O, we still get the same result.
         * So the bit position of the physical block mark within the ECC-based
         * view of the page is:
         *           (P' - D) * 8
         */
        geo->page_size = mtd->writesize + geo->metadata_size +
                (geo->gf_len * geo->ecc_strength * geo->ecc_chunk_count) / 8;

        geo->payload_size = mtd->writesize;

        geo->auxiliary_status_offset = ALIGN(geo->metadata_size, 4);
        geo->auxiliary_size = ALIGN(geo->metadata_size, 4)
                        + ALIGN(geo->ecc_chunk_count, 4);

        if (!this->swap_block_mark)
                return 0;

        /* For bit swap. */
        block_mark_bit_offset = mtd->writesize * 8 -
                (geo->ecc_strength * geo->gf_len * (geo->ecc_chunk_count - 1)
                                + geo->metadata_size * 8);

        geo->block_mark_byte_offset = block_mark_bit_offset / 8;
        geo->block_mark_bit_offset  = block_mark_bit_offset % 8;
        return 0;
}
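
/*
 * Worked example (illustrative, assuming a hypothetical 2KiB/64B chip that
 * reports ecc_step = 512 and ecc_strength = 8): G = 13, N = 2048 / 512 = 4,
 * E = round_up(8, 2) = 8, so
 *
 *    P = (8 * 13 * 4) / 8 + 2048 + 10 = 52 + 2058 = 2110 bytes,
 *
 * leaving 64 - 62 = 2 bytes of free OOB. With block mark swapping enabled,
 * block_mark_bit_offset = 2048 * 8 - (8 * 13 * 3 + 10 * 8) = 15992, which the
 * swap code later uses as byte 1999, bit 0 of the payload buffer.
 */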

static int legacy_set_geometry(struct gpmi_nand_data *this)
{
        struct bch_geometry *geo = &this->bch_geometry;
        struct mtd_info *mtd = nand_to_mtd(&this->nand);
        unsigned int metadata_size;
        unsigned int status_size;
        unsigned int block_mark_bit_offset;

        /*
         * The size of the metadata can be changed, though we set it to 10
         * bytes now. But it can't be too large, because we have to save
         * enough space for BCH.
         */
        geo->metadata_size = 10;

        /* The default for the length of the Galois Field. */
        geo->gf_len = 13;

        /* The default for the chunk size. */
        geo->ecc_chunk_size = 512;
        while (geo->ecc_chunk_size < mtd->oobsize) {
                geo->ecc_chunk_size *= 2; /* keep C >= O */
                geo->gf_len = 14;
        }

        geo->ecc_chunk_count = mtd->writesize / geo->ecc_chunk_size;

        /* We use the same ECC strength for all chunks. */
        geo->ecc_strength = get_ecc_strength(this);
        if (!gpmi_check_ecc(this)) {
                dev_err(this->dev,
                        "ecc strength: %d cannot be supported by the controller (%d)\n"
                        "try to use minimum ecc strength that NAND chip required\n",
                        geo->ecc_strength,
                        this->devdata->bch_max_ecc_strength);
                return -EINVAL;
        }

        geo->page_size = mtd->writesize + geo->metadata_size +
                (geo->gf_len * geo->ecc_strength * geo->ecc_chunk_count) / 8;
        geo->payload_size = mtd->writesize;

        /*
         * The auxiliary buffer contains the metadata and the ECC status. The
         * metadata is padded to the nearest 32-bit boundary. The ECC status
         * contains one byte for every ECC chunk, and is also padded to the
         * nearest 32-bit boundary.
         */
        metadata_size = ALIGN(geo->metadata_size, 4);
        status_size   = ALIGN(geo->ecc_chunk_count, 4);

        geo->auxiliary_size = metadata_size + status_size;
        geo->auxiliary_status_offset = metadata_size;

        if (!this->swap_block_mark)
                return 0;

        /*
         * We need to compute the byte and bit offsets of
         * the physical block mark within the ECC-based view of the page.
         *
         * A NAND chip with a 2K page looks like this:
         *                                             (Block Mark)
         *                                                   |      |
         *                                                   |  D   |
         *                                                   |<---->|
         *                                                   V      V
         *    +---+----------+-+----------+-+----------+-+----------+-+
         *    | M |   data   |E|   data   |E|   data   |E|   data   |E|
         *    +---+----------+-+----------+-+----------+-+----------+-+
         *
         * The position of the block mark moves forward in the ECC-based view
         * of the page, and the delta is:
         *
         *                  E * G * (N - 1)
         *           D = (----------------- + M)
         *                         8
         *
         * With the formula to compute the ECC strength, and the condition
         * C >= O (C is the ecc chunk size),
         *
         * it's easy to deduce the following result:
         *
         *        E * G       (O - M)      C - M        C - M
         *      --------- <= --------- <= -------  <  ---------
         *          8            N           N         (N - 1)
         *
         * So, we get:
         *
         *                  E * G * (N - 1)
         *           D = (----------------- + M) < C
         *                         8
         *
         * The above inequality means the position of the block mark
         * within the ECC-based view of the page is still in the data chunk,
         * and it's NOT in the ECC bits of the chunk.
         *
         * Use the following to compute the bit position of the
         * physical block mark within the ECC-based view of the page:
         *           (page_size - D) * 8
         *
         *                                              -- Huang Shijie
         */
        block_mark_bit_offset = mtd->writesize * 8 -
                (geo->ecc_strength * geo->gf_len * (geo->ecc_chunk_count - 1)
                                + geo->metadata_size * 8);

        geo->block_mark_byte_offset = block_mark_bit_offset / 8;
        geo->block_mark_bit_offset  = block_mark_bit_offset % 8;
        return 0;
}
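
/*
 * Worked example for the legacy path (an illustrative 8KiB-page part with
 * 744 bytes of OOB, not a specific datasheet): 512 < 744, so the chunk-size
 * loop above bumps the chunk size to 1024 and gf_len to 14, giving
 * N = 8192 / 1024 = 8 chunks and
 *
 *    E = round_down((744 - 10) * 8 / (14 * 8), 2) = round_down(52, 2) = 52.
 *
 * That passes gpmi_check_ecc() on an i.MX6SX-class controller
 * (bch_max_ecc_strength = 62) but would be rejected on an i.MX6Q (40).
 */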

int common_nfc_set_geometry(struct gpmi_nand_data *this)
{
        struct nand_chip *chip = &this->nand;

        if (chip->ecc.strength > 0 && chip->ecc.size > 0)
                return set_geometry_by_ecc_info(this, chip->ecc.strength,
                                                chip->ecc.size);

        if ((of_property_read_bool(this->dev->of_node, "fsl,use-minimum-ecc"))
                                || legacy_set_geometry(this)) {
                if (!(chip->ecc_strength_ds > 0 && chip->ecc_step_ds > 0))
                        return -EINVAL;

                return set_geometry_by_ecc_info(this, chip->ecc_strength_ds,
                                                chip->ecc_step_ds);
        }

        return 0;
}
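
/*
 * Geometry selection can be driven from the device tree. A sketch of a node
 * (the node label and exact property set depend on the board and SoC dtsi,
 * so treat this as an assumption rather than a reference binding):
 *
 *      &gpmi {
 *              status = "okay";
 *              nand-ecc-strength = <8>;        // feeds chip->ecc.strength
 *              nand-ecc-step-size = <512>;     // feeds chip->ecc.size
 *              // or, alternatively:
 *              // fsl,use-minimum-ecc;         // fall back to ecc_*_ds values
 *      };
 */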

struct dma_chan *get_dma_chan(struct gpmi_nand_data *this)
{
        /* We use DMA channel 0 to access all the nand chips. */
        return this->dma_chans[0];
}

/* Can we use the upper layer's buffer directly for DMA? */
bool prepare_data_dma(struct gpmi_nand_data *this, const void *buf, int len,
                      enum dma_data_direction dr)
{
        struct scatterlist *sgl = &this->data_sgl;
        int ret;

        /* first try to map the upper buffer directly */
        if (virt_addr_valid(buf) && !object_is_on_stack(buf)) {
                sg_init_one(sgl, buf, len);
                ret = dma_map_sg(this->dev, sgl, 1, dr);
                if (ret == 0)
                        goto map_fail;

                return true;
        }

map_fail:
        /* We have to use our own DMA buffer. */
        sg_init_one(sgl, this->data_buffer_dma, len);

        if (dr == DMA_TO_DEVICE)
                memcpy(this->data_buffer_dma, buf, len);

        dma_map_sg(this->dev, sgl, 1, dr);

        return false;
}
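
/*
 * Note on the helper above: the direct-mapping path is only taken for lowmem
 * buffers that are not on the stack (vmalloc'ed or on-stack buffers fail the
 * virt_addr_valid()/object_is_on_stack() checks, and a failed dma_map_sg()
 * also bails out); every other case falls back to the driver's own bounce
 * buffer, and the return value tells the caller which buffer the data will
 * actually go through.
 */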

/* This will be called after the DMA operation is finished. */
static void dma_irq_callback(void *param)
{
        struct gpmi_nand_data *this = param;
        struct completion *dma_c = &this->dma_done;

        complete(dma_c);
}

int start_dma_without_bch_irq(struct gpmi_nand_data *this,
                              struct dma_async_tx_descriptor *desc)
{
        struct completion *dma_c = &this->dma_done;
        unsigned long timeout;

        init_completion(dma_c);

        desc->callback = dma_irq_callback;
        desc->callback_param = this;
        dmaengine_submit(desc);
        dma_async_issue_pending(get_dma_chan(this));

        /* Wait for the interrupt from the DMA block. */
        timeout = wait_for_completion_timeout(dma_c, msecs_to_jiffies(1000));
        if (!timeout) {
                dev_err(this->dev, "DMA timeout, last DMA\n");
                gpmi_dump_info(this);
                return -ETIMEDOUT;
        }
        return 0;
}

/*
 * This function is used for BCH page reads and writes.
 * It waits for the BCH interrupt for at most ONE second.
 * Actually, we must wait for two interrupts:
 *      [1] first the DMA interrupt and
 *      [2] then the BCH interrupt.
 */
int start_dma_with_bch_irq(struct gpmi_nand_data *this,
                           struct dma_async_tx_descriptor *desc)
{
        struct completion *bch_c = &this->bch_done;
        unsigned long timeout;

        /* Prepare to receive an interrupt from the BCH block. */
        init_completion(bch_c);

        /* start the DMA */
        start_dma_without_bch_irq(this, desc);

        /* Wait for the interrupt from the BCH block. */
        timeout = wait_for_completion_timeout(bch_c, msecs_to_jiffies(1000));
        if (!timeout) {
                dev_err(this->dev, "BCH timeout\n");
                gpmi_dump_info(this);
                return -ETIMEDOUT;
        }
        return 0;
}

static int acquire_register_block(struct gpmi_nand_data *this,
                                  const char *res_name)
{
        struct platform_device *pdev = this->pdev;
        struct resources *res = &this->resources;
        struct resource *r;
        void __iomem *p;

        r = platform_get_resource_byname(pdev, IORESOURCE_MEM, res_name);
        p = devm_ioremap_resource(&pdev->dev, r);
        if (IS_ERR(p))
                return PTR_ERR(p);

        if (!strcmp(res_name, GPMI_NAND_GPMI_REGS_ADDR_RES_NAME))
                res->gpmi_regs = p;
        else if (!strcmp(res_name, GPMI_NAND_BCH_REGS_ADDR_RES_NAME))
                res->bch_regs = p;
        else
                dev_err(this->dev, "unknown resource name : %s\n", res_name);

        return 0;
}

static int acquire_bch_irq(struct gpmi_nand_data *this, irq_handler_t irq_h)
{
        struct platform_device *pdev = this->pdev;
        const char *res_name = GPMI_NAND_BCH_INTERRUPT_RES_NAME;
        struct resource *r;
        int err;

        r = platform_get_resource_byname(pdev, IORESOURCE_IRQ, res_name);
        if (!r) {
                dev_err(this->dev, "Can't get resource for %s\n", res_name);
                return -ENODEV;
        }

        err = devm_request_irq(this->dev, r->start, irq_h, 0, res_name, this);
        if (err)
                dev_err(this->dev, "error requesting BCH IRQ\n");

        return err;
}

static void release_dma_channels(struct gpmi_nand_data *this)
{
        unsigned int i;
        for (i = 0; i < DMA_CHANS; i++)
                if (this->dma_chans[i]) {
                        dma_release_channel(this->dma_chans[i]);
                        this->dma_chans[i] = NULL;
                }
}

static int acquire_dma_channels(struct gpmi_nand_data *this)
{
        struct platform_device *pdev = this->pdev;
        struct dma_chan *dma_chan;

        /* request dma channel */
        dma_chan = dma_request_slave_channel(&pdev->dev, "rx-tx");
        if (!dma_chan) {
                dev_err(this->dev, "Failed to request DMA channel.\n");
                goto acquire_err;
        }

        this->dma_chans[0] = dma_chan;
        return 0;

acquire_err:
        release_dma_channels(this);
        return -EINVAL;
}

static int gpmi_get_clks(struct gpmi_nand_data *this)
{
        struct resources *r = &this->resources;
        struct clk *clk;
        int err, i;

        for (i = 0; i < this->devdata->clks_count; i++) {
                clk = devm_clk_get(this->dev, this->devdata->clks[i]);
                if (IS_ERR(clk)) {
                        err = PTR_ERR(clk);
                        goto err_clock;
                }

                r->clock[i] = clk;
        }

        if (GPMI_IS_MX6(this))
                /*
                 * Set the default value for the gpmi clock.
                 *
                 * If you want to use an ONFI NAND running in synchronous
                 * mode, change the clock rate as needed.
                 */
                clk_set_rate(r->clock[0], 22000000);

        return 0;

err_clock:
        dev_dbg(this->dev, "failed in finding the clocks.\n");
        return err;
}

static int acquire_resources(struct gpmi_nand_data *this)
{
        int ret;

        ret = acquire_register_block(this, GPMI_NAND_GPMI_REGS_ADDR_RES_NAME);
        if (ret)
                goto exit_regs;

        ret = acquire_register_block(this, GPMI_NAND_BCH_REGS_ADDR_RES_NAME);
        if (ret)
                goto exit_regs;

        ret = acquire_bch_irq(this, bch_irq);
        if (ret)
                goto exit_regs;

        ret = acquire_dma_channels(this);
        if (ret)
                goto exit_regs;

        ret = gpmi_get_clks(this);
        if (ret)
                goto exit_clock;
        return 0;

exit_clock:
        release_dma_channels(this);
exit_regs:
        return ret;
}

static void release_resources(struct gpmi_nand_data *this)
{
        release_dma_channels(this);
}

static int send_page_prepare(struct gpmi_nand_data *this,
                             const void *source, unsigned length,
                             void *alt_virt, dma_addr_t alt_phys, unsigned alt_size,
                             const void **use_virt, dma_addr_t *use_phys)
{
        struct device *dev = this->dev;

        if (virt_addr_valid(source)) {
                dma_addr_t source_phys;

                source_phys = dma_map_single(dev, (void *)source, length,
                                             DMA_TO_DEVICE);
                if (dma_mapping_error(dev, source_phys)) {
                        if (alt_size < length) {
                                dev_err(dev, "Alternate buffer is too small\n");
                                return -ENOMEM;
                        }
                        goto map_failed;
                }
                *use_virt = source;
                *use_phys = source_phys;
                return 0;
        }
map_failed:
        /*
         * Copy the content of the source buffer into the alternate
         * buffer and set up the return values accordingly.
         */
        memcpy(alt_virt, source, length);

        *use_virt = alt_virt;
        *use_phys = alt_phys;
        return 0;
}

static void send_page_end(struct gpmi_nand_data *this,
                          const void *source, unsigned length,
                          void *alt_virt, dma_addr_t alt_phys, unsigned alt_size,
                          const void *used_virt, dma_addr_t used_phys)
{
        struct device *dev = this->dev;

        if (used_virt == source)
                dma_unmap_single(dev, used_phys, length, DMA_TO_DEVICE);
}

static void gpmi_free_dma_buffer(struct gpmi_nand_data *this)
{
        struct device *dev = this->dev;

        if (this->page_buffer_virt && virt_addr_valid(this->page_buffer_virt))
                dma_free_coherent(dev, this->page_buffer_size,
                                  this->page_buffer_virt,
                                  this->page_buffer_phys);
        kfree(this->cmd_buffer);
        kfree(this->data_buffer_dma);
        kfree(this->raw_buffer);

        this->cmd_buffer       = NULL;
        this->data_buffer_dma  = NULL;
        this->raw_buffer       = NULL;
        this->page_buffer_virt = NULL;
        this->page_buffer_size = 0;
}

/* Allocate the DMA buffers */
static int gpmi_alloc_dma_buffer(struct gpmi_nand_data *this)
{
        struct bch_geometry *geo = &this->bch_geometry;
        struct device *dev = this->dev;
        struct mtd_info *mtd = nand_to_mtd(&this->nand);

        /* [1] Allocate a command buffer. PAGE_SIZE is enough. */
        this->cmd_buffer = kzalloc(PAGE_SIZE, GFP_DMA | GFP_KERNEL);
        if (this->cmd_buffer == NULL)
                goto error_alloc;

        /*
         * [2] Allocate a read/write data buffer.
         *     The gpmi_alloc_dma_buffer can be called twice.
         *     We allocate a PAGE_SIZE length buffer if gpmi_alloc_dma_buffer
         *     is called before the NAND identification; and we allocate a
         *     buffer of the real NAND page size when the gpmi_alloc_dma_buffer
         *     is called after.
         */
        this->data_buffer_dma = kzalloc(mtd->writesize ?: PAGE_SIZE,
                                        GFP_DMA | GFP_KERNEL);
        if (this->data_buffer_dma == NULL)
                goto error_alloc;

        /*
         * [3] Allocate the page buffer.
         *
         * Both the payload buffer and the auxiliary buffer must appear on
         * 32-bit boundaries. We presume the size of the payload buffer is a
         * power of two and is much larger than four, which guarantees the
         * auxiliary buffer will appear on a 32-bit boundary.
         */
        this->page_buffer_size = geo->payload_size + geo->auxiliary_size;
        this->page_buffer_virt = dma_alloc_coherent(dev, this->page_buffer_size,
                                                    &this->page_buffer_phys, GFP_DMA);
        if (!this->page_buffer_virt)
                goto error_alloc;

        this->raw_buffer = kzalloc(mtd->writesize + mtd->oobsize, GFP_KERNEL);
        if (!this->raw_buffer)
                goto error_alloc;

        /* Slice up the page buffer. */
        this->payload_virt = this->page_buffer_virt;
        this->payload_phys = this->page_buffer_phys;
        this->auxiliary_virt = this->payload_virt + geo->payload_size;
        this->auxiliary_phys = this->payload_phys + geo->payload_size;
        return 0;

error_alloc:
        gpmi_free_dma_buffer(this);
        return -ENOMEM;
}
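
/*
 * Illustrative slicing of the coherent page buffer, using the 2KiB example
 * geometry from earlier (payload_size = 2048, auxiliary_size =
 * ALIGN(10, 4) + ALIGN(4, 4) = 16): bytes 0..2047 hold the deinterleaved
 * payload, the auxiliary part starts at byte 2048 with the 10 metadata bytes
 * (padded to 12), and the four per-chunk ECC status bytes follow at
 * auxiliary_status_offset = 12 within the auxiliary buffer.
 */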

static void gpmi_cmd_ctrl(struct mtd_info *mtd, int data, unsigned int ctrl)
{
        struct nand_chip *chip = mtd_to_nand(mtd);
        struct gpmi_nand_data *this = nand_get_controller_data(chip);
        int ret;

        /*
         * Every operation begins with a command byte and a series of zero or
         * more address bytes. These are distinguished by either the Address
         * Latch Enable (ALE) or Command Latch Enable (CLE) signals being
         * asserted. When MTD is ready to execute the command, it will deassert
         * both latch enables.
         *
         * Rather than run a separate DMA operation for every single byte, we
         * queue them up and run a single DMA operation for the entire series
         * of command and data bytes. NAND_CMD_NONE means the END of the queue.
         */
        if ((ctrl & (NAND_ALE | NAND_CLE))) {
                if (data != NAND_CMD_NONE)
                        this->cmd_buffer[this->command_length++] = data;
                return;
        }

        if (!this->command_length)
                return;

        ret = gpmi_send_command(this);
        if (ret)
                dev_err(this->dev, "Chip: %u, Error %d\n",
                        this->current_chip, ret);

        this->command_length = 0;
}

static int gpmi_dev_ready(struct mtd_info *mtd)
{
        struct nand_chip *chip = mtd_to_nand(mtd);
        struct gpmi_nand_data *this = nand_get_controller_data(chip);

        return gpmi_is_ready(this, this->current_chip);
}

static void gpmi_select_chip(struct mtd_info *mtd, int chipnr)
{
        struct nand_chip *chip = mtd_to_nand(mtd);
        struct gpmi_nand_data *this = nand_get_controller_data(chip);
        int ret;

        /*
         * For power consumption reasons, disable/enable the clock each time a
         * die is selected/unselected.
         */
        if (this->current_chip < 0 && chipnr >= 0) {
                ret = gpmi_enable_clk(this);
                if (ret)
                        dev_err(this->dev, "Failed to enable the clock\n");
        } else if (this->current_chip >= 0 && chipnr < 0) {
                ret = gpmi_disable_clk(this);
                if (ret)
                        dev_err(this->dev, "Failed to disable the clock\n");
        }

        /*
         * This driver currently supports only one NAND chip. Plus, dies share
         * the same configuration. So once timings have been applied on the
         * controller side, they will not change anymore. When that time comes,
         * the check on must_apply_timings will have to be dropped.
         */
        if (chipnr >= 0 && this->hw.must_apply_timings) {
                this->hw.must_apply_timings = false;
                gpmi_nfc_apply_timings(this);
        }

        this->current_chip = chipnr;
}

static void gpmi_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
{
        struct nand_chip *chip = mtd_to_nand(mtd);
        struct gpmi_nand_data *this = nand_get_controller_data(chip);

        dev_dbg(this->dev, "len is %d\n", len);

        gpmi_read_data(this, buf, len);
}

static void gpmi_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
{
        struct nand_chip *chip = mtd_to_nand(mtd);
        struct gpmi_nand_data *this = nand_get_controller_data(chip);

        dev_dbg(this->dev, "len is %d\n", len);

        gpmi_send_data(this, buf, len);
}

static uint8_t gpmi_read_byte(struct mtd_info *mtd)
{
        struct nand_chip *chip = mtd_to_nand(mtd);
        struct gpmi_nand_data *this = nand_get_controller_data(chip);
        uint8_t *buf = this->data_buffer_dma;

        gpmi_read_buf(mtd, buf, 1);
        return buf[0];
}

/*
 * Handles block mark swapping.
 * It can be called to swap the block mark, or to swap it back,
 * because the operations are the same.
 */
static void block_mark_swapping(struct gpmi_nand_data *this,
                                void *payload, void *auxiliary)
{
        struct bch_geometry *nfc_geo = &this->bch_geometry;
        unsigned char *p;
        unsigned char *a;
        unsigned int  bit;
        unsigned char mask;
        unsigned char from_data;
        unsigned char from_oob;

        if (!this->swap_block_mark)
                return;

        /*
         * If control arrives here, we're swapping. Make some convenience
         * variables.
         */
        bit = nfc_geo->block_mark_bit_offset;
        p   = payload + nfc_geo->block_mark_byte_offset;
        a   = auxiliary;

        /*
         * Get the byte from the data area that overlays the block mark. Since
         * the ECC engine applies its own view to the bits in the page, the
         * physical block mark won't (in general) appear on a byte boundary in
         * the data.
         */
        from_data = (p[0] >> bit) | (p[1] << (8 - bit));

        /* Get the byte from the OOB. */
        from_oob = a[0];

        /* Swap them. */
        a[0] = from_data;

        mask = (0x1 << bit) - 1;
        p[0] = (p[0] & mask) | (from_oob << bit);

        mask = ~0 << bit;
        p[1] = (p[1] & mask) | (from_oob >> (8 - bit));
}
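
/*
 * Worked example of the splice above, assuming block_mark_bit_offset = 2 (an
 * arbitrary value for illustration): the byte that physically overlays the
 * block mark is reassembled from the top six bits of p[0] and the low two
 * bits of p[1] (from_data = (p[0] >> 2) | (p[1] << 6)), and the OOB byte is
 * written back with the same split: its low six bits land in bits 7..2 of
 * p[0], its top two bits in bits 1..0 of p[1].
 */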

static int gpmi_ecc_read_page_data(struct nand_chip *chip,
                                   uint8_t *buf, int oob_required,
                                   int page)
{
        struct gpmi_nand_data *this = nand_get_controller_data(chip);
        struct bch_geometry *nfc_geo = &this->bch_geometry;
        struct mtd_info *mtd = nand_to_mtd(chip);
        dma_addr_t payload_phys;
        unsigned int i;
        unsigned char *status;
        unsigned int max_bitflips = 0;
        int ret;
        bool direct = false;

        dev_dbg(this->dev, "page number is : %d\n", page);

        payload_phys = this->payload_phys;

        if (virt_addr_valid(buf)) {
                dma_addr_t dest_phys;

                dest_phys = dma_map_single(this->dev, buf, nfc_geo->payload_size,
                                           DMA_FROM_DEVICE);
                if (!dma_mapping_error(this->dev, dest_phys)) {
                        payload_phys = dest_phys;
                        direct = true;
                }
        }

        /* go! */
        ret = gpmi_read_page(this, payload_phys, this->auxiliary_phys);

        if (direct)
                dma_unmap_single(this->dev, payload_phys, nfc_geo->payload_size,
                                 DMA_FROM_DEVICE);

        if (ret) {
                dev_err(this->dev, "Error in ECC-based read: %d\n", ret);
                return ret;
        }

        /* Loop over status bytes, accumulating ECC status. */
        status = this->auxiliary_virt + nfc_geo->auxiliary_status_offset;

        if (!direct)
                memcpy(buf, this->payload_virt, nfc_geo->payload_size);

        for (i = 0; i < nfc_geo->ecc_chunk_count; i++, status++) {
                if ((*status == STATUS_GOOD) || (*status == STATUS_ERASED))
                        continue;

                if (*status == STATUS_UNCORRECTABLE) {
                        int eccbits = nfc_geo->ecc_strength * nfc_geo->gf_len;
                        u8 *eccbuf = this->raw_buffer;
                        int offset, bitoffset;
                        int eccbytes;
                        int flips;

                        /* Read ECC bytes into our internal raw_buffer */
                        offset = nfc_geo->metadata_size * 8;
                        offset += ((8 * nfc_geo->ecc_chunk_size) + eccbits) * (i + 1);
                        offset -= eccbits;
                        bitoffset = offset % 8;
                        eccbytes = DIV_ROUND_UP(offset + eccbits, 8);
                        offset /= 8;
                        eccbytes -= offset;
                        nand_change_read_column_op(chip, offset, eccbuf,
                                                   eccbytes, false);

                        /*
                         * ECC data are not byte aligned and we may have
                         * in-band data in the first and last byte of
                         * eccbuf. Set the non-ECC bits to one so that
                         * nand_check_erased_ecc_chunk() does not count them
                         * as bitflips.
                         */
                        if (bitoffset)
                                eccbuf[0] |= GENMASK(bitoffset - 1, 0);

                        bitoffset = (bitoffset + eccbits) % 8;
                        if (bitoffset)
                                eccbuf[eccbytes - 1] |= GENMASK(7, bitoffset);

                        /*
                         * The ECC hardware has an uncorrectable ECC status
                         * code in case we have bitflips in an erased page. As
                         * nothing was written into this subpage the ECC is
                         * obviously wrong and we can not trust it. We assume
                         * at this point that we are reading an erased page and
                         * try to correct the bitflips in buffer up to
                         * ecc_strength bitflips. If this is a page with random
                         * data, we exceed this number of bitflips and have an
                         * ECC failure. Otherwise we use the corrected buffer.
                         */
                        if (i == 0) {
                                /* The first block includes metadata */
                                flips = nand_check_erased_ecc_chunk(
                                                buf + i * nfc_geo->ecc_chunk_size,
                                                nfc_geo->ecc_chunk_size,
                                                eccbuf, eccbytes,
                                                this->auxiliary_virt,
                                                nfc_geo->metadata_size,
                                                nfc_geo->ecc_strength);
                        } else {
                                flips = nand_check_erased_ecc_chunk(
                                                buf + i * nfc_geo->ecc_chunk_size,
                                                nfc_geo->ecc_chunk_size,
                                                eccbuf, eccbytes,
                                                NULL, 0,
                                                nfc_geo->ecc_strength);
                        }

                        if (flips > 0) {
                                max_bitflips = max_t(unsigned int, max_bitflips,
                                                     flips);
                                mtd->ecc_stats.corrected += flips;
                                continue;
                        }

                        mtd->ecc_stats.failed++;
                        continue;
                }

                mtd->ecc_stats.corrected += *status;
                max_bitflips = max_t(unsigned int, max_bitflips, *status);
        }

        /* handle the block mark swapping */
        block_mark_swapping(this, buf, this->auxiliary_virt);

        if (oob_required) {
                /*
                 * It's time to deliver the OOB bytes. See gpmi_ecc_read_oob()
                 * for details about our policy for delivering the OOB.
                 *
                 * We fill the caller's buffer with set bits, and then copy the
                 * block mark to the caller's buffer. Note that, if block mark
                 * swapping was necessary, it has already been done, so we can
                 * rely on the first byte of the auxiliary buffer to contain
                 * the block mark.
                 */
                memset(chip->oob_poi, ~0, mtd->oobsize);
                chip->oob_poi[0] = ((uint8_t *)this->auxiliary_virt)[0];
        }

        return max_bitflips;
}
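
/*
 * Note on the return value above: as with any ecc.read_page() implementation,
 * it is the maximum number of bitflips corrected in any single ECC chunk. The
 * MTD core compares this against mtd->bitflip_threshold to decide whether to
 * report -EUCLEAN to upper layers (so that, e.g., UBI can scrub the block),
 * while genuine ECC failures are accounted in ecc_stats.failed instead.
 */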

static int gpmi_ecc_read_page(struct nand_chip *chip, uint8_t *buf,
                              int oob_required, int page)
{
        nand_read_page_op(chip, page, 0, NULL, 0);

        return gpmi_ecc_read_page_data(chip, buf, oob_required, page);
}

/* Fake a virtual small page for the subpage read */
static int gpmi_ecc_read_subpage(struct nand_chip *chip, uint32_t offs,
                                 uint32_t len, uint8_t *buf, int page)
{
        struct gpmi_nand_data *this = nand_get_controller_data(chip);
        void __iomem *bch_regs = this->resources.bch_regs;
        struct bch_geometry old_geo = this->bch_geometry;
        struct bch_geometry *geo = &this->bch_geometry;
        int size = chip->ecc.size; /* ECC chunk size */
        int meta, n, page_size;
        u32 r1_old, r2_old, r1_new, r2_new;
        unsigned int max_bitflips;
        int first, last, marker_pos;
        int ecc_parity_size;
        int col = 0;
        int old_swap_block_mark = this->swap_block_mark;

        /* The size of ECC parity */
        ecc_parity_size = geo->gf_len * geo->ecc_strength / 8;

        /* Align it with the chunk size */
        first = offs / size;
        last = (offs + len - 1) / size;

        if (this->swap_block_mark) {
                /*
                 * Find the chunk which contains the Block Marker.
                 * If this chunk is in the range of [first, last],
                 * we have to read out the whole page.
                 * Why? Because we swapped the data at the block mark position
                 * into the metadata, which is bound to chunk 0.
                 */
                marker_pos = geo->block_mark_byte_offset / size;
                if (last >= marker_pos && first <= marker_pos) {
                        dev_dbg(this->dev,
                                "page:%d, first:%d, last:%d, marker at:%d\n",
                                page, first, last, marker_pos);
                        return gpmi_ecc_read_page(chip, buf, 0, page);
                }
        }

        meta = geo->metadata_size;
        if (first) {
                col = meta + (size + ecc_parity_size) * first;
                meta = 0;
                buf = buf + first * size;
        }

        nand_read_page_op(chip, page, col, NULL, 0);

        /* Save the old environment */
        r1_old = r1_new = readl(bch_regs + HW_BCH_FLASH0LAYOUT0);
        r2_old = r2_new = readl(bch_regs + HW_BCH_FLASH0LAYOUT1);

        /* change the BCH registers and bch_geometry{} */
        n = last - first + 1;
        page_size = meta + (size + ecc_parity_size) * n;

        r1_new &= ~(BM_BCH_FLASH0LAYOUT0_NBLOCKS |
                        BM_BCH_FLASH0LAYOUT0_META_SIZE);
        r1_new |= BF_BCH_FLASH0LAYOUT0_NBLOCKS(n - 1)
                        | BF_BCH_FLASH0LAYOUT0_META_SIZE(meta);
        writel(r1_new, bch_regs + HW_BCH_FLASH0LAYOUT0);

        r2_new &= ~BM_BCH_FLASH0LAYOUT1_PAGE_SIZE;
        r2_new |= BF_BCH_FLASH0LAYOUT1_PAGE_SIZE(page_size);
        writel(r2_new, bch_regs + HW_BCH_FLASH0LAYOUT1);

        geo->ecc_chunk_count = n;
        geo->payload_size = n * size;
        geo->page_size = page_size;
        geo->auxiliary_status_offset = ALIGN(meta, 4);

        dev_dbg(this->dev, "page:%d(%d:%d)%d, chunk:(%d:%d), BCH PG size:%d\n",
                page, offs, len, col, first, n, page_size);

        /* Read the subpage now */
        this->swap_block_mark = false;
        max_bitflips = gpmi_ecc_read_page_data(chip, buf, 0, page);

        /* Restore */
        writel(r1_old, bch_regs + HW_BCH_FLASH0LAYOUT0);
        writel(r2_old, bch_regs + HW_BCH_FLASH0LAYOUT1);
        this->bch_geometry = old_geo;
        this->swap_block_mark = old_swap_block_mark;

        return max_bitflips;
}
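
/*
 * Worked subpage example, reusing the illustrative 2KiB geometry (size = 512,
 * ecc_parity_size = 13 * 8 / 8 = 13, metadata = 10): a read of 100 bytes at
 * offset 1000 gives first = 1, last = 2, so col = 10 + (512 + 13) * 1 = 535,
 * meta = 0, n = 2 and a temporary BCH page_size of (512 + 13) * 2 = 1050
 * bytes; the marker chunk (1999 / 512 = 3) is outside [1, 2], so no
 * whole-page fallback is needed.
 */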

static int gpmi_ecc_write_page(struct nand_chip *chip, const uint8_t *buf,
                               int oob_required, int page)
{
        struct mtd_info *mtd = nand_to_mtd(chip);
        struct gpmi_nand_data *this = nand_get_controller_data(chip);
        struct bch_geometry *nfc_geo = &this->bch_geometry;
        const void *payload_virt;
        dma_addr_t payload_phys;
        const void *auxiliary_virt;
        dma_addr_t auxiliary_phys;
        int ret;

        dev_dbg(this->dev, "ecc write page.\n");

        nand_prog_page_begin_op(chip, page, 0, NULL, 0);

        if (this->swap_block_mark) {
                /*
                 * If control arrives here, we're doing block mark swapping.
                 * Since we can't modify the caller's buffers, we must copy them
                 * into our own.
                 */
                memcpy(this->payload_virt, buf, mtd->writesize);
                payload_virt = this->payload_virt;
                payload_phys = this->payload_phys;

                memcpy(this->auxiliary_virt, chip->oob_poi,
                       nfc_geo->auxiliary_size);
                auxiliary_virt = this->auxiliary_virt;
                auxiliary_phys = this->auxiliary_phys;

                /* Handle block mark swapping. */
                block_mark_swapping(this,
                                    (void *)payload_virt, (void *)auxiliary_virt);
        } else {
                /*
                 * If control arrives here, we're not doing block mark swapping,
                 * so we can try to use the caller's buffers directly.
                 */
                ret = send_page_prepare(this,
                                        buf, mtd->writesize,
                                        this->payload_virt, this->payload_phys,
                                        nfc_geo->payload_size,
                                        &payload_virt, &payload_phys);
                if (ret) {
                        dev_err(this->dev, "Inadequate payload DMA buffer\n");
                        return 0;
                }

                ret = send_page_prepare(this,
                                        chip->oob_poi, mtd->oobsize,
                                        this->auxiliary_virt, this->auxiliary_phys,
                                        nfc_geo->auxiliary_size,
                                        &auxiliary_virt, &auxiliary_phys);
                if (ret) {
                        dev_err(this->dev, "Inadequate auxiliary DMA buffer\n");
                        goto exit_auxiliary;
                }
        }

        /* Ask the NFC. */
        ret = gpmi_send_page(this, payload_phys, auxiliary_phys);
        if (ret)
                dev_err(this->dev, "Error in ECC-based write: %d\n", ret);

        if (!this->swap_block_mark) {
                send_page_end(this, chip->oob_poi, mtd->oobsize,
                              this->auxiliary_virt, this->auxiliary_phys,
                              nfc_geo->auxiliary_size,
                              auxiliary_virt, auxiliary_phys);
exit_auxiliary:
                send_page_end(this, buf, mtd->writesize,
                              this->payload_virt, this->payload_phys,
                              nfc_geo->payload_size,
                              payload_virt, payload_phys);
        }

        if (ret)
                return ret;

        return nand_prog_page_end_op(chip);
}

/*
 * There are several places in this driver where we have to handle the OOB and
 * block marks. This is the function where things are the most complicated, so
 * this is where we try to explain it all. All the other places refer back to
 * here.
 *
 * These are the rules, in order of decreasing importance:
 *
 * 1) Nothing the caller does can be allowed to imperil the block mark.
 *
 * 2) In read operations, the first byte of the OOB we return must reflect the
 *    true state of the block mark, no matter where that block mark appears in
 *    the physical page.
 *
 * 3) ECC-based read operations return an OOB full of set bits (since we never
 *    allow ECC-based writes to the OOB, it doesn't matter what ECC-based reads
 *    return).
 *
 * 4) "Raw" read operations return a direct view of the physical bytes in the
 *    page, using the conventional definition of which bytes are data and which
 *    are OOB. This gives the caller a way to see the actual, physical bytes
 *    in the page, without the distortions applied by our ECC engine.
 *
 *
 * What we do for this specific read operation depends on two questions:
 *
 * 1) Are we doing a "raw" read, or an ECC-based read?
 *
 * 2) Are we using block mark swapping or transcription?
 *
 * There are four cases, illustrated by the following Karnaugh map:
 *
 *                    |           Raw           |         ECC-based       |
 *       -------------+-------------------------+-------------------------+
 *                    | Read the conventional   |                         |
 *                    | OOB at the end of the   |                         |
 *       Swapping     | page and return it. It  |                         |
 *                    | contains exactly what   |                         |
 *                    | we want.                | Read the block mark and |
 *       -------------+-------------------------+ return it in a buffer   |
 *                    | Read the conventional   | full of set bits.       |
 *                    | OOB at the end of the   |                         |
 *                    | page and also the block |                         |
 *       Transcribing | mark in the metadata.   |                         |
 *                    | Copy the block mark     |                         |
 *                    | into the first byte of  |                         |
 *                    | the OOB.                |                         |
 *       -------------+-------------------------+-------------------------+
 *
 * Note that we break rule #4 in the Transcribing/Raw case because we're not
 * giving an accurate view of the actual, physical bytes in the page (we're
 * overwriting the block mark). That's OK because it's more important to follow
 * rule #2.
 *
 * It turns out that knowing whether we want an "ECC-based" or "raw" read is not
 * easy. When reading a page, for example, the NAND Flash MTD code calls our
 * ecc.read_page or ecc.read_page_raw function. Thus, the fact that MTD wants an
 * ECC-based or raw view of the page is implicit in which function it calls
 * (there is a similar pair of ECC-based/raw functions for writing).
 */
Boris Brezillonb9761682018-09-06 14:05:20 +02001328static int gpmi_ecc_read_oob(struct nand_chip *chip, int page)
Huang Shijie10a2bca2011-09-08 10:47:09 +08001329{
Boris Brezillonb9761682018-09-06 14:05:20 +02001330 struct mtd_info *mtd = nand_to_mtd(chip);
Boris BREZILLONd699ed22015-12-10 09:00:41 +01001331 struct gpmi_nand_data *this = nand_get_controller_data(chip);
Huang Shijie10a2bca2011-09-08 10:47:09 +08001332
Huang Shijiec2325962013-11-20 10:09:44 +08001333 dev_dbg(this->dev, "page number is %d\n", page);
Huang Shijie10a2bca2011-09-08 10:47:09 +08001334 /* clear the OOB buffer */
1335 memset(chip->oob_poi, ~0, mtd->oobsize);
1336
1337 /* Read out the conventional OOB. */
Boris Brezillon97d90da2017-11-30 18:01:29 +01001338 nand_read_page_op(chip, page, mtd->writesize, NULL, 0);
Huang Shijie10a2bca2011-09-08 10:47:09 +08001339 chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
1340
1341 /*
1342 * Now, we want to make sure the block mark is correct. In the
Lothar Waßmann2a500af2014-03-28 11:35:06 +01001343 * non-transcribing case (!GPMI_IS_MX23()), we already have it.
1344 * Otherwise, we need to explicitly read it.
Huang Shijie10a2bca2011-09-08 10:47:09 +08001345 */
Lothar Waßmann2a500af2014-03-28 11:35:06 +01001346 if (GPMI_IS_MX23(this)) {
Huang Shijie10a2bca2011-09-08 10:47:09 +08001347 /* Read the block mark into the first byte of the OOB buffer. */
Boris Brezillon97d90da2017-11-30 18:01:29 +01001348 nand_read_page_op(chip, page, 0, NULL, 0);
Huang Shijie10a2bca2011-09-08 10:47:09 +08001349 chip->oob_poi[0] = chip->read_byte(mtd);
1350 }
1351
Shmulik Ladkani5c2ffb12012-05-09 13:06:35 +03001352 return 0;
Huang Shijie10a2bca2011-09-08 10:47:09 +08001353}
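
/*
 * Illustrative sketch only (not part of this driver): how an in-kernel MTD
 * user sees the ECC-based vs. raw distinction described above. "mtd", "offs"
 * and the oob buffer size are assumptions made for the example.
 *
 *	struct mtd_oob_ops ops = { };
 *	u8 oob[256];
 *
 *	ops.mode   = MTD_OPS_PLACE_OOB;	(ECC-based view -> gpmi_ecc_read_oob)
 *	ops.oobbuf = oob;
 *	ops.ooblen = mtd->oobsize;
 *	mtd_read_oob(mtd, offs, &ops);	(no datbuf, so this is an OOB-only read)
 *
 *	ops.mode   = MTD_OPS_RAW;	(raw view -> gpmi_ecc_read_oob_raw)
 *	mtd_read_oob(mtd, offs, &ops);
 */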
1354
Boris Brezillon767eb6f2018-09-06 14:05:21 +02001355static int gpmi_ecc_write_oob(struct nand_chip *chip, int page)
Huang Shijie10a2bca2011-09-08 10:47:09 +08001356{
Boris Brezillon767eb6f2018-09-06 14:05:21 +02001357 struct mtd_info *mtd = nand_to_mtd(chip);
Boris Brezillon191a8292016-02-03 20:11:44 +01001358 struct mtd_oob_region of = { };
Huang Shijie7a2b89a2013-09-25 14:58:15 +08001359
1360	/* Do we have any available OOB area? */
Boris Brezillon191a8292016-02-03 20:11:44 +01001361 mtd_ooblayout_free(mtd, 0, &of);
1362 if (!of.length)
Huang Shijie7a2b89a2013-09-25 14:58:15 +08001363 return -EPERM;
1364
1365 if (!nand_is_slc(chip))
1366 return -EPERM;
1367
Boris Brezillon97d90da2017-11-30 18:01:29 +01001368 return nand_prog_page_op(chip, page, mtd->writesize + of.offset,
1369 chip->oob_poi + of.offset, of.length);
Huang Shijie10a2bca2011-09-08 10:47:09 +08001370}
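
/*
 * Illustrative sketch only (mtd, offs, data and len are assumed): since
 * gpmi_ecc_write_oob() only writes the free OOB region reported by
 * gpmi_ooblayout_free(), a caller typically uses the auto-placement mode and
 * lets the NAND core fill chip->oob_poi accordingly:
 *
 *	struct mtd_oob_ops ops = { };
 *
 *	ops.mode   = MTD_OPS_AUTO_OOB;
 *	ops.oobbuf = data;
 *	ops.ooblen = len;	(at most mtd->oobavail bytes)
 *	mtd_write_oob(mtd, offs, &ops);
 */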
1371
Boris BREZILLONda3bc42c2014-11-30 19:10:29 +01001372/*
1373 * This function reads a NAND page without involving the ECC engine (no HW
1374 * ECC correction).
1375 * The tricky part in the GPMI/BCH controller is that it stores ECC bits
1376 * inline (interleaved with the payload data) and does not align data chunks on
1377 * byte boundaries.
1378 * We thus need to take care when moving the payload data and ECC bits stored
1379 * in the page into the provided buffers, which is why we're using gpmi_copy_bits.
1380 *
1381 * See set_geometry_by_ecc_info inline comments to have a full description
1382 * of the layout used by the GPMI controller.
1383 */
Boris Brezillonb9761682018-09-06 14:05:20 +02001384static int gpmi_ecc_read_page_raw(struct nand_chip *chip, uint8_t *buf,
Boris BREZILLONda3bc42c2014-11-30 19:10:29 +01001385 int oob_required, int page)
1386{
Boris Brezillonb9761682018-09-06 14:05:20 +02001387 struct mtd_info *mtd = nand_to_mtd(chip);
Boris BREZILLONd699ed22015-12-10 09:00:41 +01001388 struct gpmi_nand_data *this = nand_get_controller_data(chip);
Boris BREZILLONda3bc42c2014-11-30 19:10:29 +01001389 struct bch_geometry *nfc_geo = &this->bch_geometry;
1390 int eccsize = nfc_geo->ecc_chunk_size;
1391 int eccbits = nfc_geo->ecc_strength * nfc_geo->gf_len;
1392 u8 *tmp_buf = this->raw_buffer;
1393 size_t src_bit_off;
1394 size_t oob_bit_off;
1395 size_t oob_byte_off;
1396 uint8_t *oob = chip->oob_poi;
1397 int step;
1398
Boris Brezillon25f815f2017-11-30 18:01:30 +01001399 nand_read_page_op(chip, page, 0, tmp_buf,
1400 mtd->writesize + mtd->oobsize);
Boris BREZILLONda3bc42c2014-11-30 19:10:29 +01001401
1402 /*
1403 * If required, swap the bad block marker and the data stored in the
1404 * metadata section, so that we don't wrongly consider a block as bad.
1405 *
1406 * See the layout description for a detailed explanation on why this
1407 * is needed.
1408 */
Gustavo A. R. Silvab13a9732017-11-03 15:31:47 -05001409 if (this->swap_block_mark)
1410 swap(tmp_buf[0], tmp_buf[mtd->writesize]);
Boris BREZILLONda3bc42c2014-11-30 19:10:29 +01001411
1412 /*
1413 * Copy the metadata section into the oob buffer (this section is
1414 * guaranteed to be aligned on a byte boundary).
1415 */
1416 if (oob_required)
1417 memcpy(oob, tmp_buf, nfc_geo->metadata_size);
1418
1419 oob_bit_off = nfc_geo->metadata_size * 8;
1420 src_bit_off = oob_bit_off;
1421
1422 /* Extract interleaved payload data and ECC bits */
1423 for (step = 0; step < nfc_geo->ecc_chunk_count; step++) {
1424 if (buf)
1425 gpmi_copy_bits(buf, step * eccsize * 8,
1426 tmp_buf, src_bit_off,
1427 eccsize * 8);
1428 src_bit_off += eccsize * 8;
1429
1430		/* Extend the last ECC block so that it ends on a byte boundary */
1431 if (step == nfc_geo->ecc_chunk_count - 1 &&
1432 (oob_bit_off + eccbits) % 8)
1433 eccbits += 8 - ((oob_bit_off + eccbits) % 8);
1434
1435 if (oob_required)
1436 gpmi_copy_bits(oob, oob_bit_off,
1437 tmp_buf, src_bit_off,
1438 eccbits);
1439
1440 src_bit_off += eccbits;
1441 oob_bit_off += eccbits;
1442 }
1443
1444 if (oob_required) {
1445 oob_byte_off = oob_bit_off / 8;
1446
1447 if (oob_byte_off < mtd->oobsize)
1448 memcpy(oob + oob_byte_off,
1449 tmp_buf + mtd->writesize + oob_byte_off,
1450 mtd->oobsize - oob_byte_off);
1451 }
1452
1453 return 0;
1454}
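
/*
 * Worked example of the layout handled above (a hypothetical but typical
 * geometry, given only to make the bit arithmetic concrete): a 4KiB page
 * with 224 OOB bytes, metadata_size = 10, ecc_chunk_size = 512,
 * ecc_chunk_count = 8, gf_len = 13 and ecc_strength = 16.
 *
 * The parity per chunk is eccbits = 16 * 13 = 208 bits (26 bytes), so the
 * physical page looks like:
 *
 *	| 10B meta | 512B data0 | 26B ECC0 | ... | 512B data7 | 26B ECC7 | 6B unused |
 *
 * and the loop above walks these bit offsets:
 *
 *	oob_bit_off = src_bit_off = 10 * 8 = 80
 *	chunk 0: data bits 80..4175, ECC bits 4176..4383
 *	chunk i: data starts at bit 80 + i * (4096 + 208)
 *	chunk 7: ECC ends at bit 34511, i.e. in byte 4313 of the 4320-byte page
 *
 * In this geometry the parity happens to be byte aligned, so the "extend the
 * last ECC block" fix-up is a no-op and the final memcpy() only has to copy
 * the 6 trailing unused bytes. gpmi_ecc_write_page_raw() below produces the
 * same physical layout in the write direction.
 */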
1455
1456/*
1457 * This function writes a NAND page without involving the ECC engine (no HW
1458 * ECC generation).
1459 * The tricky part in the GPMI/BCH controller is that it stores ECC bits
1460 * inline (interleaved with the payload data) and does not align data chunks on
1461 * byte boundaries.
1462 * We thus need to take care to move the OOB data to the right place in the
1463 * final page, which is why we're using gpmi_copy_bits.
1464 *
1465 * See set_geometry_by_ecc_info inline comments to have a full description
1466 * of the layout used by the GPMI controller.
1467 */
Boris Brezillon767eb6f2018-09-06 14:05:21 +02001468static int gpmi_ecc_write_page_raw(struct nand_chip *chip, const uint8_t *buf,
Boris BREZILLON45aaeff2015-10-13 11:22:18 +02001469 int oob_required, int page)
Boris BREZILLONda3bc42c2014-11-30 19:10:29 +01001470{
Boris Brezillon767eb6f2018-09-06 14:05:21 +02001471 struct mtd_info *mtd = nand_to_mtd(chip);
Boris BREZILLONd699ed22015-12-10 09:00:41 +01001472 struct gpmi_nand_data *this = nand_get_controller_data(chip);
Boris BREZILLONda3bc42c2014-11-30 19:10:29 +01001473 struct bch_geometry *nfc_geo = &this->bch_geometry;
1474 int eccsize = nfc_geo->ecc_chunk_size;
1475 int eccbits = nfc_geo->ecc_strength * nfc_geo->gf_len;
1476 u8 *tmp_buf = this->raw_buffer;
1477 uint8_t *oob = chip->oob_poi;
1478 size_t dst_bit_off;
1479 size_t oob_bit_off;
1480 size_t oob_byte_off;
1481 int step;
1482
1483 /*
1484	 * Initialize all bits to 1 in case we don't have a buffer for the
1485	 * payload or OOB data, so that any unspecified bits are left in
1486	 * their erased (all-ones) state.
1487 */
1488 if (!buf || !oob_required)
1489 memset(tmp_buf, 0xff, mtd->writesize + mtd->oobsize);
1490
1491 /*
1492	 * First, copy the metadata section (stored in the OOB buffer) to the
1493	 * beginning of the page, as imposed by the GPMI layout.
1494 */
1495 memcpy(tmp_buf, oob, nfc_geo->metadata_size);
1496 oob_bit_off = nfc_geo->metadata_size * 8;
1497 dst_bit_off = oob_bit_off;
1498
1499 /* Interleave payload data and ECC bits */
1500 for (step = 0; step < nfc_geo->ecc_chunk_count; step++) {
1501 if (buf)
1502 gpmi_copy_bits(tmp_buf, dst_bit_off,
1503 buf, step * eccsize * 8, eccsize * 8);
1504 dst_bit_off += eccsize * 8;
1505
1506		/* Extend the last ECC block so that it ends on a byte boundary */
1507 if (step == nfc_geo->ecc_chunk_count - 1 &&
1508 (oob_bit_off + eccbits) % 8)
1509 eccbits += 8 - ((oob_bit_off + eccbits) % 8);
1510
1511 if (oob_required)
1512 gpmi_copy_bits(tmp_buf, dst_bit_off,
1513 oob, oob_bit_off, eccbits);
1514
1515 dst_bit_off += eccbits;
1516 oob_bit_off += eccbits;
1517 }
1518
1519 oob_byte_off = oob_bit_off / 8;
1520
1521 if (oob_required && oob_byte_off < mtd->oobsize)
1522 memcpy(tmp_buf + mtd->writesize + oob_byte_off,
1523 oob + oob_byte_off, mtd->oobsize - oob_byte_off);
1524
1525 /*
1526 * If required, swap the bad block marker and the first byte of the
1527 * metadata section, so that we don't modify the bad block marker.
1528 *
1529 * See the layout description for a detailed explanation on why this
1530 * is needed.
1531 */
Gustavo A. R. Silvab13a9732017-11-03 15:31:47 -05001532 if (this->swap_block_mark)
1533 swap(tmp_buf[0], tmp_buf[mtd->writesize]);
Boris BREZILLONda3bc42c2014-11-30 19:10:29 +01001534
Boris Brezillon25f815f2017-11-30 18:01:30 +01001535 return nand_prog_page_op(chip, page, 0, tmp_buf,
1536 mtd->writesize + mtd->oobsize);
Boris BREZILLONda3bc42c2014-11-30 19:10:29 +01001537}
1538
Boris Brezillonb9761682018-09-06 14:05:20 +02001539static int gpmi_ecc_read_oob_raw(struct nand_chip *chip, int page)
Boris BREZILLON7ca94e02014-11-30 19:10:30 +01001540{
Boris Brezillonb9761682018-09-06 14:05:20 +02001541 return gpmi_ecc_read_page_raw(chip, NULL, 1, page);
Boris BREZILLON7ca94e02014-11-30 19:10:30 +01001542}
1543
Boris Brezillon767eb6f2018-09-06 14:05:21 +02001544static int gpmi_ecc_write_oob_raw(struct nand_chip *chip, int page)
Boris BREZILLON7ca94e02014-11-30 19:10:30 +01001545{
Boris Brezillon767eb6f2018-09-06 14:05:21 +02001546 return gpmi_ecc_write_page_raw(chip, NULL, 1, page);
Boris BREZILLON7ca94e02014-11-30 19:10:30 +01001547}
1548
Huang Shijie10a2bca2011-09-08 10:47:09 +08001549static int gpmi_block_markbad(struct mtd_info *mtd, loff_t ofs)
1550{
Boris BREZILLON4bd4ebc2015-12-01 12:03:04 +01001551 struct nand_chip *chip = mtd_to_nand(mtd);
Boris BREZILLONd699ed22015-12-10 09:00:41 +01001552 struct gpmi_nand_data *this = nand_get_controller_data(chip);
Brian Norris5a0edb22013-07-30 17:52:58 -07001553 int ret = 0;
Huang Shijie10a2bca2011-09-08 10:47:09 +08001554 uint8_t *block_mark;
Boris Brezillon97d90da2017-11-30 18:01:29 +01001555 int column, page, chipnr;
Huang Shijie10a2bca2011-09-08 10:47:09 +08001556
Brian Norris5a0edb22013-07-30 17:52:58 -07001557 chipnr = (int)(ofs >> chip->chip_shift);
1558 chip->select_chip(mtd, chipnr);
Huang Shijie10a2bca2011-09-08 10:47:09 +08001559
Lothar Waßmann2a500af2014-03-28 11:35:06 +01001560 column = !GPMI_IS_MX23(this) ? mtd->writesize : 0;
Huang Shijie10a2bca2011-09-08 10:47:09 +08001561
Brian Norris5a0edb22013-07-30 17:52:58 -07001562 /* Write the block mark. */
1563 block_mark = this->data_buffer_dma;
1564 block_mark[0] = 0; /* bad block marker */
Huang Shijie10a2bca2011-09-08 10:47:09 +08001565
Brian Norris5a0edb22013-07-30 17:52:58 -07001566 /* Shift to get page */
1567 page = (int)(ofs >> chip->page_shift);
Huang Shijie10a2bca2011-09-08 10:47:09 +08001568
Boris Brezillon97d90da2017-11-30 18:01:29 +01001569 ret = nand_prog_page_op(chip, page, column, block_mark, 1);
Huang Shijie10a2bca2011-09-08 10:47:09 +08001570
Brian Norris5a0edb22013-07-30 17:52:58 -07001571 chip->select_chip(mtd, -1);
Huang Shijie10a2bca2011-09-08 10:47:09 +08001572
1573 return ret;
1574}
1575
Wolfram Sanga78da282012-03-21 19:29:17 +01001576static int nand_boot_set_geometry(struct gpmi_nand_data *this)
Huang Shijie10a2bca2011-09-08 10:47:09 +08001577{
1578 struct boot_rom_geometry *geometry = &this->rom_geometry;
1579
1580 /*
1581 * Set the boot block stride size.
1582 *
1583 * In principle, we should be reading this from the OTP bits, since
1584 * that's where the ROM is going to get it. In fact, we don't have any
1585 * way to read the OTP bits, so we go with the default and hope for the
1586 * best.
1587 */
1588 geometry->stride_size_in_pages = 64;
1589
1590 /*
1591 * Set the search area stride exponent.
1592 *
1593 * In principle, we should be reading this from the OTP bits, since
1594 * that's where the ROM is going to get it. In fact, we don't have any
1595 * way to read the OTP bits, so we go with the default and hope for the
1596 * best.
1597 */
1598 geometry->search_area_stride_exponent = 2;
1599 return 0;
1600}
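
/*
 * With the defaults above, the ROM search area is 1 << 2 = 4 strides of 64
 * pages each, i.e. 256 pages: mx23_check_transcription_stamp() probes pages
 * 0, 64, 128 and 192 of chip 0. Assuming, for illustration, 2KiB pages and
 * 128KiB blocks (64 pages per block), those 256 pages are the first 4 blocks
 * erased by mx23_write_transcription_stamp().
 */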
1601
1602static const char *fingerprint = "STMP";
Wolfram Sanga78da282012-03-21 19:29:17 +01001603static int mx23_check_transcription_stamp(struct gpmi_nand_data *this)
Huang Shijie10a2bca2011-09-08 10:47:09 +08001604{
1605 struct boot_rom_geometry *rom_geo = &this->rom_geometry;
1606 struct device *dev = this->dev;
Huang Shijie10a2bca2011-09-08 10:47:09 +08001607 struct nand_chip *chip = &this->nand;
Boris BREZILLON2a690b22015-12-10 09:00:07 +01001608 struct mtd_info *mtd = nand_to_mtd(chip);
Huang Shijie10a2bca2011-09-08 10:47:09 +08001609 unsigned int search_area_size_in_strides;
1610 unsigned int stride;
1611 unsigned int page;
Masahiro Yamadac0313b92017-12-05 17:47:16 +09001612 uint8_t *buffer = chip->data_buf;
Huang Shijie10a2bca2011-09-08 10:47:09 +08001613 int saved_chip_number;
1614 int found_an_ncb_fingerprint = false;
1615
1616 /* Compute the number of strides in a search area. */
1617 search_area_size_in_strides = 1 << rom_geo->search_area_stride_exponent;
1618
1619 saved_chip_number = this->current_chip;
1620 chip->select_chip(mtd, 0);
1621
1622 /*
1623 * Loop through the first search area, looking for the NCB fingerprint.
1624 */
1625 dev_dbg(dev, "Scanning for an NCB fingerprint...\n");
1626
1627 for (stride = 0; stride < search_area_size_in_strides; stride++) {
Huang Shijie513d57e2012-07-17 14:14:02 +08001628 /* Compute the page addresses. */
Huang Shijie10a2bca2011-09-08 10:47:09 +08001629 page = stride * rom_geo->stride_size_in_pages;
Huang Shijie10a2bca2011-09-08 10:47:09 +08001630
1631 dev_dbg(dev, "Looking for a fingerprint in page 0x%x\n", page);
1632
1633 /*
1634 * Read the NCB fingerprint. The fingerprint is four bytes long
1635		 * and starts at byte offset 12 in the page.
1636 */
Boris Brezillon97d90da2017-11-30 18:01:29 +01001637 nand_read_page_op(chip, page, 12, NULL, 0);
Huang Shijie10a2bca2011-09-08 10:47:09 +08001638 chip->read_buf(mtd, buffer, strlen(fingerprint));
1639
1640 /* Look for the fingerprint. */
1641 if (!memcmp(buffer, fingerprint, strlen(fingerprint))) {
1642 found_an_ncb_fingerprint = true;
1643 break;
1644 }
1645
1646 }
1647
1648 chip->select_chip(mtd, saved_chip_number);
1649
1650 if (found_an_ncb_fingerprint)
1651 dev_dbg(dev, "\tFound a fingerprint\n");
1652 else
1653 dev_dbg(dev, "\tNo fingerprint found\n");
1654 return found_an_ncb_fingerprint;
1655}
1656
1657/* Writes a transcription stamp. */
Wolfram Sanga78da282012-03-21 19:29:17 +01001658static int mx23_write_transcription_stamp(struct gpmi_nand_data *this)
Huang Shijie10a2bca2011-09-08 10:47:09 +08001659{
1660 struct device *dev = this->dev;
1661 struct boot_rom_geometry *rom_geo = &this->rom_geometry;
Huang Shijie10a2bca2011-09-08 10:47:09 +08001662 struct nand_chip *chip = &this->nand;
Boris BREZILLON2a690b22015-12-10 09:00:07 +01001663 struct mtd_info *mtd = nand_to_mtd(chip);
Huang Shijie10a2bca2011-09-08 10:47:09 +08001664 unsigned int block_size_in_pages;
1665 unsigned int search_area_size_in_strides;
1666 unsigned int search_area_size_in_pages;
1667 unsigned int search_area_size_in_blocks;
1668 unsigned int block;
1669 unsigned int stride;
1670 unsigned int page;
Masahiro Yamadac0313b92017-12-05 17:47:16 +09001671 uint8_t *buffer = chip->data_buf;
Huang Shijie10a2bca2011-09-08 10:47:09 +08001672 int saved_chip_number;
1673 int status;
1674
1675 /* Compute the search area geometry. */
1676 block_size_in_pages = mtd->erasesize / mtd->writesize;
1677 search_area_size_in_strides = 1 << rom_geo->search_area_stride_exponent;
1678 search_area_size_in_pages = search_area_size_in_strides *
1679 rom_geo->stride_size_in_pages;
1680 search_area_size_in_blocks =
1681 (search_area_size_in_pages + (block_size_in_pages - 1)) /
1682 block_size_in_pages;
1683
1684 dev_dbg(dev, "Search Area Geometry :\n");
1685 dev_dbg(dev, "\tin Blocks : %u\n", search_area_size_in_blocks);
1686 dev_dbg(dev, "\tin Strides: %u\n", search_area_size_in_strides);
1687 dev_dbg(dev, "\tin Pages : %u\n", search_area_size_in_pages);
1688
1689 /* Select chip 0. */
1690 saved_chip_number = this->current_chip;
1691 chip->select_chip(mtd, 0);
1692
1693 /* Loop over blocks in the first search area, erasing them. */
1694 dev_dbg(dev, "Erasing the search area...\n");
1695
1696 for (block = 0; block < search_area_size_in_blocks; block++) {
Huang Shijie10a2bca2011-09-08 10:47:09 +08001697 /* Erase this block. */
1698 dev_dbg(dev, "\tErasing block 0x%x\n", block);
Boris Brezillon97d90da2017-11-30 18:01:29 +01001699 status = nand_erase_op(chip, block);
1700 if (status)
Huang Shijie10a2bca2011-09-08 10:47:09 +08001701 dev_err(dev, "[%s] Erase failed.\n", __func__);
1702 }
1703
1704 /* Write the NCB fingerprint into the page buffer. */
1705 memset(buffer, ~0, mtd->writesize);
Huang Shijie10a2bca2011-09-08 10:47:09 +08001706 memcpy(buffer + 12, fingerprint, strlen(fingerprint));
1707
1708 /* Loop through the first search area, writing NCB fingerprints. */
1709 dev_dbg(dev, "Writing NCB fingerprints...\n");
1710 for (stride = 0; stride < search_area_size_in_strides; stride++) {
Huang Shijie513d57e2012-07-17 14:14:02 +08001711 /* Compute the page addresses. */
Huang Shijie10a2bca2011-09-08 10:47:09 +08001712 page = stride * rom_geo->stride_size_in_pages;
Huang Shijie10a2bca2011-09-08 10:47:09 +08001713
1714 /* Write the first page of the current stride. */
1715 dev_dbg(dev, "Writing an NCB fingerprint in page 0x%x\n", page);
Huang Shijie10a2bca2011-09-08 10:47:09 +08001716
Boris Brezillon767eb6f2018-09-06 14:05:21 +02001717 status = chip->ecc.write_page_raw(chip, buffer, 0, page);
Boris Brezillon97d90da2017-11-30 18:01:29 +01001718 if (status)
Huang Shijie10a2bca2011-09-08 10:47:09 +08001719 dev_err(dev, "[%s] Write failed.\n", __func__);
1720 }
1721
1722 /* Deselect chip 0. */
1723 chip->select_chip(mtd, saved_chip_number);
1724 return 0;
1725}
1726
Wolfram Sanga78da282012-03-21 19:29:17 +01001727static int mx23_boot_init(struct gpmi_nand_data *this)
Huang Shijie10a2bca2011-09-08 10:47:09 +08001728{
1729 struct device *dev = this->dev;
1730 struct nand_chip *chip = &this->nand;
Boris BREZILLON2a690b22015-12-10 09:00:07 +01001731 struct mtd_info *mtd = nand_to_mtd(chip);
Huang Shijie10a2bca2011-09-08 10:47:09 +08001732 unsigned int block_count;
1733 unsigned int block;
1734 int chipnr;
1735 int page;
1736 loff_t byte;
1737 uint8_t block_mark;
1738 int ret = 0;
1739
1740 /*
1741 * If control arrives here, we can't use block mark swapping, which
1742 * means we're forced to use transcription. First, scan for the
1743 * transcription stamp. If we find it, then we don't have to do
1744 * anything -- the block marks are already transcribed.
1745 */
1746 if (mx23_check_transcription_stamp(this))
1747 return 0;
1748
1749 /*
1750 * If control arrives here, we couldn't find a transcription stamp, so
1751	 * we presume the block marks are in the conventional location.
1752 */
1753 dev_dbg(dev, "Transcribing bad block marks...\n");
1754
1755 /* Compute the number of blocks in the entire medium. */
1756 block_count = chip->chipsize >> chip->phys_erase_shift;
1757
1758 /*
1759 * Loop over all the blocks in the medium, transcribing block marks as
1760 * we go.
1761 */
1762 for (block = 0; block < block_count; block++) {
1763 /*
1764 * Compute the chip, page and byte addresses for this block's
1765 * conventional mark.
1766 */
1767 chipnr = block >> (chip->chip_shift - chip->phys_erase_shift);
1768 page = block << (chip->phys_erase_shift - chip->page_shift);
1769 byte = block << chip->phys_erase_shift;
1770
1771 /* Send the command to read the conventional block mark. */
1772 chip->select_chip(mtd, chipnr);
Boris Brezillon97d90da2017-11-30 18:01:29 +01001773 nand_read_page_op(chip, page, mtd->writesize, NULL, 0);
Huang Shijie10a2bca2011-09-08 10:47:09 +08001774 block_mark = chip->read_byte(mtd);
1775 chip->select_chip(mtd, -1);
1776
1777 /*
1778 * Check if the block is marked bad. If so, we need to mark it
1779 * again, but this time the result will be a mark in the
1780 * location where we transcribe block marks.
1781 */
1782 if (block_mark != 0xff) {
1783 dev_dbg(dev, "Transcribing mark in block %u\n", block);
1784 ret = chip->block_markbad(mtd, byte);
1785 if (ret)
Lothar Waßmannd8c03722014-06-12 15:20:42 +02001786 dev_err(dev,
1787 "Failed to mark block bad with ret %d\n",
1788 ret);
Huang Shijie10a2bca2011-09-08 10:47:09 +08001789 }
1790 }
1791
1792 /* Write the stamp that indicates we've transcribed the block marks. */
1793 mx23_write_transcription_stamp(this);
1794 return 0;
1795}
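
/*
 * Worked example of the address arithmetic above (the geometry is assumed,
 * purely for illustration): with 2KiB pages (page_shift = 11), 128KiB blocks
 * (phys_erase_shift = 17) and a 256MiB chip (chip_shift = 28):
 *
 *	page   = block << (17 - 11) = block * 64	(64 pages per block)
 *	chipnr = block >> (28 - 17) = block / 2048	(2048 blocks per chip)
 *	byte   = block << 17        = block * 128KiB
 */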
1796
Wolfram Sanga78da282012-03-21 19:29:17 +01001797static int nand_boot_init(struct gpmi_nand_data *this)
Huang Shijie10a2bca2011-09-08 10:47:09 +08001798{
1799 nand_boot_set_geometry(this);
1800
1801	/* This is the ROM arch-specific initialization before the BBT scan. */
1802 if (GPMI_IS_MX23(this))
1803 return mx23_boot_init(this);
1804 return 0;
1805}
1806
Wolfram Sanga78da282012-03-21 19:29:17 +01001807static int gpmi_set_geometry(struct gpmi_nand_data *this)
Huang Shijie10a2bca2011-09-08 10:47:09 +08001808{
1809 int ret;
1810
1811 /* Free the temporary DMA memory for reading ID. */
1812 gpmi_free_dma_buffer(this);
1813
1814 /* Set up the NFC geometry which is used by BCH. */
1815 ret = bch_set_geometry(this);
1816 if (ret) {
Huang Shijieda40c162013-11-20 10:09:43 +08001817 dev_err(this->dev, "Error setting BCH geometry : %d\n", ret);
Huang Shijie10a2bca2011-09-08 10:47:09 +08001818 return ret;
1819 }
1820
1821 /* Alloc the new DMA buffers according to the pagesize and oobsize */
1822 return gpmi_alloc_dma_buffer(this);
1823}
1824
Huang Shijief720e7c2013-08-16 10:10:08 +08001825static int gpmi_init_last(struct gpmi_nand_data *this)
1826{
Boris BREZILLON2a690b22015-12-10 09:00:07 +01001827 struct nand_chip *chip = &this->nand;
Boris Brezillon3f158e42016-02-03 20:01:54 +01001828 struct mtd_info *mtd = nand_to_mtd(chip);
Huang Shijief720e7c2013-08-16 10:10:08 +08001829 struct nand_ecc_ctrl *ecc = &chip->ecc;
1830 struct bch_geometry *bch_geo = &this->bch_geometry;
Huang Shijie10a2bca2011-09-08 10:47:09 +08001831 int ret;
1832
Huang Shijied7364a272013-11-14 14:25:45 +08001833 /* Set up the medium geometry */
1834 ret = gpmi_set_geometry(this);
Huang Shijie10a2bca2011-09-08 10:47:09 +08001835 if (ret)
1836 return ret;
1837
Huang Shijief720e7c2013-08-16 10:10:08 +08001838 /* Init the nand_ecc_ctrl{} */
1839 ecc->read_page = gpmi_ecc_read_page;
1840 ecc->write_page = gpmi_ecc_write_page;
1841 ecc->read_oob = gpmi_ecc_read_oob;
1842 ecc->write_oob = gpmi_ecc_write_oob;
Boris BREZILLONda3bc42c2014-11-30 19:10:29 +01001843 ecc->read_page_raw = gpmi_ecc_read_page_raw;
1844 ecc->write_page_raw = gpmi_ecc_write_page_raw;
Boris BREZILLON7ca94e02014-11-30 19:10:30 +01001845 ecc->read_oob_raw = gpmi_ecc_read_oob_raw;
1846 ecc->write_oob_raw = gpmi_ecc_write_oob_raw;
Huang Shijief720e7c2013-08-16 10:10:08 +08001847 ecc->mode = NAND_ECC_HW;
1848 ecc->size = bch_geo->ecc_chunk_size;
1849 ecc->strength = bch_geo->ecc_strength;
Boris Brezillon3f158e42016-02-03 20:01:54 +01001850 mtd_set_ooblayout(mtd, &gpmi_ooblayout_ops);
Huang Shijief720e7c2013-08-16 10:10:08 +08001851
Huang Shijie995fbbf2012-09-13 14:57:59 +08001852 /*
Huang Shijieb8e29312014-01-03 11:01:42 +08001853 * We only enable the subpage read when:
1854 * (1) the chip is imx6, and
1855 * (2) the size of the ECC parity is byte aligned.
1856 */
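	/*
	 * For example, with gf_len = 13 and ecc_strength = 8 the parity per
	 * chunk is 104 bits = 13 bytes, so subpage reads get enabled on
	 * i.MX6; with ecc_strength = 14 it would be 182 bits (182 % 8 == 6)
	 * and the feature stays off.
	 */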
Huang Shijie91f54982014-03-27 10:43:22 +08001857 if (GPMI_IS_MX6(this) &&
Huang Shijieb8e29312014-01-03 11:01:42 +08001858 ((bch_geo->gf_len * bch_geo->ecc_strength) % 8) == 0) {
1859 ecc->read_subpage = gpmi_ecc_read_subpage;
1860 chip->options |= NAND_SUBPAGE_READ;
1861 }
1862
Huang Shijief720e7c2013-08-16 10:10:08 +08001863 return 0;
Huang Shijie10a2bca2011-09-08 10:47:09 +08001864}
1865
Miquel Raynal5f8374d2018-07-20 17:15:00 +02001866static int gpmi_nand_attach_chip(struct nand_chip *chip)
1867{
1868 struct gpmi_nand_data *this = nand_get_controller_data(chip);
1869 int ret;
1870
1871 if (chip->bbt_options & NAND_BBT_USE_FLASH) {
1872 chip->bbt_options |= NAND_BBT_NO_OOB;
1873
1874 if (of_property_read_bool(this->dev->of_node,
1875 "fsl,no-blockmark-swap"))
1876 this->swap_block_mark = false;
1877 }
1878 dev_dbg(this->dev, "Blockmark swapping %sabled\n",
1879 this->swap_block_mark ? "en" : "dis");
1880
1881 ret = gpmi_init_last(this);
1882 if (ret)
1883 return ret;
1884
1885 chip->options |= NAND_SKIP_BBTSCAN;
1886
1887 return 0;
1888}
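
/*
 * A minimal, hypothetical devicetree fragment exercising the two options
 * handled above: "nand-on-flash-bbt" is what makes the NAND core set
 * NAND_BBT_USE_FLASH, and "fsl,no-blockmark-swap" is the property read here.
 *
 *	&gpmi {
 *		status = "okay";
 *		nand-on-flash-bbt;
 *		fsl,no-blockmark-swap;
 *	};
 */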
1889
1890static const struct nand_controller_ops gpmi_nand_controller_ops = {
1891 .attach_chip = gpmi_nand_attach_chip,
1892};
1893
Huang Shijieccce4172013-11-14 14:25:47 +08001894static int gpmi_nand_init(struct gpmi_nand_data *this)
Huang Shijie10a2bca2011-09-08 10:47:09 +08001895{
Huang Shijie10a2bca2011-09-08 10:47:09 +08001896 struct nand_chip *chip = &this->nand;
Boris BREZILLON2a690b22015-12-10 09:00:07 +01001897 struct mtd_info *mtd = nand_to_mtd(chip);
Huang Shijie10a2bca2011-09-08 10:47:09 +08001898 int ret;
1899
1900 /* init current chip */
1901 this->current_chip = -1;
1902
1903 /* init the MTD data structures */
Huang Shijie10a2bca2011-09-08 10:47:09 +08001904 mtd->name = "gpmi-nand";
Frans Klaver4dc67b12015-06-10 22:38:49 +02001905 mtd->dev.parent = this->dev;
Huang Shijie10a2bca2011-09-08 10:47:09 +08001906
1907 /* init the nand_chip{}, we don't support a 16-bit NAND Flash bus. */
Boris BREZILLONd699ed22015-12-10 09:00:41 +01001908 nand_set_controller_data(chip, this);
Brian Norrisa61ae812015-10-30 20:33:25 -07001909 nand_set_flash_node(chip, this->pdev->dev.of_node);
Huang Shijie10a2bca2011-09-08 10:47:09 +08001910 chip->select_chip = gpmi_select_chip;
Miquel Raynal76e1a002018-03-02 15:38:39 +01001911 chip->setup_data_interface = gpmi_setup_data_interface;
Huang Shijie10a2bca2011-09-08 10:47:09 +08001912 chip->cmd_ctrl = gpmi_cmd_ctrl;
1913 chip->dev_ready = gpmi_dev_ready;
1914 chip->read_byte = gpmi_read_byte;
1915 chip->read_buf = gpmi_read_buf;
1916 chip->write_buf = gpmi_write_buf;
Huang Shijie10a2bca2011-09-08 10:47:09 +08001917 chip->badblock_pattern = &gpmi_bbt_descr;
1918 chip->block_markbad = gpmi_block_markbad;
1919 chip->options |= NAND_NO_SUBPAGE_WRITE;
Lothar Waßmann2a500af2014-03-28 11:35:06 +01001920
1921	/* Set up swap_block_mark; it must be set before gpmi_set_geometry() is called. */
1922 this->swap_block_mark = !GPMI_IS_MX23(this);
1923
Huang Shijief720e7c2013-08-16 10:10:08 +08001924 /*
1925 * Allocate a temporary DMA buffer for reading ID in the
1926 * nand_scan_ident().
1927 */
Huang Shijie10a2bca2011-09-08 10:47:09 +08001928 this->bch_geometry.payload_size = 1024;
1929 this->bch_geometry.auxiliary_size = 128;
1930 ret = gpmi_alloc_dma_buffer(this);
1931 if (ret)
1932 goto err_out;
1933
Miquel Raynal5f8374d2018-07-20 17:15:00 +02001934 chip->dummy_controller.ops = &gpmi_nand_controller_ops;
Boris Brezillon00ad3782018-09-06 14:05:14 +02001935 ret = nand_scan(chip, GPMI_IS_MX6(this) ? 2 : 1);
Huang Shijief720e7c2013-08-16 10:10:08 +08001936 if (ret)
1937 goto err_out;
Huang Shijie10a2bca2011-09-08 10:47:09 +08001938
Huang Shijie885d71e2013-11-12 12:23:08 +08001939 ret = nand_boot_init(this);
1940 if (ret)
Boris Brezillon4d024232017-04-10 10:35:17 +02001941 goto err_nand_cleanup;
Boris Brezillone80eba72018-07-05 12:27:31 +02001942 ret = nand_create_bbt(chip);
Fabio Estevam899b8342015-02-09 19:22:33 -02001943 if (ret)
Boris Brezillon4d024232017-04-10 10:35:17 +02001944 goto err_nand_cleanup;
Huang Shijie885d71e2013-11-12 12:23:08 +08001945
Brian Norrisa61ae812015-10-30 20:33:25 -07001946 ret = mtd_device_register(mtd, NULL, 0);
Huang Shijie10a2bca2011-09-08 10:47:09 +08001947 if (ret)
Boris Brezillon4d024232017-04-10 10:35:17 +02001948 goto err_nand_cleanup;
Huang Shijie10a2bca2011-09-08 10:47:09 +08001949 return 0;
1950
Boris Brezillon4d024232017-04-10 10:35:17 +02001951err_nand_cleanup:
1952 nand_cleanup(chip);
Huang Shijie10a2bca2011-09-08 10:47:09 +08001953err_out:
Boris Brezillon4d024232017-04-10 10:35:17 +02001954 gpmi_free_dma_buffer(this);
Huang Shijie10a2bca2011-09-08 10:47:09 +08001955 return ret;
1956}
1957
Huang Shijiee10db1f2012-05-04 21:42:05 -04001958static const struct of_device_id gpmi_nand_id_table[] = {
1959 {
1960 .compatible = "fsl,imx23-gpmi-nand",
Lothar Waßmann6a760962014-06-12 15:20:41 +02001961 .data = &gpmi_devdata_imx23,
Huang Shijiee10db1f2012-05-04 21:42:05 -04001962 }, {
1963 .compatible = "fsl,imx28-gpmi-nand",
Lothar Waßmann6a760962014-06-12 15:20:41 +02001964 .data = &gpmi_devdata_imx28,
Huang Shijie9013bb42012-05-04 21:42:06 -04001965 }, {
1966 .compatible = "fsl,imx6q-gpmi-nand",
Lothar Waßmann6a760962014-06-12 15:20:41 +02001967 .data = &gpmi_devdata_imx6q,
Huang Shijie91f54982014-03-27 10:43:22 +08001968 }, {
1969 .compatible = "fsl,imx6sx-gpmi-nand",
Lothar Waßmann6a760962014-06-12 15:20:41 +02001970 .data = &gpmi_devdata_imx6sx,
Stefan Agnerb4af6942017-04-21 18:23:35 -07001971 }, {
1972 .compatible = "fsl,imx7d-gpmi-nand",
1973 .data = &gpmi_devdata_imx7d,
Huang Shijiee10db1f2012-05-04 21:42:05 -04001974 }, {}
1975};
1976MODULE_DEVICE_TABLE(of, gpmi_nand_id_table);
1977
Bill Pemberton06f25512012-11-19 13:23:07 -05001978static int gpmi_nand_probe(struct platform_device *pdev)
Huang Shijie10a2bca2011-09-08 10:47:09 +08001979{
Huang Shijie10a2bca2011-09-08 10:47:09 +08001980 struct gpmi_nand_data *this;
Huang Shijiee10db1f2012-05-04 21:42:05 -04001981 const struct of_device_id *of_id;
Huang Shijie10a2bca2011-09-08 10:47:09 +08001982 int ret;
1983
Huang Shijie6189ccc2014-03-21 18:19:39 +08001984 this = devm_kzalloc(&pdev->dev, sizeof(*this), GFP_KERNEL);
1985 if (!this)
1986 return -ENOMEM;
1987
Huang Shijiee10db1f2012-05-04 21:42:05 -04001988 of_id = of_match_device(gpmi_nand_id_table, &pdev->dev);
1989 if (of_id) {
Huang Shijie6189ccc2014-03-21 18:19:39 +08001990 this->devdata = of_id->data;
Huang Shijiee10db1f2012-05-04 21:42:05 -04001991 } else {
Huang Shijieda40c162013-11-20 10:09:43 +08001992 dev_err(&pdev->dev, "Failed to find the right device id.\n");
Lothar Waßmann52a073b2013-08-07 08:15:38 +02001993 return -ENODEV;
Huang Shijiee10db1f2012-05-04 21:42:05 -04001994 }
1995
Huang Shijie10a2bca2011-09-08 10:47:09 +08001996 platform_set_drvdata(pdev, this);
1997 this->pdev = pdev;
1998 this->dev = &pdev->dev;
Huang Shijie10a2bca2011-09-08 10:47:09 +08001999
2000 ret = acquire_resources(this);
2001 if (ret)
2002 goto exit_acquire_resources;
2003
Miquel Raynalb1206122018-03-02 15:38:40 +01002004 ret = gpmi_init(this);
Huang Shijie10a2bca2011-09-08 10:47:09 +08002005 if (ret)
2006 goto exit_nfc_init;
2007
Huang Shijieccce4172013-11-14 14:25:47 +08002008 ret = gpmi_nand_init(this);
Huang Shijie10a2bca2011-09-08 10:47:09 +08002009 if (ret)
2010 goto exit_nfc_init;
2011
Fabio Estevam490e2802012-09-05 11:35:24 -03002012 dev_info(this->dev, "driver registered.\n");
2013
Huang Shijie10a2bca2011-09-08 10:47:09 +08002014 return 0;
2015
2016exit_nfc_init:
2017 release_resources(this);
Huang Shijie10a2bca2011-09-08 10:47:09 +08002018exit_acquire_resources:
Fabio Estevam490e2802012-09-05 11:35:24 -03002019
Huang Shijie10a2bca2011-09-08 10:47:09 +08002020 return ret;
2021}
2022
Bill Pemberton810b7e02012-11-19 13:26:04 -05002023static int gpmi_nand_remove(struct platform_device *pdev)
Huang Shijie10a2bca2011-09-08 10:47:09 +08002024{
2025 struct gpmi_nand_data *this = platform_get_drvdata(pdev);
2026
Boris Brezillon59ac2762018-09-06 14:05:15 +02002027 nand_release(&this->nand);
Boris Brezillonebb528d92017-04-10 10:35:18 +02002028 gpmi_free_dma_buffer(this);
Huang Shijie10a2bca2011-09-08 10:47:09 +08002029 release_resources(this);
Huang Shijie10a2bca2011-09-08 10:47:09 +08002030 return 0;
2031}
2032
Huang Shijie026918e2015-12-02 16:47:40 -06002033#ifdef CONFIG_PM_SLEEP
2034static int gpmi_pm_suspend(struct device *dev)
2035{
2036 struct gpmi_nand_data *this = dev_get_drvdata(dev);
2037
2038 release_dma_channels(this);
2039 return 0;
2040}
2041
2042static int gpmi_pm_resume(struct device *dev)
2043{
2044 struct gpmi_nand_data *this = dev_get_drvdata(dev);
2045 int ret;
2046
2047 ret = acquire_dma_channels(this);
2048 if (ret < 0)
2049 return ret;
2050
2051 /* re-init the GPMI registers */
Huang Shijie026918e2015-12-02 16:47:40 -06002052 ret = gpmi_init(this);
2053 if (ret) {
2054 dev_err(this->dev, "Error setting GPMI : %d\n", ret);
2055 return ret;
2056 }
2057
2058 /* re-init the BCH registers */
2059 ret = bch_set_geometry(this);
2060 if (ret) {
2061 dev_err(this->dev, "Error setting BCH : %d\n", ret);
2062 return ret;
2063 }
2064
Huang Shijie026918e2015-12-02 16:47:40 -06002065 return 0;
2066}
2067#endif /* CONFIG_PM_SLEEP */
2068
2069static const struct dev_pm_ops gpmi_pm_ops = {
2070 SET_SYSTEM_SLEEP_PM_OPS(gpmi_pm_suspend, gpmi_pm_resume)
2071};
2072
Huang Shijie10a2bca2011-09-08 10:47:09 +08002073static struct platform_driver gpmi_nand_driver = {
2074 .driver = {
2075 .name = "gpmi-nand",
Huang Shijie026918e2015-12-02 16:47:40 -06002076 .pm = &gpmi_pm_ops,
Huang Shijiee10db1f2012-05-04 21:42:05 -04002077 .of_match_table = gpmi_nand_id_table,
Huang Shijie10a2bca2011-09-08 10:47:09 +08002078 },
2079 .probe = gpmi_nand_probe,
Bill Pemberton5153b882012-11-19 13:21:24 -05002080 .remove = gpmi_nand_remove,
Huang Shijie10a2bca2011-09-08 10:47:09 +08002081};
Fabio Estevam490e2802012-09-05 11:35:24 -03002082module_platform_driver(gpmi_nand_driver);
Huang Shijie10a2bca2011-09-08 10:47:09 +08002083
2084MODULE_AUTHOR("Freescale Semiconductor, Inc.");
2085MODULE_DESCRIPTION("i.MX GPMI NAND Flash Controller Driver");
2086MODULE_LICENSE("GPL");