/*
 * Copyright (c) 2016, The Linux Foundation. All rights reserved.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/clk.h>
#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/module.h>
#include <linux/mtd/rawnand.h>
#include <linux/mtd/partitions.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/delay.h>
#include <linux/dma/qcom_bam_dma.h>
/* NANDc reg offsets */
#define NAND_FLASH_CMD                  0x00
#define NAND_ADDR0                      0x04
#define NAND_ADDR1                      0x08
#define NAND_FLASH_CHIP_SELECT          0x0c
#define NAND_EXEC_CMD                   0x10
#define NAND_FLASH_STATUS               0x14
#define NAND_BUFFER_STATUS              0x18
#define NAND_DEV0_CFG0                  0x20
#define NAND_DEV0_CFG1                  0x24
#define NAND_DEV0_ECC_CFG               0x28
#define NAND_DEV1_ECC_CFG               0x2c
#define NAND_DEV1_CFG0                  0x30
#define NAND_DEV1_CFG1                  0x34
#define NAND_READ_ID                    0x40
#define NAND_READ_STATUS                0x44
#define NAND_DEV_CMD0                   0xa0
#define NAND_DEV_CMD1                   0xa4
#define NAND_DEV_CMD2                   0xa8
#define NAND_DEV_CMD_VLD                0xac
#define SFLASHC_BURST_CFG               0xe0
#define NAND_ERASED_CW_DETECT_CFG       0xe8
#define NAND_ERASED_CW_DETECT_STATUS    0xec
#define NAND_EBI2_ECC_BUF_CFG           0xf0
#define FLASH_BUF_ACC                   0x100

#define NAND_CTRL                       0xf00
#define NAND_VERSION                    0xf08
#define NAND_READ_LOCATION_0            0xf20
#define NAND_READ_LOCATION_1            0xf24
#define NAND_READ_LOCATION_2            0xf28
#define NAND_READ_LOCATION_3            0xf2c

/* dummy register offsets, used by write_reg_dma */
#define NAND_DEV_CMD1_RESTORE           0xdead
#define NAND_DEV_CMD_VLD_RESTORE        0xbeef

/* NAND_FLASH_CMD bits */
#define PAGE_ACC                        BIT(4)
#define LAST_PAGE                       BIT(5)

/* NAND_FLASH_CHIP_SELECT bits */
#define NAND_DEV_SEL                    0
#define DM_EN                           BIT(2)

/* NAND_FLASH_STATUS bits */
#define FS_OP_ERR                       BIT(4)
#define FS_READY_BSY_N                  BIT(5)
#define FS_MPU_ERR                      BIT(8)
#define FS_DEVICE_STS_ERR               BIT(16)
#define FS_DEVICE_WP                    BIT(23)

/* NAND_BUFFER_STATUS bits */
#define BS_UNCORRECTABLE_BIT            BIT(8)
#define BS_CORRECTABLE_ERR_MSK          0x1f

/* NAND_DEVn_CFG0 bits */
#define DISABLE_STATUS_AFTER_WRITE      4
#define CW_PER_PAGE                     6
#define UD_SIZE_BYTES                   9
#define ECC_PARITY_SIZE_BYTES_RS        19
#define SPARE_SIZE_BYTES                23
#define NUM_ADDR_CYCLES                 27
#define STATUS_BFR_READ                 30
#define SET_RD_MODE_AFTER_STATUS        31

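/*
 * worked example: a 2K page read with ECC uses four 512-byte codewords, so
 * the CW_PER_PAGE field carries (4 - 1); update_rw_regs() below shows that
 * encoding, and nandc_param() programs 512 << UD_SIZE_BYTES for its raw
 * single-codeword read
 */
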
/* NAND_DEVn_CFG1 bits */
#define DEV0_CFG1_ECC_DISABLE           0
#define WIDE_FLASH                      1
#define NAND_RECOVERY_CYCLES            2
#define CS_ACTIVE_BSY                   5
#define BAD_BLOCK_BYTE_NUM              6
#define BAD_BLOCK_IN_SPARE_AREA         16
#define WR_RD_BSY_GAP                   17
#define ENABLE_BCH_ECC                  27

/* NAND_DEV0_ECC_CFG bits */
#define ECC_CFG_ECC_DISABLE             0
#define ECC_SW_RESET                    1
#define ECC_MODE                        4
#define ECC_PARITY_SIZE_BYTES_BCH       8
#define ECC_NUM_DATA_BYTES              16
#define ECC_FORCE_CLK_OPEN              30

/* NAND_DEV_CMD1 bits */
#define READ_ADDR                       0

/* NAND_DEV_CMD_VLD bits */
#define READ_START_VLD                  BIT(0)
#define READ_STOP_VLD                   BIT(1)
#define WRITE_START_VLD                 BIT(2)
#define ERASE_START_VLD                 BIT(3)
#define SEQ_READ_START_VLD              BIT(4)

/* NAND_EBI2_ECC_BUF_CFG bits */
#define NUM_STEPS                       0

/* NAND_ERASED_CW_DETECT_CFG bits */
#define ERASED_CW_ECC_MASK              1
#define AUTO_DETECT_RES                 0
#define MASK_ECC                        (1 << ERASED_CW_ECC_MASK)
#define RESET_ERASED_DET                (1 << AUTO_DETECT_RES)
#define ACTIVE_ERASED_DET               (0 << AUTO_DETECT_RES)
#define CLR_ERASED_PAGE_DET             (RESET_ERASED_DET | MASK_ECC)
#define SET_ERASED_PAGE_DET             (ACTIVE_ERASED_DET | MASK_ECC)

/* NAND_ERASED_CW_DETECT_STATUS bits */
#define PAGE_ALL_ERASED                 BIT(7)
#define CODEWORD_ALL_ERASED             BIT(6)
#define PAGE_ERASED                     BIT(5)
#define CODEWORD_ERASED                 BIT(4)
#define ERASED_PAGE                     (PAGE_ALL_ERASED | PAGE_ERASED)
#define ERASED_CW                       (CODEWORD_ALL_ERASED | CODEWORD_ERASED)

/* NAND_READ_LOCATION_n bits */
#define READ_LOCATION_OFFSET            0
#define READ_LOCATION_SIZE              16
#define READ_LOCATION_LAST              31

/* Version Mask */
#define NAND_VERSION_MAJOR_MASK         0xf0000000
#define NAND_VERSION_MAJOR_SHIFT        28
#define NAND_VERSION_MINOR_MASK         0x0fff0000
#define NAND_VERSION_MINOR_SHIFT        16

/* NAND OP_CMDs */
#define PAGE_READ                       0x2
#define PAGE_READ_WITH_ECC              0x3
#define PAGE_READ_WITH_ECC_SPARE        0x4
#define PROGRAM_PAGE                    0x6
#define PAGE_PROGRAM_WITH_ECC           0x7
#define PROGRAM_PAGE_SPARE              0x9
#define BLOCK_ERASE                     0xa
#define FETCH_ID                        0xb
#define RESET_DEVICE                    0xd

/* Default Value for NAND_DEV_CMD_VLD */
#define NAND_DEV_CMD_VLD_VAL            (READ_START_VLD | WRITE_START_VLD | \
                                         ERASE_START_VLD | SEQ_READ_START_VLD)

/* NAND_CTRL bits */
#define BAM_MODE_EN                     BIT(0)

/*
 * the NAND controller performs reads/writes with ECC in 516 byte chunks.
 * the driver calls the chunks 'step' or 'codeword' interchangeably
 */
#define NANDC_STEP_SIZE                 512

/*
 * the largest page size we support is 8K, this will have 16 steps/codewords
 * of 512 bytes each
 */
#define MAX_NUM_STEPS                   (SZ_8K / NANDC_STEP_SIZE)

/* we read at most 3 registers per codeword scan */
#define MAX_REG_RD                      (3 * MAX_NUM_STEPS)

/* ECC modes supported by the controller */
#define ECC_NONE                        BIT(0)
#define ECC_RS_4BIT                     BIT(1)
#define ECC_BCH_4BIT                    BIT(2)
#define ECC_BCH_8BIT                    BIT(3)

#define nandc_set_read_loc(nandc, reg, offset, size, is_last)  \
nandc_set_reg(nandc, NAND_READ_LOCATION_##reg,                  \
              ((offset) << READ_LOCATION_OFFSET) |              \
              ((size) << READ_LOCATION_SIZE) |                  \
              ((is_last) << READ_LOCATION_LAST))

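/*
 * usage sketch: nandc_set_read_loc(nandc, 0, 0, 512, 1) stages
 * NAND_READ_LOCATION_0 so that 512 bytes are copied from offset 0 of the
 * codeword buffer and marks it as the last read location; this is how
 * nandc_param() below pulls in the ONFI parameter page
 */
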
/*
 * Returns the actual register address for all NAND_DEV_ registers
 * (i.e. NAND_DEV_CMD0, NAND_DEV_CMD1, NAND_DEV_CMD2 and NAND_DEV_CMD_VLD)
 */
#define dev_cmd_reg_addr(nandc, reg) ((nandc)->props->dev_cmd_reg_start + (reg))

/* Returns the NAND register physical address */
#define nandc_reg_phys(chip, offset) ((chip)->base_phys + (offset))

/* Returns the dma address for reg read buffer */
#define reg_buf_dma_addr(chip, vaddr) \
        ((chip)->reg_read_dma + \
        ((uint8_t *)(vaddr) - (uint8_t *)(chip)->reg_read_buf))

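/*
 * e.g. if vaddr points 3 registers into reg_read_buf, reg_buf_dma_addr()
 * returns reg_read_dma plus the same 3 * sizeof(__le32) byte offset, so a
 * BAM read command element lands its result in the matching slot of the
 * bounce buffer
 */
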
#define QPIC_PER_CW_CMD_ELEMENTS        32
#define QPIC_PER_CW_CMD_SGL             32
#define QPIC_PER_CW_DATA_SGL            8

/*
 * Flags used in DMA descriptor preparation helper functions
 * (i.e. read_reg_dma/write_reg_dma/read_data_dma/write_data_dma)
 */
/* Don't set the EOT in current tx BAM sgl */
#define NAND_BAM_NO_EOT                 BIT(0)
/* Set the NWD flag in current BAM sgl */
#define NAND_BAM_NWD                    BIT(1)
/* Finish writing in the current BAM sgl and start writing in another BAM sgl */
#define NAND_BAM_NEXT_SGL               BIT(2)
/*
 * Erased codeword status is used twice in a single transfer, so this flag
 * selects which value of the erased codeword status register to program
 */
#define NAND_ERASED_CW_SET              BIT(4)

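/*
 * flag usage sketch: config_nand_cw_read() below queues the NAND_FLASH_CMD
 * and NAND_EXEC_CMD register writes each with NAND_BAM_NEXT_SGL, so each
 * burst is closed out in its own command sgl before the operation starts
 */
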
/*
 * This data type corresponds to the BAM transaction which will be used for all
 * NAND transfers.
 * @bam_ce - the array of BAM command elements
 * @cmd_sgl - sgl for NAND BAM command pipe
 * @data_sgl - sgl for NAND BAM consumer/producer pipe
 * @bam_ce_pos - the index in bam_ce which is available for the next sgl
 * @bam_ce_start - the index in bam_ce of the first command element for the
 *                 current sgl. It is used for the size calculation of the
 *                 current sgl
 * @cmd_sgl_pos - current index in command sgl.
 * @cmd_sgl_start - start index in command sgl.
 * @tx_sgl_pos - current index in data sgl for tx.
 * @tx_sgl_start - start index in data sgl for tx.
 * @rx_sgl_pos - current index in data sgl for rx.
 * @rx_sgl_start - start index in data sgl for rx.
 */
struct bam_transaction {
        struct bam_cmd_element *bam_ce;
        struct scatterlist *cmd_sgl;
        struct scatterlist *data_sgl;
        u32 bam_ce_pos;
        u32 bam_ce_start;
        u32 cmd_sgl_pos;
        u32 cmd_sgl_start;
        u32 tx_sgl_pos;
        u32 tx_sgl_start;
        u32 rx_sgl_pos;
        u32 rx_sgl_start;
};

/*
 * This data type corresponds to the nand dma descriptor
 * @node - list node used to add this desc to the controller's desc_list
 * @dir - DMA transfer direction
 * @adm_sgl - sgl which will be used for single sgl dma descriptor. Only used by
 *            ADM
 * @bam_sgl - sgl which will be used for dma descriptor. Only used by BAM
 * @sgl_cnt - number of SGL in bam_sgl. Only used by BAM
 * @dma_desc - low level DMA engine descriptor
 */
struct desc_info {
        struct list_head node;

        enum dma_data_direction dir;
        union {
                struct scatterlist adm_sgl;
                struct {
                        struct scatterlist *bam_sgl;
                        int sgl_cnt;
                };
        };
        struct dma_async_tx_descriptor *dma_desc;
};

/*
 * holds the current register values that we want to write. acts as a
 * contiguous chunk of memory which we use to write the controller registers
 * through DMA.
 */
struct nandc_regs {
        __le32 cmd;
        __le32 addr0;
        __le32 addr1;
        __le32 chip_sel;
        __le32 exec;

        __le32 cfg0;
        __le32 cfg1;
        __le32 ecc_bch_cfg;

        __le32 clrflashstatus;
        __le32 clrreadstatus;

        __le32 cmd1;
        __le32 vld;

        __le32 orig_cmd1;
        __le32 orig_vld;

        __le32 ecc_buf_cfg;
        __le32 read_location0;
        __le32 read_location1;
        __le32 read_location2;
        __le32 read_location3;

        __le32 erased_cw_detect_cfg_clr;
        __le32 erased_cw_detect_cfg_set;
};

/*
 * NAND controller data struct
 *
 * @controller:                 base controller structure
 * @host_list:                  list containing all the chips attached to the
 *                              controller
 * @dev:                        parent device
 * @base:                       MMIO base
 * @base_phys:                  physical base address of controller registers
 * @base_dma:                   dma base address of controller registers
 * @core_clk:                   controller clock
 * @aon_clk:                    another controller clock
 *
 * @chan:                       dma channel
 * @cmd_crci:                   ADM DMA CRCI for command flow control
 * @data_crci:                  ADM DMA CRCI for data flow control
 * @desc_list:                  DMA descriptor list (list of desc_infos)
 *
 * @data_buffer:                our local DMA buffer for page read/writes,
 *                              used when we can't use the buffer provided
 *                              by upper layers directly
 * @buf_size/count/start:       markers for chip->read_buf/write_buf functions
 * @reg_read_buf:               local buffer for reading back registers via DMA
 * @reg_read_dma:               contains dma address for register read buffer
 * @reg_read_pos:               marker for data read in reg_read_buf
 *
 * @regs:                       a contiguous chunk of memory for DMA register
 *                              writes. contains the register values to be
 *                              written to controller
 * @cmd1/vld:                   some fixed controller register values
 * @props:                      properties of current NAND controller,
 *                              initialized via DT match data
 * @max_cwperpage:              maximum QPIC codewords required. calculated
 *                              from the pagesize of all connected NAND devices
 */
struct qcom_nand_controller {
        struct nand_hw_control controller;
        struct list_head host_list;

        struct device *dev;

        void __iomem *base;
        phys_addr_t base_phys;
        dma_addr_t base_dma;

        struct clk *core_clk;
        struct clk *aon_clk;

        union {
                /* will be used only by QPIC for BAM DMA */
                struct {
                        struct dma_chan *tx_chan;
                        struct dma_chan *rx_chan;
                        struct dma_chan *cmd_chan;
                };

                /* will be used only by EBI2 for ADM DMA */
                struct {
                        struct dma_chan *chan;
                        unsigned int cmd_crci;
                        unsigned int data_crci;
                };
        };

        struct list_head desc_list;
        struct bam_transaction *bam_txn;

        u8 *data_buffer;
        int buf_size;
        int buf_count;
        int buf_start;
        unsigned int max_cwperpage;

        __le32 *reg_read_buf;
        dma_addr_t reg_read_dma;
        int reg_read_pos;

        struct nandc_regs *regs;

        u32 cmd1, vld;
        const struct qcom_nandc_props *props;
};

/*
 * NAND chip structure
 *
 * @chip:                       base NAND chip structure
 * @node:                       list node to add itself to host_list in
 *                              qcom_nand_controller
 *
 * @cs:                         chip select value for this chip
 * @cw_size:                    the number of bytes in a single step/codeword
 *                              of a page, consisting of all data, ecc, spare
 *                              and reserved bytes
 * @cw_data:                    the number of bytes within a codeword protected
 *                              by ECC
 * @use_ecc:                    request the controller to use ECC for the
 *                              upcoming read/write
 * @bch_enabled:                flag to tell whether BCH ECC mode is used
 * @ecc_bytes_hw:               ECC bytes used by controller hardware for this
 *                              chip
 * @status:                     value to be returned if NAND_CMD_STATUS command
 *                              is executed
 * @last_command:               keeps track of last command on this chip. used
 *                              for reading correct status
 *
 * @cfg0, cfg1, cfg0_raw..:     NANDc register configurations needed for
 *                              ecc/non-ecc mode for the current nand flash
 *                              device
 */
struct qcom_nand_host {
        struct nand_chip chip;
        struct list_head node;

        int cs;
        int cw_size;
        int cw_data;
        bool use_ecc;
        bool bch_enabled;
        int ecc_bytes_hw;
        int spare_bytes;
        int bbm_size;
        u8 status;
        int last_command;

        u32 cfg0, cfg1;
        u32 cfg0_raw, cfg1_raw;
        u32 ecc_buf_cfg;
        u32 ecc_bch_cfg;
        u32 clrflashstatus;
        u32 clrreadstatus;
};

/*
 * This data type corresponds to the NAND controller properties which vary
 * among different NAND controllers.
 * @ecc_modes - ecc mode for NAND
 * @is_bam - whether NAND controller is using BAM
 * @dev_cmd_reg_start - NAND_DEV_CMD_* registers starting offset
 */
struct qcom_nandc_props {
        u32 ecc_modes;
        bool is_bam;
        u32 dev_cmd_reg_start;
};

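/*
 * purely illustrative instantiation (the real match data lives further down
 * in the driver): an EBI2/ADM based controller might look like
 *   { .ecc_modes = ECC_RS_4BIT | ECC_BCH_8BIT, .is_bam = false,
 *     .dev_cmd_reg_start = 0x0 }
 */
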
/* Frees the BAM transaction memory */
static void free_bam_transaction(struct qcom_nand_controller *nandc)
{
        struct bam_transaction *bam_txn = nandc->bam_txn;

        devm_kfree(nandc->dev, bam_txn);
}

/* Allocates and initializes the BAM transaction */
static struct bam_transaction *
alloc_bam_transaction(struct qcom_nand_controller *nandc)
{
        struct bam_transaction *bam_txn;
        size_t bam_txn_size;
        unsigned int num_cw = nandc->max_cwperpage;
        void *bam_txn_buf;

        bam_txn_size =
                sizeof(*bam_txn) + num_cw *
                ((sizeof(*bam_txn->bam_ce) * QPIC_PER_CW_CMD_ELEMENTS) +
                (sizeof(*bam_txn->cmd_sgl) * QPIC_PER_CW_CMD_SGL) +
                (sizeof(*bam_txn->data_sgl) * QPIC_PER_CW_DATA_SGL));

        bam_txn_buf = devm_kzalloc(nandc->dev, bam_txn_size, GFP_KERNEL);
        if (!bam_txn_buf)
                return NULL;

        bam_txn = bam_txn_buf;
        bam_txn_buf += sizeof(*bam_txn);

        bam_txn->bam_ce = bam_txn_buf;
        bam_txn_buf +=
                sizeof(*bam_txn->bam_ce) * QPIC_PER_CW_CMD_ELEMENTS * num_cw;

        bam_txn->cmd_sgl = bam_txn_buf;
        bam_txn_buf +=
                sizeof(*bam_txn->cmd_sgl) * QPIC_PER_CW_CMD_SGL * num_cw;

        bam_txn->data_sgl = bam_txn_buf;

        return bam_txn;
}

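/*
 * layout sketch of the single devm_kzalloc() region carved up above:
 *   [bam_transaction][bam_ce: 32 * num_cw][cmd_sgl: 32 * num_cw]
 *   [data_sgl: 8 * num_cw]
 * with the per-codeword counts coming from the QPIC_PER_CW_* defines
 */
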
/* Clears the BAM transaction indexes */
static void clear_bam_transaction(struct qcom_nand_controller *nandc)
{
        struct bam_transaction *bam_txn = nandc->bam_txn;

        if (!nandc->props->is_bam)
                return;

        bam_txn->bam_ce_pos = 0;
        bam_txn->bam_ce_start = 0;
        bam_txn->cmd_sgl_pos = 0;
        bam_txn->cmd_sgl_start = 0;
        bam_txn->tx_sgl_pos = 0;
        bam_txn->tx_sgl_start = 0;
        bam_txn->rx_sgl_pos = 0;
        bam_txn->rx_sgl_start = 0;

        sg_init_table(bam_txn->cmd_sgl, nandc->max_cwperpage *
                      QPIC_PER_CW_CMD_SGL);
        sg_init_table(bam_txn->data_sgl, nandc->max_cwperpage *
                      QPIC_PER_CW_DATA_SGL);
}

static inline struct qcom_nand_host *to_qcom_nand_host(struct nand_chip *chip)
{
        return container_of(chip, struct qcom_nand_host, chip);
}

static inline struct qcom_nand_controller *
get_qcom_nand_controller(struct nand_chip *chip)
{
        return container_of(chip->controller, struct qcom_nand_controller,
                            controller);
}

static inline u32 nandc_read(struct qcom_nand_controller *nandc, int offset)
{
        return ioread32(nandc->base + offset);
}

static inline void nandc_write(struct qcom_nand_controller *nandc, int offset,
                               u32 val)
{
        iowrite32(val, nandc->base + offset);
}

static inline void nandc_read_buffer_sync(struct qcom_nand_controller *nandc,
                                          bool is_cpu)
{
        if (!nandc->props->is_bam)
                return;

        if (is_cpu)
                dma_sync_single_for_cpu(nandc->dev, nandc->reg_read_dma,
                                        MAX_REG_RD *
                                        sizeof(*nandc->reg_read_buf),
                                        DMA_FROM_DEVICE);
        else
                dma_sync_single_for_device(nandc->dev, nandc->reg_read_dma,
                                           MAX_REG_RD *
                                           sizeof(*nandc->reg_read_buf),
                                           DMA_FROM_DEVICE);
}

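/*
 * note: reg_read_buf is mapped DMA_FROM_DEVICE for BAM, so callers sync it
 * for the CPU (is_cpu == true) before parsing results and back for the
 * device before queueing fresh register reads; see clear_read_regs() and
 * parse_read_errors() below
 */
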
static __le32 *offset_to_nandc_reg(struct nandc_regs *regs, int offset)
{
        switch (offset) {
        case NAND_FLASH_CMD:
                return &regs->cmd;
        case NAND_ADDR0:
                return &regs->addr0;
        case NAND_ADDR1:
                return &regs->addr1;
        case NAND_FLASH_CHIP_SELECT:
                return &regs->chip_sel;
        case NAND_EXEC_CMD:
                return &regs->exec;
        case NAND_FLASH_STATUS:
                return &regs->clrflashstatus;
        case NAND_DEV0_CFG0:
                return &regs->cfg0;
        case NAND_DEV0_CFG1:
                return &regs->cfg1;
        case NAND_DEV0_ECC_CFG:
                return &regs->ecc_bch_cfg;
        case NAND_READ_STATUS:
                return &regs->clrreadstatus;
        case NAND_DEV_CMD1:
                return &regs->cmd1;
        case NAND_DEV_CMD1_RESTORE:
                return &regs->orig_cmd1;
        case NAND_DEV_CMD_VLD:
                return &regs->vld;
        case NAND_DEV_CMD_VLD_RESTORE:
                return &regs->orig_vld;
        case NAND_EBI2_ECC_BUF_CFG:
                return &regs->ecc_buf_cfg;
        case NAND_READ_LOCATION_0:
                return &regs->read_location0;
        case NAND_READ_LOCATION_1:
                return &regs->read_location1;
        case NAND_READ_LOCATION_2:
                return &regs->read_location2;
        case NAND_READ_LOCATION_3:
                return &regs->read_location3;
        default:
                return NULL;
        }
}

static void nandc_set_reg(struct qcom_nand_controller *nandc, int offset,
                          u32 val)
{
        struct nandc_regs *regs = nandc->regs;
        __le32 *reg;

        reg = offset_to_nandc_reg(regs, offset);

        if (reg)
                *reg = cpu_to_le32(val);
}

/* helper to configure address register values */
static void set_address(struct qcom_nand_host *host, u16 column, int page)
{
        struct nand_chip *chip = &host->chip;
        struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);

        if (chip->options & NAND_BUSWIDTH_16)
                column >>= 1;

        nandc_set_reg(nandc, NAND_ADDR0, page << 16 | column);
        nandc_set_reg(nandc, NAND_ADDR1, page >> 16 & 0xff);
}

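/*
 * worked example: page 0x12345 at column 0 yields ADDR0 = 0x23450000 (the
 * low 16 page bits above the column field) and ADDR1 = 0x01 (page >> 16),
 * i.e. a 5 address cycle sequence as programmed via NUM_ADDR_CYCLES
 */
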
/*
 * update_rw_regs:      set up read/write register values, these will be
 *                      written to the NAND controller registers via DMA
 *
 * @num_cw:             number of steps for the read/write operation
 * @read:               read or write operation
 */
static void update_rw_regs(struct qcom_nand_host *host, int num_cw, bool read)
{
        struct nand_chip *chip = &host->chip;
        struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
        u32 cmd, cfg0, cfg1, ecc_bch_cfg;

        if (read) {
                if (host->use_ecc)
                        cmd = PAGE_READ_WITH_ECC | PAGE_ACC | LAST_PAGE;
                else
                        cmd = PAGE_READ | PAGE_ACC | LAST_PAGE;
        } else {
                cmd = PROGRAM_PAGE | PAGE_ACC | LAST_PAGE;
        }

        if (host->use_ecc) {
                cfg0 = (host->cfg0 & ~(7U << CW_PER_PAGE)) |
                                (num_cw - 1) << CW_PER_PAGE;

                cfg1 = host->cfg1;
                ecc_bch_cfg = host->ecc_bch_cfg;
        } else {
                cfg0 = (host->cfg0_raw & ~(7U << CW_PER_PAGE)) |
                                (num_cw - 1) << CW_PER_PAGE;

                cfg1 = host->cfg1_raw;
                ecc_bch_cfg = 1 << ECC_CFG_ECC_DISABLE;
        }

        nandc_set_reg(nandc, NAND_FLASH_CMD, cmd);
        nandc_set_reg(nandc, NAND_DEV0_CFG0, cfg0);
        nandc_set_reg(nandc, NAND_DEV0_CFG1, cfg1);
        nandc_set_reg(nandc, NAND_DEV0_ECC_CFG, ecc_bch_cfg);
        nandc_set_reg(nandc, NAND_EBI2_ECC_BUF_CFG, host->ecc_buf_cfg);
        nandc_set_reg(nandc, NAND_FLASH_STATUS, host->clrflashstatus);
        nandc_set_reg(nandc, NAND_READ_STATUS, host->clrreadstatus);
        nandc_set_reg(nandc, NAND_EXEC_CMD, 1);

        if (read)
                nandc_set_read_loc(nandc, 0, 0, host->use_ecc ?
                                   host->cw_data : host->cw_size, 1);
}

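/*
 * e.g. a full ECC page read on a 2K page device calls
 * update_rw_regs(host, 4, true): cmd becomes
 * PAGE_READ_WITH_ECC | PAGE_ACC | LAST_PAGE, CW_PER_PAGE carries (4 - 1),
 * and READ_LOCATION_0 is staged to pull host->cw_data bytes per codeword
 */
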
/*
 * Maps the scatter gather list for DMA transfer and forms the DMA descriptor
 * for BAM. This descriptor will be added in the NAND DMA descriptor queue
 * which will be submitted to DMA engine.
 */
static int prepare_bam_async_desc(struct qcom_nand_controller *nandc,
                                  struct dma_chan *chan,
                                  unsigned long flags)
{
        struct desc_info *desc;
        struct scatterlist *sgl;
        unsigned int sgl_cnt;
        int ret;
        struct bam_transaction *bam_txn = nandc->bam_txn;
        enum dma_transfer_direction dir_eng;
        struct dma_async_tx_descriptor *dma_desc;

        desc = kzalloc(sizeof(*desc), GFP_KERNEL);
        if (!desc)
                return -ENOMEM;

        if (chan == nandc->cmd_chan) {
                sgl = &bam_txn->cmd_sgl[bam_txn->cmd_sgl_start];
                sgl_cnt = bam_txn->cmd_sgl_pos - bam_txn->cmd_sgl_start;
                bam_txn->cmd_sgl_start = bam_txn->cmd_sgl_pos;
                dir_eng = DMA_MEM_TO_DEV;
                desc->dir = DMA_TO_DEVICE;
        } else if (chan == nandc->tx_chan) {
                sgl = &bam_txn->data_sgl[bam_txn->tx_sgl_start];
                sgl_cnt = bam_txn->tx_sgl_pos - bam_txn->tx_sgl_start;
                bam_txn->tx_sgl_start = bam_txn->tx_sgl_pos;
                dir_eng = DMA_MEM_TO_DEV;
                desc->dir = DMA_TO_DEVICE;
        } else {
                sgl = &bam_txn->data_sgl[bam_txn->rx_sgl_start];
                sgl_cnt = bam_txn->rx_sgl_pos - bam_txn->rx_sgl_start;
                bam_txn->rx_sgl_start = bam_txn->rx_sgl_pos;
                dir_eng = DMA_DEV_TO_MEM;
                desc->dir = DMA_FROM_DEVICE;
        }

        sg_mark_end(sgl + sgl_cnt - 1);
        ret = dma_map_sg(nandc->dev, sgl, sgl_cnt, desc->dir);
        if (ret == 0) {
                dev_err(nandc->dev, "failure in mapping desc\n");
                kfree(desc);
                return -ENOMEM;
        }

        desc->sgl_cnt = sgl_cnt;
        desc->bam_sgl = sgl;

        dma_desc = dmaengine_prep_slave_sg(chan, sgl, sgl_cnt, dir_eng,
                                           flags);

        if (!dma_desc) {
                dev_err(nandc->dev, "failure in prep desc\n");
                dma_unmap_sg(nandc->dev, sgl, sgl_cnt, desc->dir);
                kfree(desc);
                return -EINVAL;
        }

        desc->dma_desc = dma_desc;

        list_add_tail(&desc->node, &nandc->desc_list);

        return 0;
}

/*
 * Prepares the command descriptor for BAM DMA which will be used for NAND
 * register reads and writes. The command descriptor requires the command
 * to be formed as command elements, so this function takes command
 * elements from the bam transaction's ce array and fills them with the
 * required data. A single SGL can contain multiple command elements, so
 * NAND_BAM_NEXT_SGL will be used for starting a separate SGL
 * after the current command element.
 */
static int prep_bam_dma_desc_cmd(struct qcom_nand_controller *nandc, bool read,
                                 int reg_off, const void *vaddr,
                                 int size, unsigned int flags)
{
        int bam_ce_size;
        int i, ret;
        struct bam_cmd_element *bam_ce_buffer;
        struct bam_transaction *bam_txn = nandc->bam_txn;

        bam_ce_buffer = &bam_txn->bam_ce[bam_txn->bam_ce_pos];

        /* fill the command desc */
        for (i = 0; i < size; i++) {
                if (read)
                        bam_prep_ce(&bam_ce_buffer[i],
                                    nandc_reg_phys(nandc, reg_off + 4 * i),
                                    BAM_READ_COMMAND,
                                    reg_buf_dma_addr(nandc,
                                                     (__le32 *)vaddr + i));
                else
                        bam_prep_ce_le32(&bam_ce_buffer[i],
                                         nandc_reg_phys(nandc, reg_off + 4 * i),
                                         BAM_WRITE_COMMAND,
                                         *((__le32 *)vaddr + i));
        }

        bam_txn->bam_ce_pos += size;

        /* use the separate sgl after this command */
        if (flags & NAND_BAM_NEXT_SGL) {
                bam_ce_buffer = &bam_txn->bam_ce[bam_txn->bam_ce_start];
                bam_ce_size = (bam_txn->bam_ce_pos -
                                bam_txn->bam_ce_start) *
                                sizeof(struct bam_cmd_element);
                sg_set_buf(&bam_txn->cmd_sgl[bam_txn->cmd_sgl_pos],
                           bam_ce_buffer, bam_ce_size);
                bam_txn->cmd_sgl_pos++;
                bam_txn->bam_ce_start = bam_txn->bam_ce_pos;

                if (flags & NAND_BAM_NWD) {
                        ret = prepare_bam_async_desc(nandc, nandc->cmd_chan,
                                                     DMA_PREP_FENCE |
                                                     DMA_PREP_CMD);
                        if (ret)
                                return ret;
                }
        }

        return 0;
}

/*
 * Prepares the data descriptor for BAM DMA which will be used for NAND
 * data reads and writes.
 */
static int prep_bam_dma_desc_data(struct qcom_nand_controller *nandc, bool read,
                                  const void *vaddr,
                                  int size, unsigned int flags)
{
        int ret;
        struct bam_transaction *bam_txn = nandc->bam_txn;

        if (read) {
                sg_set_buf(&bam_txn->data_sgl[bam_txn->rx_sgl_pos],
                           vaddr, size);
                bam_txn->rx_sgl_pos++;
        } else {
                sg_set_buf(&bam_txn->data_sgl[bam_txn->tx_sgl_pos],
                           vaddr, size);
                bam_txn->tx_sgl_pos++;

                /*
                 * BAM will only set EOT for DMA_PREP_INTERRUPT so if this flag
                 * is not set, form the DMA descriptor
                 */
                if (!(flags & NAND_BAM_NO_EOT)) {
                        ret = prepare_bam_async_desc(nandc, nandc->tx_chan,
                                                     DMA_PREP_INTERRUPT);
                        if (ret)
                                return ret;
                }
        }

        return 0;
}

static int prep_adm_dma_desc(struct qcom_nand_controller *nandc, bool read,
                             int reg_off, const void *vaddr, int size,
                             bool flow_control)
{
        struct desc_info *desc;
        struct dma_async_tx_descriptor *dma_desc;
        struct scatterlist *sgl;
        struct dma_slave_config slave_conf;
        enum dma_transfer_direction dir_eng;
        int ret;

        desc = kzalloc(sizeof(*desc), GFP_KERNEL);
        if (!desc)
                return -ENOMEM;

        sgl = &desc->adm_sgl;

        sg_init_one(sgl, vaddr, size);

        if (read) {
                dir_eng = DMA_DEV_TO_MEM;
                desc->dir = DMA_FROM_DEVICE;
        } else {
                dir_eng = DMA_MEM_TO_DEV;
                desc->dir = DMA_TO_DEVICE;
        }

        ret = dma_map_sg(nandc->dev, sgl, 1, desc->dir);
        if (ret == 0) {
                ret = -ENOMEM;
                goto err;
        }

        memset(&slave_conf, 0x00, sizeof(slave_conf));

        slave_conf.device_fc = flow_control;
        if (read) {
                slave_conf.src_maxburst = 16;
                slave_conf.src_addr = nandc->base_dma + reg_off;
                slave_conf.slave_id = nandc->data_crci;
        } else {
                slave_conf.dst_maxburst = 16;
                slave_conf.dst_addr = nandc->base_dma + reg_off;
                slave_conf.slave_id = nandc->cmd_crci;
        }

        ret = dmaengine_slave_config(nandc->chan, &slave_conf);
        if (ret) {
                dev_err(nandc->dev, "failed to configure dma channel\n");
                goto err;
        }

        dma_desc = dmaengine_prep_slave_sg(nandc->chan, sgl, 1, dir_eng, 0);
        if (!dma_desc) {
                dev_err(nandc->dev, "failed to prepare desc\n");
                ret = -EINVAL;
                goto err;
        }

        desc->dma_desc = dma_desc;

        list_add_tail(&desc->node, &nandc->desc_list);

        return 0;
err:
        kfree(desc);

        return ret;
}

/*
 * read_reg_dma:        prepares a descriptor to read a given number of
 *                      contiguous registers to the reg_read_buf pointer
 *
 * @first:              offset of the first register in the contiguous block
 * @num_regs:           number of registers to read
 * @flags:              flags to control DMA descriptor preparation
 */
static int read_reg_dma(struct qcom_nand_controller *nandc, int first,
                        int num_regs, unsigned int flags)
{
        bool flow_control = false;
        void *vaddr;

        vaddr = nandc->reg_read_buf + nandc->reg_read_pos;
        nandc->reg_read_pos += num_regs;

        if (first == NAND_DEV_CMD_VLD || first == NAND_DEV_CMD1)
                first = dev_cmd_reg_addr(nandc, first);

        if (nandc->props->is_bam)
                return prep_bam_dma_desc_cmd(nandc, true, first, vaddr,
                                             num_regs, flags);

        if (first == NAND_READ_ID || first == NAND_FLASH_STATUS)
                flow_control = true;

        return prep_adm_dma_desc(nandc, true, first, vaddr,
                                 num_regs * sizeof(u32), flow_control);
}

/*
 * write_reg_dma:       prepares a descriptor to write a given number of
 *                      contiguous registers
 *
 * @first:              offset of the first register in the contiguous block
 * @num_regs:           number of registers to write
 * @flags:              flags to control DMA descriptor preparation
 */
static int write_reg_dma(struct qcom_nand_controller *nandc, int first,
                         int num_regs, unsigned int flags)
{
        bool flow_control = false;
        struct nandc_regs *regs = nandc->regs;
        void *vaddr;

        vaddr = offset_to_nandc_reg(regs, first);

        if (first == NAND_ERASED_CW_DETECT_CFG) {
                if (flags & NAND_ERASED_CW_SET)
                        vaddr = &regs->erased_cw_detect_cfg_set;
                else
                        vaddr = &regs->erased_cw_detect_cfg_clr;
        }

        if (first == NAND_EXEC_CMD)
                flags |= NAND_BAM_NWD;

        if (first == NAND_DEV_CMD1_RESTORE || first == NAND_DEV_CMD1)
                first = dev_cmd_reg_addr(nandc, NAND_DEV_CMD1);

        if (first == NAND_DEV_CMD_VLD_RESTORE || first == NAND_DEV_CMD_VLD)
                first = dev_cmd_reg_addr(nandc, NAND_DEV_CMD_VLD);

        if (nandc->props->is_bam)
                return prep_bam_dma_desc_cmd(nandc, false, first, vaddr,
                                             num_regs, flags);

        if (first == NAND_FLASH_CMD)
                flow_control = true;

        return prep_adm_dma_desc(nandc, false, first, vaddr,
                                 num_regs * sizeof(u32), flow_control);
}

/*
 * read_data_dma:       prepares a DMA descriptor to transfer data from the
 *                      controller's internal buffer to the buffer 'vaddr'
 *
 * @reg_off:            offset within the controller's data buffer
 * @vaddr:              virtual address of the buffer we want to write to
 * @size:               DMA transaction size in bytes
 * @flags:              flags to control DMA descriptor preparation
 */
static int read_data_dma(struct qcom_nand_controller *nandc, int reg_off,
                         const u8 *vaddr, int size, unsigned int flags)
{
        if (nandc->props->is_bam)
                return prep_bam_dma_desc_data(nandc, true, vaddr, size, flags);

        return prep_adm_dma_desc(nandc, true, reg_off, vaddr, size, false);
}

/*
 * write_data_dma:      prepares a DMA descriptor to transfer data from
 *                      'vaddr' to the controller's internal buffer
 *
 * @reg_off:            offset within the controller's data buffer
 * @vaddr:              virtual address of the buffer we want to read from
 * @size:               DMA transaction size in bytes
 * @flags:              flags to control DMA descriptor preparation
 */
static int write_data_dma(struct qcom_nand_controller *nandc, int reg_off,
                          const u8 *vaddr, int size, unsigned int flags)
{
        if (nandc->props->is_bam)
                return prep_bam_dma_desc_data(nandc, false, vaddr, size, flags);

        return prep_adm_dma_desc(nandc, false, reg_off, vaddr, size, false);
}

/*
 * Helper to prepare DMA descriptors for configuring registers
 * before reading a NAND page.
 */
static void config_nand_page_read(struct qcom_nand_controller *nandc)
{
        write_reg_dma(nandc, NAND_ADDR0, 2, 0);
        write_reg_dma(nandc, NAND_DEV0_CFG0, 3, 0);
        write_reg_dma(nandc, NAND_EBI2_ECC_BUF_CFG, 1, 0);
        write_reg_dma(nandc, NAND_ERASED_CW_DETECT_CFG, 1, 0);
        write_reg_dma(nandc, NAND_ERASED_CW_DETECT_CFG, 1,
                      NAND_ERASED_CW_SET | NAND_BAM_NEXT_SGL);
}

/*
 * Helper to prepare DMA descriptors for configuring registers
 * before reading each codeword in NAND page.
 */
static void config_nand_cw_read(struct qcom_nand_controller *nandc)
{
        if (nandc->props->is_bam)
                write_reg_dma(nandc, NAND_READ_LOCATION_0, 4,
                              NAND_BAM_NEXT_SGL);

        write_reg_dma(nandc, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
        write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);

        read_reg_dma(nandc, NAND_FLASH_STATUS, 2, 0);
        read_reg_dma(nandc, NAND_ERASED_CW_DETECT_STATUS, 1,
                     NAND_BAM_NEXT_SGL);
}

/*
 * Helper to prepare DMA descriptors to configure registers needed for reading
 * a single codeword in a page
 */
static void config_nand_single_cw_page_read(struct qcom_nand_controller *nandc)
{
        config_nand_page_read(nandc);
        config_nand_cw_read(nandc);
}

/*
 * Helper to prepare DMA descriptors used to configure registers needed
 * before writing a NAND page.
 */
static void config_nand_page_write(struct qcom_nand_controller *nandc)
{
        write_reg_dma(nandc, NAND_ADDR0, 2, 0);
        write_reg_dma(nandc, NAND_DEV0_CFG0, 3, 0);
        write_reg_dma(nandc, NAND_EBI2_ECC_BUF_CFG, 1,
                      NAND_BAM_NEXT_SGL);
}

/*
 * Helper to prepare DMA descriptors for configuring registers
 * before writing each codeword in NAND page.
 */
static void config_nand_cw_write(struct qcom_nand_controller *nandc)
{
        write_reg_dma(nandc, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
        write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);

        read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);

        write_reg_dma(nandc, NAND_FLASH_STATUS, 1, 0);
        write_reg_dma(nandc, NAND_READ_STATUS, 1, NAND_BAM_NEXT_SGL);
}

/*
 * the following functions are used within chip->cmdfunc() to perform different
 * NAND_CMD_* commands
 */

/* sets up descriptors for NAND_CMD_PARAM */
static int nandc_param(struct qcom_nand_host *host)
{
        struct nand_chip *chip = &host->chip;
        struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);

        /*
         * NAND_CMD_PARAM is called before we know much about the FLASH chip
         * in use. we configure the controller to perform a raw read of 512
         * bytes to read onfi params
         */
        nandc_set_reg(nandc, NAND_FLASH_CMD, PAGE_READ | PAGE_ACC | LAST_PAGE);
        nandc_set_reg(nandc, NAND_ADDR0, 0);
        nandc_set_reg(nandc, NAND_ADDR1, 0);
        nandc_set_reg(nandc, NAND_DEV0_CFG0, 0 << CW_PER_PAGE
                                        | 512 << UD_SIZE_BYTES
                                        | 5 << NUM_ADDR_CYCLES
                                        | 0 << SPARE_SIZE_BYTES);
        nandc_set_reg(nandc, NAND_DEV0_CFG1, 7 << NAND_RECOVERY_CYCLES
                                        | 0 << CS_ACTIVE_BSY
                                        | 17 << BAD_BLOCK_BYTE_NUM
                                        | 1 << BAD_BLOCK_IN_SPARE_AREA
                                        | 2 << WR_RD_BSY_GAP
                                        | 0 << WIDE_FLASH
                                        | 1 << DEV0_CFG1_ECC_DISABLE);
        nandc_set_reg(nandc, NAND_EBI2_ECC_BUF_CFG, 1 << ECC_CFG_ECC_DISABLE);

        /* configure CMD1 and VLD for ONFI param probing */
        nandc_set_reg(nandc, NAND_DEV_CMD_VLD,
                      (nandc->vld & ~READ_START_VLD));
        nandc_set_reg(nandc, NAND_DEV_CMD1,
                      (nandc->cmd1 & ~(0xFF << READ_ADDR))
                      | NAND_CMD_PARAM << READ_ADDR);

        nandc_set_reg(nandc, NAND_EXEC_CMD, 1);

        nandc_set_reg(nandc, NAND_DEV_CMD1_RESTORE, nandc->cmd1);
        nandc_set_reg(nandc, NAND_DEV_CMD_VLD_RESTORE, nandc->vld);
        nandc_set_read_loc(nandc, 0, 0, 512, 1);

        write_reg_dma(nandc, NAND_DEV_CMD_VLD, 1, 0);
        write_reg_dma(nandc, NAND_DEV_CMD1, 1, NAND_BAM_NEXT_SGL);

        nandc->buf_count = 512;
        memset(nandc->data_buffer, 0xff, nandc->buf_count);

        config_nand_single_cw_page_read(nandc);

        read_data_dma(nandc, FLASH_BUF_ACC, nandc->data_buffer,
                      nandc->buf_count, 0);

        /* restore CMD1 and VLD regs */
        write_reg_dma(nandc, NAND_DEV_CMD1_RESTORE, 1, 0);
        write_reg_dma(nandc, NAND_DEV_CMD_VLD_RESTORE, 1, NAND_BAM_NEXT_SGL);

        return 0;
}

/* sets up descriptors for NAND_CMD_ERASE1 */
static int erase_block(struct qcom_nand_host *host, int page_addr)
{
        struct nand_chip *chip = &host->chip;
        struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);

        nandc_set_reg(nandc, NAND_FLASH_CMD,
                      BLOCK_ERASE | PAGE_ACC | LAST_PAGE);
        nandc_set_reg(nandc, NAND_ADDR0, page_addr);
        nandc_set_reg(nandc, NAND_ADDR1, 0);
        nandc_set_reg(nandc, NAND_DEV0_CFG0,
                      host->cfg0_raw & ~(7 << CW_PER_PAGE));
        nandc_set_reg(nandc, NAND_DEV0_CFG1, host->cfg1_raw);
        nandc_set_reg(nandc, NAND_EXEC_CMD, 1);
        nandc_set_reg(nandc, NAND_FLASH_STATUS, host->clrflashstatus);
        nandc_set_reg(nandc, NAND_READ_STATUS, host->clrreadstatus);

        write_reg_dma(nandc, NAND_FLASH_CMD, 3, NAND_BAM_NEXT_SGL);
        write_reg_dma(nandc, NAND_DEV0_CFG0, 2, NAND_BAM_NEXT_SGL);
        write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);

        read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);

        write_reg_dma(nandc, NAND_FLASH_STATUS, 1, 0);
        write_reg_dma(nandc, NAND_READ_STATUS, 1, NAND_BAM_NEXT_SGL);

        return 0;
}

/* sets up descriptors for NAND_CMD_READID */
static int read_id(struct qcom_nand_host *host, int column)
{
        struct nand_chip *chip = &host->chip;
        struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);

        if (column == -1)
                return 0;

        nandc_set_reg(nandc, NAND_FLASH_CMD, FETCH_ID);
        nandc_set_reg(nandc, NAND_ADDR0, column);
        nandc_set_reg(nandc, NAND_ADDR1, 0);
        nandc_set_reg(nandc, NAND_FLASH_CHIP_SELECT,
                      nandc->props->is_bam ? 0 : DM_EN);
        nandc_set_reg(nandc, NAND_EXEC_CMD, 1);

        write_reg_dma(nandc, NAND_FLASH_CMD, 4, NAND_BAM_NEXT_SGL);
        write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);

        read_reg_dma(nandc, NAND_READ_ID, 1, NAND_BAM_NEXT_SGL);

        return 0;
}

/* sets up descriptors for NAND_CMD_RESET */
static int reset(struct qcom_nand_host *host)
{
        struct nand_chip *chip = &host->chip;
        struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);

        nandc_set_reg(nandc, NAND_FLASH_CMD, RESET_DEVICE);
        nandc_set_reg(nandc, NAND_EXEC_CMD, 1);

        write_reg_dma(nandc, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
        write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);

        read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);

        return 0;
}

/* helpers to submit/free our list of dma descriptors */
static int submit_descs(struct qcom_nand_controller *nandc)
{
        struct desc_info *desc;
        dma_cookie_t cookie = 0;
        struct bam_transaction *bam_txn = nandc->bam_txn;
        int r;

        if (nandc->props->is_bam) {
                if (bam_txn->rx_sgl_pos > bam_txn->rx_sgl_start) {
                        r = prepare_bam_async_desc(nandc, nandc->rx_chan, 0);
                        if (r)
                                return r;
                }

                if (bam_txn->tx_sgl_pos > bam_txn->tx_sgl_start) {
                        r = prepare_bam_async_desc(nandc, nandc->tx_chan,
                                                   DMA_PREP_INTERRUPT);
                        if (r)
                                return r;
                }

                if (bam_txn->cmd_sgl_pos > bam_txn->cmd_sgl_start) {
                        r = prepare_bam_async_desc(nandc, nandc->cmd_chan,
                                                   DMA_PREP_CMD);
                        if (r)
                                return r;
                }
        }

        list_for_each_entry(desc, &nandc->desc_list, node)
                cookie = dmaengine_submit(desc->dma_desc);

        if (nandc->props->is_bam) {
                dma_async_issue_pending(nandc->tx_chan);
                dma_async_issue_pending(nandc->rx_chan);

                if (dma_sync_wait(nandc->cmd_chan, cookie) != DMA_COMPLETE)
                        return -ETIMEDOUT;
        } else {
                if (dma_sync_wait(nandc->chan, cookie) != DMA_COMPLETE)
                        return -ETIMEDOUT;
        }

        return 0;
}

static void free_descs(struct qcom_nand_controller *nandc)
{
        struct desc_info *desc, *n;

        list_for_each_entry_safe(desc, n, &nandc->desc_list, node) {
                list_del(&desc->node);

                if (nandc->props->is_bam)
                        dma_unmap_sg(nandc->dev, desc->bam_sgl,
                                     desc->sgl_cnt, desc->dir);
                else
                        dma_unmap_sg(nandc->dev, &desc->adm_sgl, 1,
                                     desc->dir);

                kfree(desc);
        }
}

/* reset the register read buffer for next NAND operation */
static void clear_read_regs(struct qcom_nand_controller *nandc)
{
        nandc->reg_read_pos = 0;
        nandc_read_buffer_sync(nandc, false);
}

static void pre_command(struct qcom_nand_host *host, int command)
{
        struct nand_chip *chip = &host->chip;
        struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);

        nandc->buf_count = 0;
        nandc->buf_start = 0;
        host->use_ecc = false;
        host->last_command = command;

        clear_read_regs(nandc);

        if (command == NAND_CMD_RESET || command == NAND_CMD_READID ||
            command == NAND_CMD_PARAM || command == NAND_CMD_ERASE1)
                clear_bam_transaction(nandc);
}

/*
 * this is called after NAND_CMD_PAGEPROG and NAND_CMD_ERASE1 to set our
 * privately maintained status byte; this status byte can be read after
 * NAND_CMD_STATUS is called
 */
static void parse_erase_write_errors(struct qcom_nand_host *host, int command)
{
        struct nand_chip *chip = &host->chip;
        struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
        struct nand_ecc_ctrl *ecc = &chip->ecc;
        int num_cw;
        int i;

        num_cw = command == NAND_CMD_PAGEPROG ? ecc->steps : 1;
        nandc_read_buffer_sync(nandc, true);

        for (i = 0; i < num_cw; i++) {
                u32 flash_status = le32_to_cpu(nandc->reg_read_buf[i]);

                if (flash_status & FS_MPU_ERR)
                        host->status &= ~NAND_STATUS_WP;

                if (flash_status & FS_OP_ERR || (i == (num_cw - 1) &&
                                                 (flash_status &
                                                  FS_DEVICE_STS_ERR)))
                        host->status |= NAND_STATUS_FAIL;
        }
}

static void post_command(struct qcom_nand_host *host, int command)
{
        struct nand_chip *chip = &host->chip;
        struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);

        switch (command) {
        case NAND_CMD_READID:
                nandc_read_buffer_sync(nandc, true);
                memcpy(nandc->data_buffer, nandc->reg_read_buf,
                       nandc->buf_count);
                break;
        case NAND_CMD_PAGEPROG:
        case NAND_CMD_ERASE1:
                parse_erase_write_errors(host, command);
                break;
        default:
                break;
        }
}

/*
 * Implements chip->cmdfunc. It's only used for a limited set of commands.
 * The rest of the commands wouldn't be called by upper layers. For example,
 * NAND_CMD_READOOB would never be called because we have our own versions
 * of read_oob ops for nand_ecc_ctrl.
 */
static void qcom_nandc_command(struct mtd_info *mtd, unsigned int command,
                               int column, int page_addr)
{
        struct nand_chip *chip = mtd_to_nand(mtd);
        struct qcom_nand_host *host = to_qcom_nand_host(chip);
        struct nand_ecc_ctrl *ecc = &chip->ecc;
        struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
        bool wait = false;
        int ret = 0;

        pre_command(host, command);

        switch (command) {
        case NAND_CMD_RESET:
                ret = reset(host);
                wait = true;
                break;

        case NAND_CMD_READID:
                nandc->buf_count = 4;
                ret = read_id(host, column);
                wait = true;
                break;

        case NAND_CMD_PARAM:
                ret = nandc_param(host);
                wait = true;
                break;

        case NAND_CMD_ERASE1:
                ret = erase_block(host, page_addr);
                wait = true;
                break;

        case NAND_CMD_READ0:
                /* we read the entire page for now */
                WARN_ON(column != 0);

                host->use_ecc = true;
                set_address(host, 0, page_addr);
                update_rw_regs(host, ecc->steps, true);
                break;

        case NAND_CMD_SEQIN:
                WARN_ON(column != 0);
                set_address(host, 0, page_addr);
                break;

        case NAND_CMD_PAGEPROG:
        case NAND_CMD_STATUS:
        case NAND_CMD_NONE:
        default:
                break;
        }

        if (ret) {
                dev_err(nandc->dev, "failure executing command %d\n",
                        command);
                free_descs(nandc);
                return;
        }

        if (wait) {
                ret = submit_descs(nandc);
                if (ret)
                        dev_err(nandc->dev,
                                "failure submitting descs for command %d\n",
                                command);
        }

        free_descs(nandc);

        post_command(host, command);
}

/*
 * when using BCH ECC, the HW flags an error in NAND_FLASH_STATUS if it read
 * an erased CW, and reports an erased CW in NAND_ERASED_CW_DETECT_STATUS.
 *
 * when using RS ECC, the HW reports the same errors when reading an erased CW,
 * but it notifies that it is an erased CW by placing special characters at
 * certain offsets in the buffer.
 *
 * verify if the page is erased or not, and fix up the page for RS ECC by
 * replacing the special characters with 0xff.
 */
static bool erased_chunk_check_and_fixup(u8 *data_buf, int data_len)
{
        u8 empty1, empty2;

        /*
         * an erased page flags an error in NAND_FLASH_STATUS, check if the page
         * is erased by looking for 0x54s at offsets 3 and 175 from the
         * beginning of each codeword
         */

        empty1 = data_buf[3];
        empty2 = data_buf[175];

        /*
         * if the erased codeword markers exist, override them with
         * 0xffs
         */
        if ((empty1 == 0x54 && empty2 == 0xff) ||
            (empty1 == 0xff && empty2 == 0x54)) {
                data_buf[3] = 0xff;
                data_buf[175] = 0xff;
        }

        /*
         * check if the entire chunk contains 0xffs or not. if it doesn't, then
         * restore the original values at the special offsets
         */
        if (memchr_inv(data_buf, 0xff, data_len)) {
                data_buf[3] = empty1;
                data_buf[175] = empty2;

                return false;
        }

        return true;
}

struct read_stats {
        __le32 flash;
        __le32 buffer;
        __le32 erased_cw;
};

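/*
 * each codeword scan in config_nand_cw_read() reads back three registers
 * (NAND_FLASH_STATUS, NAND_BUFFER_STATUS, NAND_ERASED_CW_DETECT_STATUS), so
 * reg_read_buf holds one read_stats triplet per codeword; this is also why
 * MAX_REG_RD is 3 * MAX_NUM_STEPS
 */
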
1514/*
1515 * reads back status registers set by the controller to notify page read
1516 * errors. this is equivalent to what 'ecc->correct()' would do.
1517 */
1518static int parse_read_errors(struct qcom_nand_host *host, u8 *data_buf,
1519 u8 *oob_buf)
1520{
1521 struct nand_chip *chip = &host->chip;
1522 struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
1523 struct mtd_info *mtd = nand_to_mtd(chip);
1524 struct nand_ecc_ctrl *ecc = &chip->ecc;
1525 unsigned int max_bitflips = 0;
1526 struct read_stats *buf;
1527 int i;
1528
1529 buf = (struct read_stats *)nandc->reg_read_buf;
Abhishek Sahu6192ff72017-08-17 17:37:39 +05301530 nandc_read_buffer_sync(nandc, true);
Archit Tanejac76b78d2016-02-03 14:29:50 +05301531
1532 for (i = 0; i < ecc->steps; i++, buf++) {
1533 u32 flash, buffer, erased_cw;
1534 int data_len, oob_len;
1535
1536 if (i == (ecc->steps - 1)) {
1537 data_len = ecc->size - ((ecc->steps - 1) << 2);
1538 oob_len = ecc->steps << 2;
1539 } else {
1540 data_len = host->cw_data;
1541 oob_len = 0;
1542 }
1543
1544 flash = le32_to_cpu(buf->flash);
1545 buffer = le32_to_cpu(buf->buffer);
1546 erased_cw = le32_to_cpu(buf->erased_cw);
1547
1548 if (flash & (FS_OP_ERR | FS_MPU_ERR)) {
1549 bool erased;
1550
1551 /* ignore erased codeword errors */
1552 if (host->bch_enabled) {
1553 erased = (erased_cw & ERASED_CW) == ERASED_CW ?
1554 true : false;
1555 } else {
1556 erased = erased_chunk_check_and_fixup(data_buf,
1557 data_len);
1558 }
1559
1560 if (erased) {
1561 data_buf += data_len;
1562 if (oob_buf)
1563 oob_buf += oob_len + ecc->bytes;
1564 continue;
1565 }
1566
1567 if (buffer & BS_UNCORRECTABLE_BIT) {
1568 int ret, ecclen, extraooblen;
1569 void *eccbuf;
1570
1571 eccbuf = oob_buf ? oob_buf + oob_len : NULL;
1572 ecclen = oob_buf ? host->ecc_bytes_hw : 0;
1573 extraooblen = oob_buf ? oob_len : 0;
1574
1575 /*
1576 * make sure it isn't an erased page reported
1577 * as not-erased by HW because of a few bitflips
1578 */
1579 ret = nand_check_erased_ecc_chunk(data_buf,
1580 data_len, eccbuf, ecclen, oob_buf,
1581 extraooblen, ecc->strength);
1582 if (ret < 0) {
1583 mtd->ecc_stats.failed++;
1584 } else {
1585 mtd->ecc_stats.corrected += ret;
1586 max_bitflips =
1587 max_t(unsigned int, max_bitflips, ret);
1588 }
1589 }
1590 } else {
1591 unsigned int stat;
1592
1593 stat = buffer & BS_CORRECTABLE_ERR_MSK;
1594 mtd->ecc_stats.corrected += stat;
1595 max_bitflips = max(max_bitflips, stat);
1596 }
1597
1598 data_buf += data_len;
1599 if (oob_buf)
1600 oob_buf += oob_len + ecc->bytes;
1601 }
1602
1603 return max_bitflips;
1604}
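
/*
 * a minimal sketch (hypothetical helper, not wired into the driver) of the
 * erased page fallback used above: nand_check_erased_ecc_chunk() returns
 * the number of bitflips if the chunk is almost-0xff (at most 'threshold'
 * zero bits in total), or -EBADMSG for a genuine uncorrectable error.
 */
#if 0
static int example_erased_fallback(struct nand_ecc_ctrl *ecc, u8 *data,
				   int data_len)
{
	/* no ecc or extra oob buffers in this sketch, hence the NULLs */
	int ret = nand_check_erased_ecc_chunk(data, data_len, NULL, 0,
					      NULL, 0, ecc->strength);

	return ret < 0 ? -EBADMSG : ret;
}
#endif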
1605
1606/*
1607 * helper to perform the actual page read operation, used by ecc->read_page(),
1608 * ecc->read_oob()
1609 */
1610static int read_page_ecc(struct qcom_nand_host *host, u8 *data_buf,
1611 u8 *oob_buf)
1612{
1613 struct nand_chip *chip = &host->chip;
1614 struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
1615 struct nand_ecc_ctrl *ecc = &chip->ecc;
1616 int i, ret;
1617
Abhishek Sahubde43302017-07-19 17:17:55 +05301618 config_nand_page_read(nandc);
1619
Archit Tanejac76b78d2016-02-03 14:29:50 +05301620 /* queue cmd descs for each codeword */
1621 for (i = 0; i < ecc->steps; i++) {
1622 int data_size, oob_size;
1623
1624 if (i == (ecc->steps - 1)) {
1625 data_size = ecc->size - ((ecc->steps - 1) << 2);
1626 oob_size = (ecc->steps << 2) + host->ecc_bytes_hw +
1627 host->spare_bytes;
1628 } else {
1629 data_size = host->cw_data;
1630 oob_size = host->ecc_bytes_hw + host->spare_bytes;
1631 }
1632
Abhishek Sahu91af95c2017-08-17 17:37:43 +05301633 if (nandc->props->is_bam) {
1634 if (data_buf && oob_buf) {
1635 nandc_set_read_loc(nandc, 0, 0, data_size, 0);
1636 nandc_set_read_loc(nandc, 1, data_size,
1637 oob_size, 1);
1638 } else if (data_buf) {
1639 nandc_set_read_loc(nandc, 0, 0, data_size, 1);
1640 } else {
1641 nandc_set_read_loc(nandc, 0, data_size,
1642 oob_size, 1);
1643 }
1644 }
1645
Abhishek Sahubde43302017-07-19 17:17:55 +05301646 config_nand_cw_read(nandc);
Archit Tanejac76b78d2016-02-03 14:29:50 +05301647
1648 if (data_buf)
1649 read_data_dma(nandc, FLASH_BUF_ACC, data_buf,
Abhishek Sahu67e830a2017-08-17 17:37:42 +05301650 data_size, 0);
Archit Tanejac76b78d2016-02-03 14:29:50 +05301651
1652 /*
1653 * when ecc is enabled, the controller doesn't read the real
1654 * or dummy bad block markers in each chunk. To maintain a
1655 * consistent layout across RAW and ECC reads, we just
1656		 * leave the real/dummy BBM offsets empty (i.e., filled with
1657 * 0xffs)
1658 */
1659 if (oob_buf) {
1660 int j;
1661
1662 for (j = 0; j < host->bbm_size; j++)
1663 *oob_buf++ = 0xff;
1664
1665 read_data_dma(nandc, FLASH_BUF_ACC + data_size,
Abhishek Sahu67e830a2017-08-17 17:37:42 +05301666 oob_buf, oob_size, 0);
Archit Tanejac76b78d2016-02-03 14:29:50 +05301667 }
1668
1669 if (data_buf)
1670 data_buf += data_size;
1671 if (oob_buf)
1672 oob_buf += oob_size;
1673 }
1674
1675 ret = submit_descs(nandc);
1676 if (ret)
1677 dev_err(nandc->dev, "failure to read page/oob\n");
1678
1679 free_descs(nandc);
1680
1681 return ret;
1682}
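
/*
 * note on the BAM read locations programmed above (a sketch of the register
 * semantics as used in this file): each NAND_READ_LOCATION_n register
 * describes one slice of the codeword buffer as (offset, size, last), so a
 * combined data + oob read uses location 0 for [0, data_size) and location 1
 * for [data_size, data_size + oob_size) with the last flag set.
 */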
1683
1684/*
1685 * a helper that copies the last step/codeword of a page (containing free oob)
1686 * into our local buffer
1687 */
1688static int copy_last_cw(struct qcom_nand_host *host, int page)
1689{
1690 struct nand_chip *chip = &host->chip;
1691 struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
1692 struct nand_ecc_ctrl *ecc = &chip->ecc;
1693 int size;
1694 int ret;
1695
1696 clear_read_regs(nandc);
1697
1698 size = host->use_ecc ? host->cw_data : host->cw_size;
1699
1700 /* prepare a clean read buffer */
1701 memset(nandc->data_buffer, 0xff, size);
1702
1703 set_address(host, host->cw_size * (ecc->steps - 1), page);
1704 update_rw_regs(host, 1, true);
1705
Abhishek Sahubde43302017-07-19 17:17:55 +05301706 config_nand_single_cw_page_read(nandc);
Archit Tanejac76b78d2016-02-03 14:29:50 +05301707
Abhishek Sahu67e830a2017-08-17 17:37:42 +05301708 read_data_dma(nandc, FLASH_BUF_ACC, nandc->data_buffer, size, 0);
Archit Tanejac76b78d2016-02-03 14:29:50 +05301709
1710 ret = submit_descs(nandc);
1711 if (ret)
1712 dev_err(nandc->dev, "failed to copy last codeword\n");
1713
1714 free_descs(nandc);
1715
1716 return ret;
1717}
1718
1719/* implements ecc->read_page() */
1720static int qcom_nandc_read_page(struct mtd_info *mtd, struct nand_chip *chip,
1721 uint8_t *buf, int oob_required, int page)
1722{
1723 struct qcom_nand_host *host = to_qcom_nand_host(chip);
1724 struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
1725 u8 *data_buf, *oob_buf = NULL;
1726 int ret;
1727
1728 data_buf = buf;
1729 oob_buf = oob_required ? chip->oob_poi : NULL;
1730
Abhishek Sahu4e2f6c52017-08-17 17:37:46 +05301731 clear_bam_transaction(nandc);
Archit Tanejac76b78d2016-02-03 14:29:50 +05301732 ret = read_page_ecc(host, data_buf, oob_buf);
1733 if (ret) {
1734 dev_err(nandc->dev, "failure to read page\n");
1735 return ret;
1736 }
1737
1738 return parse_read_errors(host, data_buf, oob_buf);
1739}
1740
1741/* implements ecc->read_page_raw() */
1742static int qcom_nandc_read_page_raw(struct mtd_info *mtd,
1743 struct nand_chip *chip, uint8_t *buf,
1744 int oob_required, int page)
1745{
1746 struct qcom_nand_host *host = to_qcom_nand_host(chip);
1747 struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
1748 u8 *data_buf, *oob_buf;
1749 struct nand_ecc_ctrl *ecc = &chip->ecc;
1750 int i, ret;
Abhishek Sahu91af95c2017-08-17 17:37:43 +05301751 int read_loc;
Archit Tanejac76b78d2016-02-03 14:29:50 +05301752
1753 data_buf = buf;
1754 oob_buf = chip->oob_poi;
1755
1756 host->use_ecc = false;
Abhishek Sahu4e2f6c52017-08-17 17:37:46 +05301757
1758 clear_bam_transaction(nandc);
Archit Tanejac76b78d2016-02-03 14:29:50 +05301759 update_rw_regs(host, ecc->steps, true);
Abhishek Sahubde43302017-07-19 17:17:55 +05301760 config_nand_page_read(nandc);
Archit Tanejac76b78d2016-02-03 14:29:50 +05301761
1762 for (i = 0; i < ecc->steps; i++) {
1763 int data_size1, data_size2, oob_size1, oob_size2;
1764 int reg_off = FLASH_BUF_ACC;
1765
1766 data_size1 = mtd->writesize - host->cw_size * (ecc->steps - 1);
1767 oob_size1 = host->bbm_size;
1768
1769 if (i == (ecc->steps - 1)) {
1770 data_size2 = ecc->size - data_size1 -
1771 ((ecc->steps - 1) << 2);
1772 oob_size2 = (ecc->steps << 2) + host->ecc_bytes_hw +
1773 host->spare_bytes;
1774 } else {
1775 data_size2 = host->cw_data - data_size1;
1776 oob_size2 = host->ecc_bytes_hw + host->spare_bytes;
1777 }
1778
Abhishek Sahu91af95c2017-08-17 17:37:43 +05301779 if (nandc->props->is_bam) {
1780 read_loc = 0;
1781 nandc_set_read_loc(nandc, 0, read_loc, data_size1, 0);
1782 read_loc += data_size1;
1783
1784 nandc_set_read_loc(nandc, 1, read_loc, oob_size1, 0);
1785 read_loc += oob_size1;
1786
1787 nandc_set_read_loc(nandc, 2, read_loc, data_size2, 0);
1788 read_loc += data_size2;
1789
1790 nandc_set_read_loc(nandc, 3, read_loc, oob_size2, 1);
1791 }
1792
Abhishek Sahubde43302017-07-19 17:17:55 +05301793 config_nand_cw_read(nandc);
Archit Tanejac76b78d2016-02-03 14:29:50 +05301794
Abhishek Sahu67e830a2017-08-17 17:37:42 +05301795 read_data_dma(nandc, reg_off, data_buf, data_size1, 0);
Archit Tanejac76b78d2016-02-03 14:29:50 +05301796 reg_off += data_size1;
1797 data_buf += data_size1;
1798
Abhishek Sahu67e830a2017-08-17 17:37:42 +05301799 read_data_dma(nandc, reg_off, oob_buf, oob_size1, 0);
Archit Tanejac76b78d2016-02-03 14:29:50 +05301800 reg_off += oob_size1;
1801 oob_buf += oob_size1;
1802
Abhishek Sahu67e830a2017-08-17 17:37:42 +05301803 read_data_dma(nandc, reg_off, data_buf, data_size2, 0);
Archit Tanejac76b78d2016-02-03 14:29:50 +05301804 reg_off += data_size2;
1805 data_buf += data_size2;
1806
Abhishek Sahu67e830a2017-08-17 17:37:42 +05301807 read_data_dma(nandc, reg_off, oob_buf, oob_size2, 0);
Archit Tanejac76b78d2016-02-03 14:29:50 +05301808 oob_buf += oob_size2;
1809 }
1810
1811 ret = submit_descs(nandc);
1812 if (ret)
1813 dev_err(nandc->dev, "failure to read raw page\n");
1814
1815 free_descs(nandc);
1816
1817	return ret;
1818}
1819
1820/* implements ecc->read_oob() */
1821static int qcom_nandc_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
1822 int page)
1823{
1824 struct qcom_nand_host *host = to_qcom_nand_host(chip);
1825 struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
1826 struct nand_ecc_ctrl *ecc = &chip->ecc;
1827 int ret;
1828
1829 clear_read_regs(nandc);
Abhishek Sahu4e2f6c52017-08-17 17:37:46 +05301830 clear_bam_transaction(nandc);
Archit Tanejac76b78d2016-02-03 14:29:50 +05301831
1832 host->use_ecc = true;
1833 set_address(host, 0, page);
1834 update_rw_regs(host, ecc->steps, true);
1835
1836 ret = read_page_ecc(host, NULL, chip->oob_poi);
1837 if (ret)
1838 dev_err(nandc->dev, "failure to read oob\n");
1839
1840 return ret;
1841}
1842
1843/* implements ecc->write_page() */
1844static int qcom_nandc_write_page(struct mtd_info *mtd, struct nand_chip *chip,
1845 const uint8_t *buf, int oob_required, int page)
1846{
1847 struct qcom_nand_host *host = to_qcom_nand_host(chip);
1848 struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
1849 struct nand_ecc_ctrl *ecc = &chip->ecc;
1850 u8 *data_buf, *oob_buf;
1851 int i, ret;
1852
1853 clear_read_regs(nandc);
Abhishek Sahu4e2f6c52017-08-17 17:37:46 +05301854 clear_bam_transaction(nandc);
Archit Tanejac76b78d2016-02-03 14:29:50 +05301855
1856 data_buf = (u8 *)buf;
1857 oob_buf = chip->oob_poi;
1858
1859 host->use_ecc = true;
1860 update_rw_regs(host, ecc->steps, false);
Abhishek Sahu77cc5362017-07-19 17:17:56 +05301861 config_nand_page_write(nandc);
Archit Tanejac76b78d2016-02-03 14:29:50 +05301862
1863 for (i = 0; i < ecc->steps; i++) {
1864 int data_size, oob_size;
1865
1866 if (i == (ecc->steps - 1)) {
1867 data_size = ecc->size - ((ecc->steps - 1) << 2);
1868 oob_size = (ecc->steps << 2) + host->ecc_bytes_hw +
1869 host->spare_bytes;
1870 } else {
1871 data_size = host->cw_data;
1872 oob_size = ecc->bytes;
1873 }
1874
Archit Tanejac76b78d2016-02-03 14:29:50 +05301875
Abhishek Sahu67e830a2017-08-17 17:37:42 +05301876 write_data_dma(nandc, FLASH_BUF_ACC, data_buf, data_size,
1877 i == (ecc->steps - 1) ? NAND_BAM_NO_EOT : 0);
Archit Tanejac76b78d2016-02-03 14:29:50 +05301878
1879 /*
1880 * when ECC is enabled, we don't really need to write anything
1881 * to oob for the first n - 1 codewords since these oob regions
1882		 * just contain ECC bytes that are written by the controller
1883 * itself. For the last codeword, we skip the bbm positions and
1884 * write to the free oob area.
1885 */
1886 if (i == (ecc->steps - 1)) {
1887 oob_buf += host->bbm_size;
1888
1889 write_data_dma(nandc, FLASH_BUF_ACC + data_size,
Abhishek Sahu67e830a2017-08-17 17:37:42 +05301890 oob_buf, oob_size, 0);
Archit Tanejac76b78d2016-02-03 14:29:50 +05301891 }
1892
Abhishek Sahu77cc5362017-07-19 17:17:56 +05301893 config_nand_cw_write(nandc);
Archit Tanejac76b78d2016-02-03 14:29:50 +05301894
1895 data_buf += data_size;
1896 oob_buf += oob_size;
1897 }
1898
1899 ret = submit_descs(nandc);
1900 if (ret)
1901 dev_err(nandc->dev, "failure to write page\n");
1902
1903 free_descs(nandc);
1904
1905 return ret;
1906}
1907
1908/* implements ecc->write_page_raw() */
1909static int qcom_nandc_write_page_raw(struct mtd_info *mtd,
1910 struct nand_chip *chip, const uint8_t *buf,
1911 int oob_required, int page)
1912{
1913 struct qcom_nand_host *host = to_qcom_nand_host(chip);
1914 struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
1915 struct nand_ecc_ctrl *ecc = &chip->ecc;
1916 u8 *data_buf, *oob_buf;
1917 int i, ret;
1918
1919 clear_read_regs(nandc);
Abhishek Sahu4e2f6c52017-08-17 17:37:46 +05301920 clear_bam_transaction(nandc);
Archit Tanejac76b78d2016-02-03 14:29:50 +05301921
1922 data_buf = (u8 *)buf;
1923 oob_buf = chip->oob_poi;
1924
1925 host->use_ecc = false;
1926 update_rw_regs(host, ecc->steps, false);
Abhishek Sahu77cc5362017-07-19 17:17:56 +05301927 config_nand_page_write(nandc);
Archit Tanejac76b78d2016-02-03 14:29:50 +05301928
1929 for (i = 0; i < ecc->steps; i++) {
1930 int data_size1, data_size2, oob_size1, oob_size2;
1931 int reg_off = FLASH_BUF_ACC;
1932
1933 data_size1 = mtd->writesize - host->cw_size * (ecc->steps - 1);
1934 oob_size1 = host->bbm_size;
1935
1936 if (i == (ecc->steps - 1)) {
1937 data_size2 = ecc->size - data_size1 -
1938 ((ecc->steps - 1) << 2);
1939 oob_size2 = (ecc->steps << 2) + host->ecc_bytes_hw +
1940 host->spare_bytes;
1941 } else {
1942 data_size2 = host->cw_data - data_size1;
1943 oob_size2 = host->ecc_bytes_hw + host->spare_bytes;
1944 }
1945
Abhishek Sahu67e830a2017-08-17 17:37:42 +05301946 write_data_dma(nandc, reg_off, data_buf, data_size1,
1947 NAND_BAM_NO_EOT);
Archit Tanejac76b78d2016-02-03 14:29:50 +05301948 reg_off += data_size1;
1949 data_buf += data_size1;
1950
Abhishek Sahu67e830a2017-08-17 17:37:42 +05301951 write_data_dma(nandc, reg_off, oob_buf, oob_size1,
1952 NAND_BAM_NO_EOT);
Archit Tanejac76b78d2016-02-03 14:29:50 +05301953 reg_off += oob_size1;
1954 oob_buf += oob_size1;
1955
Abhishek Sahu67e830a2017-08-17 17:37:42 +05301956 write_data_dma(nandc, reg_off, data_buf, data_size2,
1957 NAND_BAM_NO_EOT);
Archit Tanejac76b78d2016-02-03 14:29:50 +05301958 reg_off += data_size2;
1959 data_buf += data_size2;
1960
Abhishek Sahu67e830a2017-08-17 17:37:42 +05301961 write_data_dma(nandc, reg_off, oob_buf, oob_size2, 0);
Archit Tanejac76b78d2016-02-03 14:29:50 +05301962 oob_buf += oob_size2;
1963
Abhishek Sahu77cc5362017-07-19 17:17:56 +05301964 config_nand_cw_write(nandc);
Archit Tanejac76b78d2016-02-03 14:29:50 +05301965 }
1966
1967 ret = submit_descs(nandc);
1968 if (ret)
1969 dev_err(nandc->dev, "failure to write raw page\n");
1970
1971 free_descs(nandc);
1972
1973 return ret;
1974}
1975
1976/*
1977 * implements ecc->write_oob()
1978 *
1979 * the NAND controller cannot write only data or only oob within a codeword,
1980 * since ecc is calculated for the combined codeword. we first copy the
1981 * entire contents for the last codeword(data + oob), replace the old oob
1982 * with the new one in chip->oob_poi, and then write the entire codeword.
1983 * this read-copy-write operation results in a slight performance loss.
1984 */
1985static int qcom_nandc_write_oob(struct mtd_info *mtd, struct nand_chip *chip,
1986 int page)
1987{
1988 struct qcom_nand_host *host = to_qcom_nand_host(chip);
1989 struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
1990 struct nand_ecc_ctrl *ecc = &chip->ecc;
1991 u8 *oob = chip->oob_poi;
Archit Tanejac76b78d2016-02-03 14:29:50 +05301992 int data_size, oob_size;
1993 int ret, status = 0;
1994
1995 host->use_ecc = true;
1996
Abhishek Sahu4e2f6c52017-08-17 17:37:46 +05301997 clear_bam_transaction(nandc);
Archit Tanejac76b78d2016-02-03 14:29:50 +05301998 ret = copy_last_cw(host, page);
1999 if (ret)
2000 return ret;
2001
2002 clear_read_regs(nandc);
Abhishek Sahu4e2f6c52017-08-17 17:37:46 +05302003 clear_bam_transaction(nandc);
Archit Tanejac76b78d2016-02-03 14:29:50 +05302004
2005 /* calculate the data and oob size for the last codeword/step */
2006 data_size = ecc->size - ((ecc->steps - 1) << 2);
Boris Brezillonaa02fcf2016-03-18 17:53:31 +01002007 oob_size = mtd->oobavail;
Archit Tanejac76b78d2016-02-03 14:29:50 +05302008
2009 /* override new oob content to last codeword */
Boris Brezillonaa02fcf2016-03-18 17:53:31 +01002010 mtd_ooblayout_get_databytes(mtd, nandc->data_buffer + data_size, oob,
2011 0, mtd->oobavail);
Archit Tanejac76b78d2016-02-03 14:29:50 +05302012
2013 set_address(host, host->cw_size * (ecc->steps - 1), page);
2014 update_rw_regs(host, 1, false);
2015
Abhishek Sahu77cc5362017-07-19 17:17:56 +05302016 config_nand_page_write(nandc);
Abhishek Sahu67e830a2017-08-17 17:37:42 +05302017 write_data_dma(nandc, FLASH_BUF_ACC,
2018 nandc->data_buffer, data_size + oob_size, 0);
Abhishek Sahu77cc5362017-07-19 17:17:56 +05302019 config_nand_cw_write(nandc);
Archit Tanejac76b78d2016-02-03 14:29:50 +05302020
2021 ret = submit_descs(nandc);
2022
2023 free_descs(nandc);
2024
2025 if (ret) {
2026 dev_err(nandc->dev, "failure to write oob\n");
2027 return -EIO;
2028 }
2029
2030 chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
2031
2032 status = chip->waitfunc(mtd, chip);
2033
2034 return status & NAND_STATUS_FAIL ? -EIO : 0;
2035}
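
/*
 * worked example for the write_oob path above, assuming a 2K page with
 * 4 bit ECC: data_size = 512 - 3 * 4 = 500 and oob_size = mtd->oobavail = 16,
 * so the read-copy-write updates the full 516 byte payload of the last
 * codeword.
 */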
2036
2037static int qcom_nandc_block_bad(struct mtd_info *mtd, loff_t ofs)
2038{
2039 struct nand_chip *chip = mtd_to_nand(mtd);
2040 struct qcom_nand_host *host = to_qcom_nand_host(chip);
2041 struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
2042 struct nand_ecc_ctrl *ecc = &chip->ecc;
2043 int page, ret, bbpos, bad = 0;
2044 u32 flash_status;
2045
2046 page = (int)(ofs >> chip->page_shift) & chip->pagemask;
2047
2048 /*
2049	 * configure registers for a raw sub page read. the address is set to
2050	 * the beginning of the last codeword; we don't care about reading the ecc
2051	 * portion of oob, we just want the first few bytes from this codeword
2052	 * that contain the BBM
2053 */
2054 host->use_ecc = false;
2055
Abhishek Sahu4e2f6c52017-08-17 17:37:46 +05302056 clear_bam_transaction(nandc);
Archit Tanejac76b78d2016-02-03 14:29:50 +05302057 ret = copy_last_cw(host, page);
2058 if (ret)
2059 goto err;
2060
2061 flash_status = le32_to_cpu(nandc->reg_read_buf[0]);
2062
2063 if (flash_status & (FS_OP_ERR | FS_MPU_ERR)) {
2064 dev_warn(nandc->dev, "error when trying to read BBM\n");
2065 goto err;
2066 }
2067
2068 bbpos = mtd->writesize - host->cw_size * (ecc->steps - 1);
2069
2070 bad = nandc->data_buffer[bbpos] != 0xff;
2071
2072 if (chip->options & NAND_BUSWIDTH_16)
2073 bad = bad || (nandc->data_buffer[bbpos + 1] != 0xff);
2074err:
2075 return bad;
2076}
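
/*
 * worked example for the bbpos arithmetic above, assuming a 2K page with
 * 4 bit ECC on an x8 bus (cw_size = 528, 4 codewords per page):
 * bbpos = 2048 - 3 * 528 = 464, i.e. the BBM is byte 464 of the raw last
 * codeword that copy_last_cw() placed in data_buffer.
 */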
2077
2078static int qcom_nandc_block_markbad(struct mtd_info *mtd, loff_t ofs)
2079{
2080 struct nand_chip *chip = mtd_to_nand(mtd);
2081 struct qcom_nand_host *host = to_qcom_nand_host(chip);
2082 struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
2083 struct nand_ecc_ctrl *ecc = &chip->ecc;
2084 int page, ret, status = 0;
2085
2086 clear_read_regs(nandc);
Abhishek Sahu4e2f6c52017-08-17 17:37:46 +05302087 clear_bam_transaction(nandc);
Archit Tanejac76b78d2016-02-03 14:29:50 +05302088
2089 /*
2090	 * to mark the block as bad, we flash the entire last codeword with 0s.
2091 * we don't care about the rest of the content in the codeword since
2092 * we aren't going to use this block again
2093 */
2094 memset(nandc->data_buffer, 0x00, host->cw_size);
2095
2096 page = (int)(ofs >> chip->page_shift) & chip->pagemask;
2097
2098 /* prepare write */
2099 host->use_ecc = false;
2100 set_address(host, host->cw_size * (ecc->steps - 1), page);
2101 update_rw_regs(host, 1, false);
2102
Abhishek Sahu77cc5362017-07-19 17:17:56 +05302103 config_nand_page_write(nandc);
Abhishek Sahu67e830a2017-08-17 17:37:42 +05302104 write_data_dma(nandc, FLASH_BUF_ACC,
2105 nandc->data_buffer, host->cw_size, 0);
Abhishek Sahu77cc5362017-07-19 17:17:56 +05302106 config_nand_cw_write(nandc);
Archit Tanejac76b78d2016-02-03 14:29:50 +05302107
2108 ret = submit_descs(nandc);
2109
2110 free_descs(nandc);
2111
2112 if (ret) {
2113 dev_err(nandc->dev, "failure to update BBM\n");
2114 return -EIO;
2115 }
2116
2117 chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
2118
2119 status = chip->waitfunc(mtd, chip);
2120
2121 return status & NAND_STATUS_FAIL ? -EIO : 0;
2122}
2123
2124/*
2125 * the three functions below implement chip->read_byte(), chip->read_buf()
2126 * and chip->write_buf() respectively. these aren't used for
2127 * reading/writing page data; they are used for smaller data like reading
2128 * id, status, etc.
2129 */
2130static uint8_t qcom_nandc_read_byte(struct mtd_info *mtd)
2131{
2132 struct nand_chip *chip = mtd_to_nand(mtd);
2133 struct qcom_nand_host *host = to_qcom_nand_host(chip);
2134 struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
2135 u8 *buf = nandc->data_buffer;
2136 u8 ret = 0x0;
2137
2138 if (host->last_command == NAND_CMD_STATUS) {
2139 ret = host->status;
2140
2141 host->status = NAND_STATUS_READY | NAND_STATUS_WP;
2142
2143 return ret;
2144 }
2145
2146 if (nandc->buf_start < nandc->buf_count)
2147 ret = buf[nandc->buf_start++];
2148
2149 return ret;
2150}
2151
2152static void qcom_nandc_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
2153{
2154 struct nand_chip *chip = mtd_to_nand(mtd);
2155 struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
2156 int real_len = min_t(size_t, len, nandc->buf_count - nandc->buf_start);
2157
2158 memcpy(buf, nandc->data_buffer + nandc->buf_start, real_len);
2159 nandc->buf_start += real_len;
2160}
2161
2162static void qcom_nandc_write_buf(struct mtd_info *mtd, const uint8_t *buf,
2163 int len)
2164{
2165 struct nand_chip *chip = mtd_to_nand(mtd);
2166 struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
2167 int real_len = min_t(size_t, len, nandc->buf_count - nandc->buf_start);
2168
2169 memcpy(nandc->data_buffer + nandc->buf_start, buf, real_len);
2170
2171 nandc->buf_start += real_len;
2172}
2173
2174/* we support only one external chip for now */
2175static void qcom_nandc_select_chip(struct mtd_info *mtd, int chipnr)
2176{
2177 struct nand_chip *chip = mtd_to_nand(mtd);
2178 struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
2179
2180 if (chipnr <= 0)
2181 return;
2182
2183 dev_warn(nandc->dev, "invalid chip select\n");
2184}
2185
2186/*
2187 * NAND controller page layout info
2188 *
2189 * Layout with ECC enabled:
2190 *
2191 * |----------------------| |---------------------------------|
2192 * | xx.......yy| | *********xx.......yy|
2193 * | DATA xx..ECC..yy| | DATA **SPARE**xx..ECC..yy|
2194 * | (516) xx.......yy| | (516-n*4) **(n*4)**xx.......yy|
2195 * | xx.......yy| | *********xx.......yy|
2196 * |----------------------| |---------------------------------|
2197 * codeword 1,2..n-1 codeword n
2198 * <---(528/532 Bytes)--> <-------(528/532 Bytes)--------->
2199 *
2200 * n = Number of codewords in the page
2201 * . = ECC bytes
2202 * * = Spare/free bytes
2203 * x = Unused byte(s)
2204 * y = Reserved byte(s)
2205 *
2206 * 2K page: n = 4, spare = 16 bytes
2207 * 4K page: n = 8, spare = 32 bytes
2208 * 8K page: n = 16, spare = 64 bytes
2209 *
2210 * the qcom nand controller operates at a sub page/codeword level. each
2211 * codeword is 528 or 532 bytes for 4 bit or 8 bit ECC modes respectively.
2212 * the number of ECC bytes varies based on the ECC strength and the bus width.
2213 *
2214 * the first n - 1 codewords each contain 516 bytes of user data; the
2215 * remaining 12/16 bytes consist of ECC and reserved data. The nth codeword
2216 * contains both user data and spare (oobavail) bytes that sum up to 516 bytes.
2217 *
2218 * When we access a page with ECC enabled, the reserved byte(s) are not
2219 * accessible at all. When reading, we fill up these unreadable positions
2220 * with 0xffs. When writing, the controller skips writing the inaccessible
2221 * bytes.
2222 *
2223 * Layout with ECC disabled:
2224 *
2225 * |------------------------------| |---------------------------------------|
2226 * | yy xx.......| | bb *********xx.......|
2227 * | DATA1 yy DATA2 xx..ECC..| | DATA1 bb DATA2 **SPARE**xx..ECC..|
2228 * | (size1) yy (size2) xx.......| | (size1) bb (size2) **(n*4)**xx.......|
2229 * | yy xx.......| | bb *********xx.......|
2230 * |------------------------------| |---------------------------------------|
2231 * codeword 1,2..n-1 codeword n
2232 * <-------(528/532 Bytes)------> <-----------(528/532 Bytes)----------->
2233 *
2234 * n = Number of codewords in the page
2235 * . = ECC bytes
2236 * * = Spare/free bytes
2237 * x = Unused byte(s)
2238 * y = Dummy Bad Block byte(s)
2239 * b = Real Bad Block byte(s)
2240 * size1/size2 = function of codeword size and 'n'
2241 *
2242 * when the ECC block is disabled, one reserved byte (or two for 16 bit bus
2243 * width) is now accessible. For the first n - 1 codewords, these are dummy Bad
2244 * Block Markers. In the last codeword, this position contains the real BBM
2245 *
2246 * In order to have a consistent layout between RAW and ECC modes, we assume
2247 * the following OOB layout arrangement:
2248 *
2249 * |-----------| |--------------------|
2250 * |yyxx.......| |bb*********xx.......|
2251 * |yyxx..ECC..| |bb*FREEOOB*xx..ECC..|
2252 * |yyxx.......| |bb*********xx.......|
2253 * |yyxx.......| |bb*********xx.......|
2254 * |-----------| |--------------------|
2255 * first n - 1 nth OOB region
2256 * OOB regions
2257 *
2258 * n = Number of codewords in the page
2259 * . = ECC bytes
2260 * * = FREE OOB bytes
2261 * y = Dummy bad block byte(s) (inaccessible when ECC enabled)
2262 * x = Unused byte(s)
2263 * b = Real bad block byte(s) (inaccessible when ECC enabled)
2264 *
2265 * This layout is read as is when ECC is disabled. When ECC is enabled, the
2266 * inaccessible Bad Block byte(s) are ignored when we write to a page/oob,
2267 * and assumed as 0xffs when we read a page/oob. The ECC, unused and
Boris Brezillon421e81c2016-03-18 17:54:27 +01002268 * dummy/real bad block bytes are grouped as ecc bytes (i.e, ecc->bytes is
2269 * the sum of the three).
Archit Tanejac76b78d2016-02-03 14:29:50 +05302270 */
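
/*
 * worked example of the layout above for a 2K page with 4 bit ECC
 * (ecc->size = 512, cw_data = 516): n = 2048 / 512 = 4 codewords; the last
 * codeword carries 516 - n * 4 = 500 bytes of user data plus n * 4 = 16
 * spare/free oob bytes, so the page holds 3 * 516 + 500 = 2048 bytes of
 * user data and 16 bytes of free oob (mtd->oobavail) in total.
 */
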
Boris Brezillon421e81c2016-03-18 17:54:27 +01002271static int qcom_nand_ooblayout_ecc(struct mtd_info *mtd, int section,
2272 struct mtd_oob_region *oobregion)
Archit Tanejac76b78d2016-02-03 14:29:50 +05302273{
Boris Brezillon421e81c2016-03-18 17:54:27 +01002274 struct nand_chip *chip = mtd_to_nand(mtd);
2275 struct qcom_nand_host *host = to_qcom_nand_host(chip);
Archit Tanejac76b78d2016-02-03 14:29:50 +05302276 struct nand_ecc_ctrl *ecc = &chip->ecc;
Archit Tanejac76b78d2016-02-03 14:29:50 +05302277
Boris Brezillon421e81c2016-03-18 17:54:27 +01002278 if (section > 1)
2279 return -ERANGE;
Archit Tanejac76b78d2016-02-03 14:29:50 +05302280
Boris Brezillon421e81c2016-03-18 17:54:27 +01002281 if (!section) {
2282 oobregion->length = (ecc->bytes * (ecc->steps - 1)) +
2283 host->bbm_size;
2284 oobregion->offset = 0;
2285 } else {
2286 oobregion->length = host->ecc_bytes_hw + host->spare_bytes;
2287 oobregion->offset = mtd->oobsize - oobregion->length;
Archit Tanejac76b78d2016-02-03 14:29:50 +05302288 }
2289
Boris Brezillon421e81c2016-03-18 17:54:27 +01002290 return 0;
Archit Tanejac76b78d2016-02-03 14:29:50 +05302291}
2292
Boris Brezillon421e81c2016-03-18 17:54:27 +01002293static int qcom_nand_ooblayout_free(struct mtd_info *mtd, int section,
2294 struct mtd_oob_region *oobregion)
2295{
2296 struct nand_chip *chip = mtd_to_nand(mtd);
2297 struct qcom_nand_host *host = to_qcom_nand_host(chip);
2298 struct nand_ecc_ctrl *ecc = &chip->ecc;
2299
2300 if (section)
2301 return -ERANGE;
2302
2303 oobregion->length = ecc->steps * 4;
2304 oobregion->offset = ((ecc->steps - 1) * ecc->bytes) + host->bbm_size;
2305
2306 return 0;
2307}
2308
2309static const struct mtd_ooblayout_ops qcom_nand_ooblayout_ops = {
2310 .ecc = qcom_nand_ooblayout_ecc,
2311 .free = qcom_nand_ooblayout_free,
2312};
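
/*
 * sketch of the resulting oob layout for a 2K page with 4 bit BCH on an x8
 * bus (ecc->bytes = 12, ecc_bytes_hw = 7, spare_bytes = 4, bbm_size = 1,
 * oobsize = 64): ecc section 0 covers bytes [0, 37) (3 * 12 + 1), the free
 * region covers [37, 53) (4 * 4) and ecc section 1 covers [53, 64) (7 + 4),
 * together accounting for all 64 oob bytes.
 */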
2313
Archit Tanejac76b78d2016-02-03 14:29:50 +05302314static int qcom_nand_host_setup(struct qcom_nand_host *host)
2315{
2316 struct nand_chip *chip = &host->chip;
2317 struct mtd_info *mtd = nand_to_mtd(chip);
2318 struct nand_ecc_ctrl *ecc = &chip->ecc;
2319 struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
2320 int cwperpage, bad_block_byte;
2321 bool wide_bus;
2322 int ecc_mode = 1;
2323
2324 /*
2325	 * the controller requires that each step consist of 512 bytes of data.
2326 * bail out if DT has populated a wrong step size.
2327 */
2328 if (ecc->size != NANDC_STEP_SIZE) {
2329 dev_err(nandc->dev, "invalid ecc size\n");
2330 return -EINVAL;
2331 }
2332
2333	wide_bus = chip->options & NAND_BUSWIDTH_16;
2334
2335 if (ecc->strength >= 8) {
2336 /* 8 bit ECC defaults to BCH ECC on all platforms */
2337 host->bch_enabled = true;
2338 ecc_mode = 1;
2339
2340 if (wide_bus) {
2341 host->ecc_bytes_hw = 14;
2342 host->spare_bytes = 0;
2343 host->bbm_size = 2;
2344 } else {
2345 host->ecc_bytes_hw = 13;
2346 host->spare_bytes = 2;
2347 host->bbm_size = 1;
2348 }
2349 } else {
2350 /*
2351 * if the controller supports BCH for 4 bit ECC, the controller
2352		 * uses fewer bytes for ECC. If RS is used, the number of ECC
2353		 * bytes is always 10
2354 */
Abhishek Sahu58f1f222017-08-11 17:09:17 +05302355 if (nandc->props->ecc_modes & ECC_BCH_4BIT) {
Archit Tanejac76b78d2016-02-03 14:29:50 +05302356 /* BCH */
2357 host->bch_enabled = true;
2358 ecc_mode = 0;
2359
2360 if (wide_bus) {
2361 host->ecc_bytes_hw = 8;
2362 host->spare_bytes = 2;
2363 host->bbm_size = 2;
2364 } else {
2365 host->ecc_bytes_hw = 7;
2366 host->spare_bytes = 4;
2367 host->bbm_size = 1;
2368 }
2369 } else {
2370 /* RS */
2371 host->ecc_bytes_hw = 10;
2372
2373 if (wide_bus) {
2374 host->spare_bytes = 0;
2375 host->bbm_size = 2;
2376 } else {
2377 host->spare_bytes = 1;
2378 host->bbm_size = 1;
2379 }
2380 }
2381 }
2382
2383 /*
2384 * we consider ecc->bytes as the sum of all the non-data content in a
2385 * step. It gives us a clean representation of the oob area (even if
2386	 * all the bytes aren't used for ECC). It is always 16 bytes for 8 bit
2387 * ECC and 12 bytes for 4 bit ECC
2388 */
2389 ecc->bytes = host->ecc_bytes_hw + host->spare_bytes + host->bbm_size;
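
	/*
	 * sanity check of the sum above (x8 bus): 8 bit BCH gives
	 * 13 + 2 + 1 = 16 bytes, 4 bit BCH gives 7 + 4 + 1 = 12 and RS gives
	 * 10 + 1 + 1 = 12, matching the 16/12 byte figures quoted above.
	 */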
2390
2391 ecc->read_page = qcom_nandc_read_page;
2392 ecc->read_page_raw = qcom_nandc_read_page_raw;
2393 ecc->read_oob = qcom_nandc_read_oob;
2394 ecc->write_page = qcom_nandc_write_page;
2395 ecc->write_page_raw = qcom_nandc_write_page_raw;
2396 ecc->write_oob = qcom_nandc_write_oob;
2397
2398 ecc->mode = NAND_ECC_HW;
2399
Boris Brezillon421e81c2016-03-18 17:54:27 +01002400 mtd_set_ooblayout(mtd, &qcom_nand_ooblayout_ops);
Archit Tanejac76b78d2016-02-03 14:29:50 +05302401
2402 cwperpage = mtd->writesize / ecc->size;
Abhishek Sahucb80f112017-08-17 17:37:40 +05302403 nandc->max_cwperpage = max_t(unsigned int, nandc->max_cwperpage,
2404 cwperpage);
Archit Tanejac76b78d2016-02-03 14:29:50 +05302405
2406 /*
2407 * DATA_UD_BYTES varies based on whether the read/write command protects
2408 * spare data with ECC too. We protect spare data by default, so we set
2409 * it to main + spare data, which are 512 and 4 bytes respectively.
2410 */
2411 host->cw_data = 516;
2412
2413 /*
2414 * total bytes in a step, either 528 bytes for 4 bit ECC, or 532 bytes
2415 * for 8 bit ECC
2416 */
2417 host->cw_size = host->cw_data + ecc->bytes;
2418
2419 if (ecc->bytes * (mtd->writesize / ecc->size) > mtd->oobsize) {
2420 dev_err(nandc->dev, "ecc data doesn't fit in OOB area\n");
2421 return -EINVAL;
2422 }
2423
2424 bad_block_byte = mtd->writesize - host->cw_size * (cwperpage - 1) + 1;
2425
2426 host->cfg0 = (cwperpage - 1) << CW_PER_PAGE
2427 | host->cw_data << UD_SIZE_BYTES
2428 | 0 << DISABLE_STATUS_AFTER_WRITE
2429 | 5 << NUM_ADDR_CYCLES
2430 | host->ecc_bytes_hw << ECC_PARITY_SIZE_BYTES_RS
2431 | 0 << STATUS_BFR_READ
2432 | 1 << SET_RD_MODE_AFTER_STATUS
2433 | host->spare_bytes << SPARE_SIZE_BYTES;
2434
2435 host->cfg1 = 7 << NAND_RECOVERY_CYCLES
2436 | 0 << CS_ACTIVE_BSY
2437 | bad_block_byte << BAD_BLOCK_BYTE_NUM
2438 | 0 << BAD_BLOCK_IN_SPARE_AREA
2439 | 2 << WR_RD_BSY_GAP
2440 | wide_bus << WIDE_FLASH
2441 | host->bch_enabled << ENABLE_BCH_ECC;
2442
2443 host->cfg0_raw = (cwperpage - 1) << CW_PER_PAGE
2444 | host->cw_size << UD_SIZE_BYTES
2445 | 5 << NUM_ADDR_CYCLES
2446 | 0 << SPARE_SIZE_BYTES;
2447
2448 host->cfg1_raw = 7 << NAND_RECOVERY_CYCLES
2449 | 0 << CS_ACTIVE_BSY
2450 | 17 << BAD_BLOCK_BYTE_NUM
2451 | 1 << BAD_BLOCK_IN_SPARE_AREA
2452 | 2 << WR_RD_BSY_GAP
2453 | wide_bus << WIDE_FLASH
2454 | 1 << DEV0_CFG1_ECC_DISABLE;
2455
Abhishek Sahu10777de2017-08-03 17:56:39 +02002456 host->ecc_bch_cfg = !host->bch_enabled << ECC_CFG_ECC_DISABLE
Archit Tanejac76b78d2016-02-03 14:29:50 +05302457 | 0 << ECC_SW_RESET
2458 | host->cw_data << ECC_NUM_DATA_BYTES
2459 | 1 << ECC_FORCE_CLK_OPEN
2460 | ecc_mode << ECC_MODE
2461 | host->ecc_bytes_hw << ECC_PARITY_SIZE_BYTES_BCH;
2462
2463 host->ecc_buf_cfg = 0x203 << NUM_STEPS;
2464
2465 host->clrflashstatus = FS_READY_BSY_N;
2466 host->clrreadstatus = 0xc0;
Abhishek Sahua86b9c42017-08-17 17:37:44 +05302467 nandc->regs->erased_cw_detect_cfg_clr =
2468 cpu_to_le32(CLR_ERASED_PAGE_DET);
2469 nandc->regs->erased_cw_detect_cfg_set =
2470 cpu_to_le32(SET_ERASED_PAGE_DET);
Archit Tanejac76b78d2016-02-03 14:29:50 +05302471
2472 dev_dbg(nandc->dev,
2473 "cfg0 %x cfg1 %x ecc_buf_cfg %x ecc_bch cfg %x cw_size %d cw_data %d strength %d parity_bytes %d steps %d\n",
2474 host->cfg0, host->cfg1, host->ecc_buf_cfg, host->ecc_bch_cfg,
2475 host->cw_size, host->cw_data, ecc->strength, ecc->bytes,
2476 cwperpage);
2477
2478 return 0;
2479}
2480
2481static int qcom_nandc_alloc(struct qcom_nand_controller *nandc)
2482{
2483 int ret;
2484
2485 ret = dma_set_coherent_mask(nandc->dev, DMA_BIT_MASK(32));
2486 if (ret) {
2487 dev_err(nandc->dev, "failed to set DMA mask\n");
2488 return ret;
2489 }
2490
2491 /*
2492 * we use the internal buffer for reading ONFI params, reading small
2493 * data like ID and status, and preforming read-copy-write operations
2494 * when writing to a codeword partially. 532 is the maximum possible
2495 * size of a codeword for our nand controller
2496 */
2497 nandc->buf_size = 532;
2498
2499 nandc->data_buffer = devm_kzalloc(nandc->dev, nandc->buf_size,
2500 GFP_KERNEL);
2501 if (!nandc->data_buffer)
2502 return -ENOMEM;
2503
2504 nandc->regs = devm_kzalloc(nandc->dev, sizeof(*nandc->regs),
2505 GFP_KERNEL);
2506 if (!nandc->regs)
2507 return -ENOMEM;
2508
2509 nandc->reg_read_buf = devm_kzalloc(nandc->dev,
2510 MAX_REG_RD * sizeof(*nandc->reg_read_buf),
2511 GFP_KERNEL);
2512 if (!nandc->reg_read_buf)
2513 return -ENOMEM;
2514
Abhishek Sahu497d7d82017-08-11 17:09:19 +05302515 if (nandc->props->is_bam) {
Abhishek Sahu6192ff72017-08-17 17:37:39 +05302516 nandc->reg_read_dma =
2517 dma_map_single(nandc->dev, nandc->reg_read_buf,
2518 MAX_REG_RD *
2519 sizeof(*nandc->reg_read_buf),
2520 DMA_FROM_DEVICE);
2521 if (dma_mapping_error(nandc->dev, nandc->reg_read_dma)) {
2522 dev_err(nandc->dev, "failed to DMA MAP reg buffer\n");
2523 return -EIO;
2524 }
2525
Abhishek Sahu497d7d82017-08-11 17:09:19 +05302526 nandc->tx_chan = dma_request_slave_channel(nandc->dev, "tx");
2527 if (!nandc->tx_chan) {
2528 dev_err(nandc->dev, "failed to request tx channel\n");
2529 return -ENODEV;
2530 }
2531
2532 nandc->rx_chan = dma_request_slave_channel(nandc->dev, "rx");
2533 if (!nandc->rx_chan) {
2534 dev_err(nandc->dev, "failed to request rx channel\n");
2535 return -ENODEV;
2536 }
2537
2538 nandc->cmd_chan = dma_request_slave_channel(nandc->dev, "cmd");
2539 if (!nandc->cmd_chan) {
2540 dev_err(nandc->dev, "failed to request cmd channel\n");
2541 return -ENODEV;
2542 }
Abhishek Sahucb80f112017-08-17 17:37:40 +05302543
2544 /*
2545 * Initially allocate BAM transaction to read ONFI param page.
2546 * After detecting all the devices, this BAM transaction will
2547		 * be freed and the next BAM transaction will be allocated with
2548		 * the maximum codeword size
2549 */
2550 nandc->max_cwperpage = 1;
2551 nandc->bam_txn = alloc_bam_transaction(nandc);
2552 if (!nandc->bam_txn) {
2553 dev_err(nandc->dev,
2554 "failed to allocate bam transaction\n");
2555 return -ENOMEM;
2556 }
Abhishek Sahu497d7d82017-08-11 17:09:19 +05302557 } else {
2558 nandc->chan = dma_request_slave_channel(nandc->dev, "rxtx");
2559 if (!nandc->chan) {
2560 dev_err(nandc->dev,
2561 "failed to request slave channel\n");
2562 return -ENODEV;
2563 }
Archit Tanejac76b78d2016-02-03 14:29:50 +05302564 }
2565
2566 INIT_LIST_HEAD(&nandc->desc_list);
2567 INIT_LIST_HEAD(&nandc->host_list);
2568
Marc Gonzalezd45bc582016-07-27 11:23:52 +02002569 nand_hw_control_init(&nandc->controller);
Archit Tanejac76b78d2016-02-03 14:29:50 +05302570
2571 return 0;
2572}
2573
2574static void qcom_nandc_unalloc(struct qcom_nand_controller *nandc)
2575{
Abhishek Sahu497d7d82017-08-11 17:09:19 +05302576 if (nandc->props->is_bam) {
Abhishek Sahu6192ff72017-08-17 17:37:39 +05302577 if (!dma_mapping_error(nandc->dev, nandc->reg_read_dma))
2578 dma_unmap_single(nandc->dev, nandc->reg_read_dma,
2579 MAX_REG_RD *
2580 sizeof(*nandc->reg_read_buf),
2581 DMA_FROM_DEVICE);
2582
Abhishek Sahu497d7d82017-08-11 17:09:19 +05302583 if (nandc->tx_chan)
2584 dma_release_channel(nandc->tx_chan);
2585
2586 if (nandc->rx_chan)
2587 dma_release_channel(nandc->rx_chan);
2588
2589 if (nandc->cmd_chan)
2590 dma_release_channel(nandc->cmd_chan);
2591 } else {
2592 if (nandc->chan)
2593 dma_release_channel(nandc->chan);
2594 }
Archit Tanejac76b78d2016-02-03 14:29:50 +05302595}
2596
2597/* one time setup of a few nand controller registers */
2598static int qcom_nandc_setup(struct qcom_nand_controller *nandc)
2599{
Abhishek Sahu9d43f912017-08-17 17:37:45 +05302600 u32 nand_ctrl;
2601
Archit Tanejac76b78d2016-02-03 14:29:50 +05302602 /* kill onenand */
2603 nandc_write(nandc, SFLASHC_BURST_CFG, 0);
Abhishek Sahucc409b92017-08-17 17:37:47 +05302604 nandc_write(nandc, dev_cmd_reg_addr(nandc, NAND_DEV_CMD_VLD),
2605 NAND_DEV_CMD_VLD_VAL);
Archit Tanejac76b78d2016-02-03 14:29:50 +05302606
Abhishek Sahu9d43f912017-08-17 17:37:45 +05302607 /* enable ADM or BAM DMA */
2608 if (nandc->props->is_bam) {
2609 nand_ctrl = nandc_read(nandc, NAND_CTRL);
2610 nandc_write(nandc, NAND_CTRL, nand_ctrl | BAM_MODE_EN);
2611 } else {
2612 nandc_write(nandc, NAND_FLASH_CHIP_SELECT, DM_EN);
2613 }
Archit Tanejac76b78d2016-02-03 14:29:50 +05302614
2615 /* save the original values of these registers */
Abhishek Sahucc409b92017-08-17 17:37:47 +05302616 nandc->cmd1 = nandc_read(nandc, dev_cmd_reg_addr(nandc, NAND_DEV_CMD1));
Abhishek Sahud8a9b322017-08-11 17:09:16 +05302617 nandc->vld = NAND_DEV_CMD_VLD_VAL;
Archit Tanejac76b78d2016-02-03 14:29:50 +05302618
2619 return 0;
2620}
2621
2622static int qcom_nand_host_init(struct qcom_nand_controller *nandc,
2623 struct qcom_nand_host *host,
2624 struct device_node *dn)
2625{
2626 struct nand_chip *chip = &host->chip;
2627 struct mtd_info *mtd = nand_to_mtd(chip);
2628 struct device *dev = nandc->dev;
2629 int ret;
2630
2631 ret = of_property_read_u32(dn, "reg", &host->cs);
2632 if (ret) {
2633 dev_err(dev, "can't get chip-select\n");
2634 return -ENXIO;
2635 }
2636
2637 nand_set_flash_node(chip, dn);
2638 mtd->name = devm_kasprintf(dev, GFP_KERNEL, "qcom_nand.%d", host->cs);
2639 mtd->owner = THIS_MODULE;
2640 mtd->dev.parent = dev;
2641
2642 chip->cmdfunc = qcom_nandc_command;
2643 chip->select_chip = qcom_nandc_select_chip;
2644 chip->read_byte = qcom_nandc_read_byte;
2645 chip->read_buf = qcom_nandc_read_buf;
2646 chip->write_buf = qcom_nandc_write_buf;
Boris Brezillon4a78cc62017-05-26 17:10:15 +02002647 chip->onfi_set_features = nand_onfi_get_set_features_notsupp;
2648 chip->onfi_get_features = nand_onfi_get_set_features_notsupp;
Archit Tanejac76b78d2016-02-03 14:29:50 +05302649
2650 /*
2651 * the bad block marker is readable only when we read the last codeword
2652 * of a page with ECC disabled. currently, the nand_base and nand_bbt
2653 * helpers don't allow us to read BB from a nand chip with ECC
2654 * disabled (MTD_OPS_PLACE_OOB is set by default). use the block_bad
2655 * and block_markbad helpers until we permanently switch to using
2656 * MTD_OPS_RAW for all drivers (with the help of badblockbits)
2657 */
2658 chip->block_bad = qcom_nandc_block_bad;
2659 chip->block_markbad = qcom_nandc_block_markbad;
2660
2661 chip->controller = &nandc->controller;
2662 chip->options |= NAND_NO_SUBPAGE_WRITE | NAND_USE_BOUNCE_BUFFER |
2663 NAND_SKIP_BBTSCAN;
2664
2665 /* set up initial status value */
2666 host->status = NAND_STATUS_READY | NAND_STATUS_WP;
2667
2668 ret = nand_scan_ident(mtd, 1, NULL);
2669 if (ret)
2670 return ret;
2671
2672 ret = qcom_nand_host_setup(host);
Abhishek Sahu89f51272017-07-19 17:17:58 +05302673
2674 return ret;
2675}
2676
2677static int qcom_nand_mtd_register(struct qcom_nand_controller *nandc,
2678 struct qcom_nand_host *host,
2679 struct device_node *dn)
2680{
2681 struct nand_chip *chip = &host->chip;
2682 struct mtd_info *mtd = nand_to_mtd(chip);
2683 int ret;
Archit Tanejac76b78d2016-02-03 14:29:50 +05302684
2685 ret = nand_scan_tail(mtd);
2686 if (ret)
2687 return ret;
2688
Abhishek Sahu89f51272017-07-19 17:17:58 +05302689 ret = mtd_device_register(mtd, NULL, 0);
2690 if (ret)
2691 nand_cleanup(mtd_to_nand(mtd));
2692
2693 return ret;
2694}
2695
2696static int qcom_probe_nand_devices(struct qcom_nand_controller *nandc)
2697{
2698 struct device *dev = nandc->dev;
2699 struct device_node *dn = dev->of_node, *child;
2700 struct qcom_nand_host *host, *tmp;
2701 int ret;
2702
2703 for_each_available_child_of_node(dn, child) {
2704 host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
2705 if (!host) {
2706 of_node_put(child);
2707 return -ENOMEM;
2708 }
2709
2710 ret = qcom_nand_host_init(nandc, host, child);
2711 if (ret) {
2712 devm_kfree(dev, host);
2713 continue;
2714 }
2715
2716 list_add_tail(&host->node, &nandc->host_list);
2717 }
2718
2719 if (list_empty(&nandc->host_list))
2720 return -ENODEV;
2721
Abhishek Sahucb80f112017-08-17 17:37:40 +05302722 if (nandc->props->is_bam) {
2723 free_bam_transaction(nandc);
2724 nandc->bam_txn = alloc_bam_transaction(nandc);
2725 if (!nandc->bam_txn) {
2726 dev_err(nandc->dev,
2727 "failed to allocate bam transaction\n");
2728 return -ENOMEM;
2729 }
2730 }
2731
Abhishek Sahu89f51272017-07-19 17:17:58 +05302732 list_for_each_entry_safe(host, tmp, &nandc->host_list, node) {
2733 ret = qcom_nand_mtd_register(nandc, host, child);
2734 if (ret) {
2735 list_del(&host->node);
2736 devm_kfree(dev, host);
2737 }
2738 }
2739
2740 if (list_empty(&nandc->host_list))
2741 return -ENODEV;
2742
2743 return 0;
Archit Tanejac76b78d2016-02-03 14:29:50 +05302744}
2745
2746/* parse custom DT properties here */
2747static int qcom_nandc_parse_dt(struct platform_device *pdev)
2748{
2749 struct qcom_nand_controller *nandc = platform_get_drvdata(pdev);
2750 struct device_node *np = nandc->dev->of_node;
2751 int ret;
2752
Abhishek Sahu497d7d82017-08-11 17:09:19 +05302753 if (!nandc->props->is_bam) {
2754 ret = of_property_read_u32(np, "qcom,cmd-crci",
2755 &nandc->cmd_crci);
2756 if (ret) {
2757 dev_err(nandc->dev, "command CRCI unspecified\n");
2758 return ret;
2759 }
Archit Tanejac76b78d2016-02-03 14:29:50 +05302760
Abhishek Sahu497d7d82017-08-11 17:09:19 +05302761 ret = of_property_read_u32(np, "qcom,data-crci",
2762 &nandc->data_crci);
2763 if (ret) {
2764 dev_err(nandc->dev, "data CRCI unspecified\n");
2765 return ret;
2766 }
Archit Tanejac76b78d2016-02-03 14:29:50 +05302767 }
2768
2769 return 0;
2770}
2771
2772static int qcom_nandc_probe(struct platform_device *pdev)
2773{
2774 struct qcom_nand_controller *nandc;
Archit Tanejac76b78d2016-02-03 14:29:50 +05302775 const void *dev_data;
2776 struct device *dev = &pdev->dev;
Archit Tanejac76b78d2016-02-03 14:29:50 +05302777 struct resource *res;
2778 int ret;
2779
2780 nandc = devm_kzalloc(&pdev->dev, sizeof(*nandc), GFP_KERNEL);
2781 if (!nandc)
2782 return -ENOMEM;
2783
2784 platform_set_drvdata(pdev, nandc);
2785 nandc->dev = dev;
2786
2787 dev_data = of_device_get_match_data(dev);
2788 if (!dev_data) {
2789 dev_err(&pdev->dev, "failed to get device data\n");
2790 return -ENODEV;
2791 }
2792
Abhishek Sahu58f1f222017-08-11 17:09:17 +05302793 nandc->props = dev_data;
Archit Tanejac76b78d2016-02-03 14:29:50 +05302794
2795 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2796 nandc->base = devm_ioremap_resource(dev, res);
2797 if (IS_ERR(nandc->base))
2798 return PTR_ERR(nandc->base);
2799
Abhishek Sahu8d6b6d72017-09-25 13:21:26 +05302800 nandc->base_phys = res->start;
Archit Tanejac76b78d2016-02-03 14:29:50 +05302801 nandc->base_dma = phys_to_dma(dev, (phys_addr_t)res->start);
2802
2803 nandc->core_clk = devm_clk_get(dev, "core");
2804 if (IS_ERR(nandc->core_clk))
2805 return PTR_ERR(nandc->core_clk);
2806
2807 nandc->aon_clk = devm_clk_get(dev, "aon");
2808 if (IS_ERR(nandc->aon_clk))
2809 return PTR_ERR(nandc->aon_clk);
2810
2811 ret = qcom_nandc_parse_dt(pdev);
2812 if (ret)
2813 return ret;
2814
2815 ret = qcom_nandc_alloc(nandc);
2816 if (ret)
Abhishek Sahu497d7d82017-08-11 17:09:19 +05302817 goto err_core_clk;
Archit Tanejac76b78d2016-02-03 14:29:50 +05302818
2819 ret = clk_prepare_enable(nandc->core_clk);
2820 if (ret)
2821 goto err_core_clk;
2822
2823 ret = clk_prepare_enable(nandc->aon_clk);
2824 if (ret)
2825 goto err_aon_clk;
2826
2827 ret = qcom_nandc_setup(nandc);
2828 if (ret)
2829 goto err_setup;
2830
Abhishek Sahu89f51272017-07-19 17:17:58 +05302831 ret = qcom_probe_nand_devices(nandc);
2832 if (ret)
2833 goto err_setup;
Archit Tanejac76b78d2016-02-03 14:29:50 +05302834
2835 return 0;
2836
Archit Tanejac76b78d2016-02-03 14:29:50 +05302837err_setup:
2838 clk_disable_unprepare(nandc->aon_clk);
2839err_aon_clk:
2840 clk_disable_unprepare(nandc->core_clk);
2841err_core_clk:
2842 qcom_nandc_unalloc(nandc);
2843
2844 return ret;
2845}
2846
2847static int qcom_nandc_remove(struct platform_device *pdev)
2848{
2849 struct qcom_nand_controller *nandc = platform_get_drvdata(pdev);
2850 struct qcom_nand_host *host;
2851
2852 list_for_each_entry(host, &nandc->host_list, node)
2853 nand_release(nand_to_mtd(&host->chip));
2854
2855 qcom_nandc_unalloc(nandc);
2856
2857 clk_disable_unprepare(nandc->aon_clk);
2858 clk_disable_unprepare(nandc->core_clk);
2859
2860 return 0;
2861}
2862
Abhishek Sahu58f1f222017-08-11 17:09:17 +05302863static const struct qcom_nandc_props ipq806x_nandc_props = {
2864 .ecc_modes = (ECC_RS_4BIT | ECC_BCH_8BIT),
Abhishek Sahu8c5d5d62017-08-11 17:09:18 +05302865 .is_bam = false,
Abhishek Sahucc409b92017-08-17 17:37:47 +05302866 .dev_cmd_reg_start = 0x0,
Abhishek Sahu58f1f222017-08-11 17:09:17 +05302867};
Archit Tanejac76b78d2016-02-03 14:29:50 +05302868
Abhishek Sahua0637832017-08-17 17:37:53 +05302869static const struct qcom_nandc_props ipq4019_nandc_props = {
2870 .ecc_modes = (ECC_BCH_4BIT | ECC_BCH_8BIT),
2871 .is_bam = true,
2872 .dev_cmd_reg_start = 0x0,
2873};
2874
Abhishek Sahudce84762017-08-17 17:37:54 +05302875static const struct qcom_nandc_props ipq8074_nandc_props = {
2876 .ecc_modes = (ECC_BCH_4BIT | ECC_BCH_8BIT),
2877 .is_bam = true,
2878 .dev_cmd_reg_start = 0x7000,
2879};
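
/*
 * a note on dev_cmd_reg_start (a sketch of how it is consumed): on ipq8074
 * the DEV_CMD registers live at a 0x7000 offset, so dev_cmd_reg_addr()
 * resolves e.g. NAND_DEV_CMD1 (0xa4) to 0x7000 + 0xa4, while ipq806x and
 * ipq4019 keep the legacy 0x0 base.
 */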
2880
Archit Tanejac76b78d2016-02-03 14:29:50 +05302881/*
2882 * each compatible's data points at a qcom_nandc_props instance describing
2883 * the differences between the supported controller variants
2884 */
2885static const struct of_device_id qcom_nandc_of_match[] = {
Abhishek Sahu58f1f222017-08-11 17:09:17 +05302886 {
2887 .compatible = "qcom,ipq806x-nand",
2888 .data = &ipq806x_nandc_props,
Archit Tanejac76b78d2016-02-03 14:29:50 +05302889 },
Abhishek Sahua0637832017-08-17 17:37:53 +05302890 {
2891 .compatible = "qcom,ipq4019-nand",
2892 .data = &ipq4019_nandc_props,
2893 },
Abhishek Sahudce84762017-08-17 17:37:54 +05302894 {
2895 .compatible = "qcom,ipq8074-nand",
2896 .data = &ipq8074_nandc_props,
2897 },
Archit Tanejac76b78d2016-02-03 14:29:50 +05302898 {}
2899};
2900MODULE_DEVICE_TABLE(of, qcom_nandc_of_match);
2901
2902static struct platform_driver qcom_nandc_driver = {
2903 .driver = {
2904 .name = "qcom-nandc",
2905 .of_match_table = qcom_nandc_of_match,
2906 },
2907 .probe = qcom_nandc_probe,
2908 .remove = qcom_nandc_remove,
2909};
2910module_platform_driver(qcom_nandc_driver);
2911
2912MODULE_AUTHOR("Archit Taneja <architt@codeaurora.org>");
2913MODULE_DESCRIPTION("Qualcomm NAND Controller driver");
2914MODULE_LICENSE("GPL v2");