/*
 * Copyright (c) 2016, The Linux Foundation. All rights reserved.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/clk.h>
#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/module.h>
#include <linux/mtd/rawnand.h>
#include <linux/mtd/partitions.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/delay.h>
#include <linux/dma/qcom_bam_dma.h>
#include <linux/dma-direct.h> /* XXX: drivers shall never use this directly! */

/* NANDc reg offsets */
#define	NAND_FLASH_CMD			0x00
#define	NAND_ADDR0			0x04
#define	NAND_ADDR1			0x08
#define	NAND_FLASH_CHIP_SELECT		0x0c
#define	NAND_EXEC_CMD			0x10
#define	NAND_FLASH_STATUS		0x14
#define	NAND_BUFFER_STATUS		0x18
#define	NAND_DEV0_CFG0			0x20
#define	NAND_DEV0_CFG1			0x24
#define	NAND_DEV0_ECC_CFG		0x28
#define	NAND_DEV1_ECC_CFG		0x2c
#define	NAND_DEV1_CFG0			0x30
#define	NAND_DEV1_CFG1			0x34
#define	NAND_READ_ID			0x40
#define	NAND_READ_STATUS		0x44
#define	NAND_DEV_CMD0			0xa0
#define	NAND_DEV_CMD1			0xa4
#define	NAND_DEV_CMD2			0xa8
#define	NAND_DEV_CMD_VLD		0xac
#define	SFLASHC_BURST_CFG		0xe0
#define	NAND_ERASED_CW_DETECT_CFG	0xe8
#define	NAND_ERASED_CW_DETECT_STATUS	0xec
#define	NAND_EBI2_ECC_BUF_CFG		0xf0
#define	FLASH_BUF_ACC			0x100

#define	NAND_CTRL			0xf00
#define	NAND_VERSION			0xf08
#define	NAND_READ_LOCATION_0		0xf20
#define	NAND_READ_LOCATION_1		0xf24
#define	NAND_READ_LOCATION_2		0xf28
#define	NAND_READ_LOCATION_3		0xf2c

/* dummy register offsets, used by write_reg_dma */
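/*
 * these offsets don't exist in hardware; offset_to_nandc_reg() maps them to
 * the orig_cmd1/orig_vld shadow values so that nandc_param() can restore
 * NAND_DEV_CMD1 and NAND_DEV_CMD_VLD after ONFI parameter probing
 */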
#define	NAND_DEV_CMD1_RESTORE		0xdead
#define	NAND_DEV_CMD_VLD_RESTORE	0xbeef

/* NAND_FLASH_CMD bits */
#define	PAGE_ACC			BIT(4)
#define	LAST_PAGE			BIT(5)

/* NAND_FLASH_CHIP_SELECT bits */
#define	NAND_DEV_SEL			0
#define	DM_EN				BIT(2)

/* NAND_FLASH_STATUS bits */
#define	FS_OP_ERR			BIT(4)
#define	FS_READY_BSY_N			BIT(5)
#define	FS_MPU_ERR			BIT(8)
#define	FS_DEVICE_STS_ERR		BIT(16)
#define	FS_DEVICE_WP			BIT(23)

/* NAND_BUFFER_STATUS bits */
#define	BS_UNCORRECTABLE_BIT		BIT(8)
#define	BS_CORRECTABLE_ERR_MSK		0x1f

/* NAND_DEVn_CFG0 bits */
#define	DISABLE_STATUS_AFTER_WRITE	4
#define	CW_PER_PAGE			6
#define	UD_SIZE_BYTES			9
#define	ECC_PARITY_SIZE_BYTES_RS	19
#define	SPARE_SIZE_BYTES		23
#define	NUM_ADDR_CYCLES			27
#define	STATUS_BFR_READ			30
#define	SET_RD_MODE_AFTER_STATUS	31

/* NAND_DEVn_CFG1 bits */
#define	DEV0_CFG1_ECC_DISABLE		0
#define	WIDE_FLASH			1
#define	NAND_RECOVERY_CYCLES		2
#define	CS_ACTIVE_BSY			5
#define	BAD_BLOCK_BYTE_NUM		6
#define	BAD_BLOCK_IN_SPARE_AREA		16
#define	WR_RD_BSY_GAP			17
#define	ENABLE_BCH_ECC			27

/* NAND_DEV0_ECC_CFG bits */
#define	ECC_CFG_ECC_DISABLE		0
#define	ECC_SW_RESET			1
#define	ECC_MODE			4
#define	ECC_PARITY_SIZE_BYTES_BCH	8
#define	ECC_NUM_DATA_BYTES		16
#define	ECC_FORCE_CLK_OPEN		30

/* NAND_DEV_CMD1 bits */
#define	READ_ADDR			0

/* NAND_DEV_CMD_VLD bits */
#define	READ_START_VLD			BIT(0)
#define	READ_STOP_VLD			BIT(1)
#define	WRITE_START_VLD			BIT(2)
#define	ERASE_START_VLD			BIT(3)
#define	SEQ_READ_START_VLD		BIT(4)

/* NAND_EBI2_ECC_BUF_CFG bits */
#define	NUM_STEPS			0

/* NAND_ERASED_CW_DETECT_CFG bits */
#define	ERASED_CW_ECC_MASK		1
#define	AUTO_DETECT_RES			0
#define	MASK_ECC			(1 << ERASED_CW_ECC_MASK)
#define	RESET_ERASED_DET		(1 << AUTO_DETECT_RES)
#define	ACTIVE_ERASED_DET		(0 << AUTO_DETECT_RES)
#define	CLR_ERASED_PAGE_DET		(RESET_ERASED_DET | MASK_ECC)
#define	SET_ERASED_PAGE_DET		(ACTIVE_ERASED_DET | MASK_ECC)

/* NAND_ERASED_CW_DETECT_STATUS bits */
#define	PAGE_ALL_ERASED			BIT(7)
#define	CODEWORD_ALL_ERASED		BIT(6)
#define	PAGE_ERASED			BIT(5)
#define	CODEWORD_ERASED			BIT(4)
#define	ERASED_PAGE			(PAGE_ALL_ERASED | PAGE_ERASED)
#define	ERASED_CW			(CODEWORD_ALL_ERASED | CODEWORD_ERASED)

/* NAND_READ_LOCATION_n bits */
#define READ_LOCATION_OFFSET		0
#define READ_LOCATION_SIZE		16
#define READ_LOCATION_LAST		31

/* Version Mask */
#define	NAND_VERSION_MAJOR_MASK		0xf0000000
#define	NAND_VERSION_MAJOR_SHIFT	28
#define	NAND_VERSION_MINOR_MASK		0x0fff0000
#define	NAND_VERSION_MINOR_SHIFT	16

/* NAND OP_CMDs */
#define	PAGE_READ			0x2
#define	PAGE_READ_WITH_ECC		0x3
#define	PAGE_READ_WITH_ECC_SPARE	0x4
#define	PROGRAM_PAGE			0x6
#define	PAGE_PROGRAM_WITH_ECC		0x7
#define	PROGRAM_PAGE_SPARE		0x9
#define	BLOCK_ERASE			0xa
#define	FETCH_ID			0xb
#define	RESET_DEVICE			0xd

/* Default Value for NAND_DEV_CMD_VLD */
#define NAND_DEV_CMD_VLD_VAL		(READ_START_VLD | WRITE_START_VLD | \
					 ERASE_START_VLD | SEQ_READ_START_VLD)

/* NAND_CTRL bits */
#define	BAM_MODE_EN			BIT(0)

/*
 * the NAND controller performs reads/writes with ECC in 516 byte chunks.
 * the driver calls the chunks 'step' or 'codeword' interchangeably
 */
#define NANDC_STEP_SIZE			512

/*
 * the largest page size we support is 8K, this will have 16 steps/codewords
 * of 512 bytes each
 */
#define MAX_NUM_STEPS			(SZ_8K / NANDC_STEP_SIZE)

/* we read at most 3 registers per codeword scan */
#define MAX_REG_RD			(3 * MAX_NUM_STEPS)

/* ECC modes supported by the controller */
#define ECC_NONE	BIT(0)
#define ECC_RS_4BIT	BIT(1)
#define ECC_BCH_4BIT	BIT(2)
#define ECC_BCH_8BIT	BIT(3)

#define nandc_set_read_loc(nandc, reg, offset, size, is_last)	\
nandc_set_reg(nandc, NAND_READ_LOCATION_##reg,			\
	      ((offset) << READ_LOCATION_OFFSET) |		\
	      ((size) << READ_LOCATION_SIZE) |			\
	      ((is_last) << READ_LOCATION_LAST))
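
/*
 * for example (illustrative only), the read path below programs location 0
 * to cover a full ECC-protected codeword and marks it as the last one:
 *
 *	nandc_set_read_loc(nandc, 0, 0, host->cw_data, 1);
 *
 * which packs offset 0, size cw_data and the last-location flag into
 * NAND_READ_LOCATION_0
 */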

/*
 * Returns the actual register address for all NAND_DEV_ registers
 * (i.e. NAND_DEV_CMD0, NAND_DEV_CMD1, NAND_DEV_CMD2 and NAND_DEV_CMD_VLD)
 */
#define dev_cmd_reg_addr(nandc, reg) ((nandc)->props->dev_cmd_reg_start + (reg))

/* Returns the NAND register physical address */
#define nandc_reg_phys(chip, offset) ((chip)->base_phys + (offset))

/* Returns the dma address for reg read buffer */
#define reg_buf_dma_addr(chip, vaddr) \
	((chip)->reg_read_dma + \
	((uint8_t *)(vaddr) - (uint8_t *)(chip)->reg_read_buf))

#define QPIC_PER_CW_CMD_ELEMENTS	32
#define QPIC_PER_CW_CMD_SGL		32
#define QPIC_PER_CW_DATA_SGL		8

/*
 * Flags used in DMA descriptor preparation helper functions
 * (i.e. read_reg_dma/write_reg_dma/read_data_dma/write_data_dma)
 */
/* Don't set the EOT in current tx BAM sgl */
#define NAND_BAM_NO_EOT			BIT(0)
/* Set the NWD flag in current BAM sgl */
#define NAND_BAM_NWD			BIT(1)
/* Finish writing in the current BAM sgl and start writing in another BAM sgl */
#define NAND_BAM_NEXT_SGL		BIT(2)
/*
 * Erased codeword status is being used two times in single transfer so this
 * flag will determine the current value of erased codeword status register
 */
#define NAND_ERASED_CW_SET		BIT(4)

/*
 * This data type corresponds to the BAM transaction which will be used for all
 * NAND transfers.
 * @bam_ce - the array of BAM command elements
 * @cmd_sgl - sgl for NAND BAM command pipe
 * @data_sgl - sgl for NAND BAM consumer/producer pipe
 * @bam_ce_pos - the index in bam_ce which is available for next sgl
 * @bam_ce_start - the index in bam_ce which marks the start position ce
 *		   for current sgl. It will be used for size calculation
 *		   for current sgl
 * @cmd_sgl_pos - current index in command sgl.
 * @cmd_sgl_start - start index in command sgl.
 * @tx_sgl_pos - current index in data sgl for tx.
 * @tx_sgl_start - start index in data sgl for tx.
 * @rx_sgl_pos - current index in data sgl for rx.
 * @rx_sgl_start - start index in data sgl for rx.
 */
struct bam_transaction {
	struct bam_cmd_element *bam_ce;
	struct scatterlist *cmd_sgl;
	struct scatterlist *data_sgl;
	u32 bam_ce_pos;
	u32 bam_ce_start;
	u32 cmd_sgl_pos;
	u32 cmd_sgl_start;
	u32 tx_sgl_pos;
	u32 tx_sgl_start;
	u32 rx_sgl_pos;
	u32 rx_sgl_start;
};

/*
 * This data type corresponds to the nand dma descriptor
 * @list - list for desc_info
 * @dir - DMA transfer direction
 * @adm_sgl - sgl which will be used for single sgl dma descriptor. Only used by
 *	      ADM
 * @bam_sgl - sgl which will be used for dma descriptor. Only used by BAM
 * @sgl_cnt - number of SGL in bam_sgl. Only used by BAM
 * @dma_desc - low level DMA engine descriptor
 */
struct desc_info {
	struct list_head node;

	enum dma_data_direction dir;
	union {
		struct scatterlist adm_sgl;
		struct {
			struct scatterlist *bam_sgl;
			int sgl_cnt;
		};
	};
	struct dma_async_tx_descriptor *dma_desc;
};

/*
 * holds the current register values that we want to write. acts as a contiguous
 * chunk of memory which we use to write the controller registers through DMA.
 */
struct nandc_regs {
	__le32 cmd;
	__le32 addr0;
	__le32 addr1;
	__le32 chip_sel;
	__le32 exec;

	__le32 cfg0;
	__le32 cfg1;
	__le32 ecc_bch_cfg;

	__le32 clrflashstatus;
	__le32 clrreadstatus;

	__le32 cmd1;
	__le32 vld;

	__le32 orig_cmd1;
	__le32 orig_vld;

	__le32 ecc_buf_cfg;
	__le32 read_location0;
	__le32 read_location1;
	__le32 read_location2;
	__le32 read_location3;

	__le32 erased_cw_detect_cfg_clr;
	__le32 erased_cw_detect_cfg_set;
};

/*
 * NAND controller data struct
 *
 * @controller:			base controller structure
 * @host_list:			list containing all the chips attached to the
 *				controller
 * @dev:			parent device
 * @base:			MMIO base
 * @base_phys:			physical base address of controller registers
 * @base_dma:			dma base address of controller registers
 * @core_clk:			controller clock
 * @aon_clk:			another controller clock
 *
 * @chan:			dma channel
 * @cmd_crci:			ADM DMA CRCI for command flow control
 * @data_crci:			ADM DMA CRCI for data flow control
 * @desc_list:			DMA descriptor list (list of desc_infos)
 *
 * @data_buffer:		our local DMA buffer for page read/writes,
 *				used when we can't use the buffer provided
 *				by upper layers directly
 * @buf_size/count/start:	markers for chip->read_buf/write_buf functions
 * @reg_read_buf:		local buffer for reading back registers via DMA
 * @reg_read_dma:		contains dma address for register read buffer
 * @reg_read_pos:		marker for data read in reg_read_buf
 *
 * @regs:			a contiguous chunk of memory for DMA register
 *				writes. contains the register values to be
 *				written to controller
 * @cmd1/vld:			some fixed controller register values
 * @props:			properties of current NAND controller,
 *				initialized via DT match data
 * @max_cwperpage:		maximum QPIC codewords required. calculated
 *				from all connected NAND devices pagesize
 */
struct qcom_nand_controller {
	struct nand_hw_control controller;
	struct list_head host_list;

	struct device *dev;

	void __iomem *base;
	phys_addr_t base_phys;
	dma_addr_t base_dma;

	struct clk *core_clk;
	struct clk *aon_clk;

	union {
		/* will be used only by QPIC for BAM DMA */
		struct {
			struct dma_chan *tx_chan;
			struct dma_chan *rx_chan;
			struct dma_chan *cmd_chan;
		};

		/* will be used only by EBI2 for ADM DMA */
		struct {
			struct dma_chan *chan;
			unsigned int cmd_crci;
			unsigned int data_crci;
		};
	};

	struct list_head desc_list;
	struct bam_transaction *bam_txn;

	u8 *data_buffer;
	int buf_size;
	int buf_count;
	int buf_start;
	unsigned int max_cwperpage;

	__le32 *reg_read_buf;
	dma_addr_t reg_read_dma;
	int reg_read_pos;

	struct nandc_regs *regs;

	u32 cmd1, vld;
	const struct qcom_nandc_props *props;
};

/*
 * NAND chip structure
 *
 * @chip:			base NAND chip structure
 * @node:			list node to add itself to host_list in
 *				qcom_nand_controller
 *
 * @cs:				chip select value for this chip
 * @cw_size:			the number of bytes in a single step/codeword
 *				of a page, consisting of all data, ecc, spare
 *				and reserved bytes
 * @cw_data:			the number of bytes within a codeword protected
 *				by ECC
 * @use_ecc:			request the controller to use ECC for the
 *				upcoming read/write
 * @bch_enabled:		flag to tell whether BCH ECC mode is used
 * @ecc_bytes_hw:		ECC bytes used by controller hardware for this
 *				chip
 * @status:			value to be returned if NAND_CMD_STATUS command
 *				is executed
 * @last_command:		keeps track of last command on this chip. used
 *				for reading correct status
 *
 * @cfg0, cfg1, cfg0_raw..:	NANDc register configurations needed for
 *				ecc/non-ecc mode for the current nand flash
 *				device
 */
struct qcom_nand_host {
	struct nand_chip chip;
	struct list_head node;

	int cs;
	int cw_size;
	int cw_data;
	bool use_ecc;
	bool bch_enabled;
	int ecc_bytes_hw;
	int spare_bytes;
	int bbm_size;
	u8 status;
	int last_command;

	u32 cfg0, cfg1;
	u32 cfg0_raw, cfg1_raw;
	u32 ecc_buf_cfg;
	u32 ecc_bch_cfg;
	u32 clrflashstatus;
	u32 clrreadstatus;
};

/*
 * This data type corresponds to the NAND controller properties which vary
 * among different NAND controllers.
 * @ecc_modes - ecc mode for NAND
 * @is_bam - whether NAND controller is using BAM
 * @dev_cmd_reg_start - NAND_DEV_CMD_* registers starting offset
 */
struct qcom_nandc_props {
	u32 ecc_modes;
	bool is_bam;
	u32 dev_cmd_reg_start;
};

/* Frees the BAM transaction memory */
static void free_bam_transaction(struct qcom_nand_controller *nandc)
{
	struct bam_transaction *bam_txn = nandc->bam_txn;

	devm_kfree(nandc->dev, bam_txn);
}

/* Allocates and Initializes the BAM transaction */
static struct bam_transaction *
alloc_bam_transaction(struct qcom_nand_controller *nandc)
{
	struct bam_transaction *bam_txn;
	size_t bam_txn_size;
	unsigned int num_cw = nandc->max_cwperpage;
	void *bam_txn_buf;

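	/*
	 * one allocation backs the whole transaction: the struct itself,
	 * followed by the per-codeword command element, command sgl and
	 * data sgl arrays carved out below
	 */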
	bam_txn_size =
		sizeof(*bam_txn) + num_cw *
		((sizeof(*bam_txn->bam_ce) * QPIC_PER_CW_CMD_ELEMENTS) +
		(sizeof(*bam_txn->cmd_sgl) * QPIC_PER_CW_CMD_SGL) +
		(sizeof(*bam_txn->data_sgl) * QPIC_PER_CW_DATA_SGL));

	bam_txn_buf = devm_kzalloc(nandc->dev, bam_txn_size, GFP_KERNEL);
	if (!bam_txn_buf)
		return NULL;

	bam_txn = bam_txn_buf;
	bam_txn_buf += sizeof(*bam_txn);

	bam_txn->bam_ce = bam_txn_buf;
	bam_txn_buf +=
		sizeof(*bam_txn->bam_ce) * QPIC_PER_CW_CMD_ELEMENTS * num_cw;

	bam_txn->cmd_sgl = bam_txn_buf;
	bam_txn_buf +=
		sizeof(*bam_txn->cmd_sgl) * QPIC_PER_CW_CMD_SGL * num_cw;

	bam_txn->data_sgl = bam_txn_buf;

	return bam_txn;
}

/* Clears the BAM transaction indexes */
static void clear_bam_transaction(struct qcom_nand_controller *nandc)
{
	struct bam_transaction *bam_txn = nandc->bam_txn;

	if (!nandc->props->is_bam)
		return;

	bam_txn->bam_ce_pos = 0;
	bam_txn->bam_ce_start = 0;
	bam_txn->cmd_sgl_pos = 0;
	bam_txn->cmd_sgl_start = 0;
	bam_txn->tx_sgl_pos = 0;
	bam_txn->tx_sgl_start = 0;
	bam_txn->rx_sgl_pos = 0;
	bam_txn->rx_sgl_start = 0;

	sg_init_table(bam_txn->cmd_sgl, nandc->max_cwperpage *
		      QPIC_PER_CW_CMD_SGL);
	sg_init_table(bam_txn->data_sgl, nandc->max_cwperpage *
		      QPIC_PER_CW_DATA_SGL);
}

static inline struct qcom_nand_host *to_qcom_nand_host(struct nand_chip *chip)
{
	return container_of(chip, struct qcom_nand_host, chip);
}

static inline struct qcom_nand_controller *
get_qcom_nand_controller(struct nand_chip *chip)
{
	return container_of(chip->controller, struct qcom_nand_controller,
			    controller);
}

static inline u32 nandc_read(struct qcom_nand_controller *nandc, int offset)
{
	return ioread32(nandc->base + offset);
}

static inline void nandc_write(struct qcom_nand_controller *nandc, int offset,
			       u32 val)
{
	iowrite32(val, nandc->base + offset);
}

static inline void nandc_read_buffer_sync(struct qcom_nand_controller *nandc,
					  bool is_cpu)
{
	if (!nandc->props->is_bam)
		return;

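	/*
	 * hand the DMA-mapped register read buffer back and forth: sync it
	 * for the CPU before parsing results, and for the device before
	 * queueing further register reads
	 */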
	if (is_cpu)
		dma_sync_single_for_cpu(nandc->dev, nandc->reg_read_dma,
					MAX_REG_RD *
					sizeof(*nandc->reg_read_buf),
					DMA_FROM_DEVICE);
	else
		dma_sync_single_for_device(nandc->dev, nandc->reg_read_dma,
					   MAX_REG_RD *
					   sizeof(*nandc->reg_read_buf),
					   DMA_FROM_DEVICE);
}

static __le32 *offset_to_nandc_reg(struct nandc_regs *regs, int offset)
{
	switch (offset) {
	case NAND_FLASH_CMD:
		return &regs->cmd;
	case NAND_ADDR0:
		return &regs->addr0;
	case NAND_ADDR1:
		return &regs->addr1;
	case NAND_FLASH_CHIP_SELECT:
		return &regs->chip_sel;
	case NAND_EXEC_CMD:
		return &regs->exec;
	case NAND_FLASH_STATUS:
		return &regs->clrflashstatus;
	case NAND_DEV0_CFG0:
		return &regs->cfg0;
	case NAND_DEV0_CFG1:
		return &regs->cfg1;
	case NAND_DEV0_ECC_CFG:
		return &regs->ecc_bch_cfg;
	case NAND_READ_STATUS:
		return &regs->clrreadstatus;
	case NAND_DEV_CMD1:
		return &regs->cmd1;
	case NAND_DEV_CMD1_RESTORE:
		return &regs->orig_cmd1;
	case NAND_DEV_CMD_VLD:
		return &regs->vld;
	case NAND_DEV_CMD_VLD_RESTORE:
		return &regs->orig_vld;
	case NAND_EBI2_ECC_BUF_CFG:
		return &regs->ecc_buf_cfg;
	case NAND_READ_LOCATION_0:
		return &regs->read_location0;
	case NAND_READ_LOCATION_1:
		return &regs->read_location1;
	case NAND_READ_LOCATION_2:
		return &regs->read_location2;
	case NAND_READ_LOCATION_3:
		return &regs->read_location3;
	default:
		return NULL;
	}
}

static void nandc_set_reg(struct qcom_nand_controller *nandc, int offset,
			  u32 val)
{
	struct nandc_regs *regs = nandc->regs;
	__le32 *reg;

	reg = offset_to_nandc_reg(regs, offset);

	if (reg)
		*reg = cpu_to_le32(val);
}

/* helper to configure address register values */
static void set_address(struct qcom_nand_host *host, u16 column, int page)
{
	struct nand_chip *chip = &host->chip;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);

	if (chip->options & NAND_BUSWIDTH_16)
		column >>= 1;

	nandc_set_reg(nandc, NAND_ADDR0, page << 16 | column);
	nandc_set_reg(nandc, NAND_ADDR1, page >> 16 & 0xff);
}

/*
 * update_rw_regs:	set up read/write register values, these will be
 *			written to the NAND controller registers via DMA
 *
 * @num_cw:		number of steps for the read/write operation
 * @read:		read or write operation
 */
static void update_rw_regs(struct qcom_nand_host *host, int num_cw, bool read)
{
	struct nand_chip *chip = &host->chip;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	u32 cmd, cfg0, cfg1, ecc_bch_cfg;

	if (read) {
		if (host->use_ecc)
			cmd = PAGE_READ_WITH_ECC | PAGE_ACC | LAST_PAGE;
		else
			cmd = PAGE_READ | PAGE_ACC | LAST_PAGE;
	} else {
		cmd = PROGRAM_PAGE | PAGE_ACC | LAST_PAGE;
	}

	if (host->use_ecc) {
		cfg0 = (host->cfg0 & ~(7U << CW_PER_PAGE)) |
				(num_cw - 1) << CW_PER_PAGE;

		cfg1 = host->cfg1;
		ecc_bch_cfg = host->ecc_bch_cfg;
	} else {
		cfg0 = (host->cfg0_raw & ~(7U << CW_PER_PAGE)) |
				(num_cw - 1) << CW_PER_PAGE;

		cfg1 = host->cfg1_raw;
		ecc_bch_cfg = 1 << ECC_CFG_ECC_DISABLE;
	}

	nandc_set_reg(nandc, NAND_FLASH_CMD, cmd);
	nandc_set_reg(nandc, NAND_DEV0_CFG0, cfg0);
	nandc_set_reg(nandc, NAND_DEV0_CFG1, cfg1);
	nandc_set_reg(nandc, NAND_DEV0_ECC_CFG, ecc_bch_cfg);
	nandc_set_reg(nandc, NAND_EBI2_ECC_BUF_CFG, host->ecc_buf_cfg);
	nandc_set_reg(nandc, NAND_FLASH_STATUS, host->clrflashstatus);
	nandc_set_reg(nandc, NAND_READ_STATUS, host->clrreadstatus);
	nandc_set_reg(nandc, NAND_EXEC_CMD, 1);

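	/*
	 * for reads, read location 0 is set to span the entire codeword:
	 * cw_data bytes when ECC strips the spare area, cw_size raw bytes
	 * otherwise (the location registers are only written to hardware
	 * on BAM controllers)
	 */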
	if (read)
		nandc_set_read_loc(nandc, 0, 0, host->use_ecc ?
				   host->cw_data : host->cw_size, 1);
}

/*
 * Maps the scatter gather list for DMA transfer and forms the DMA descriptor
 * for BAM. This descriptor will be added in the NAND DMA descriptor queue
 * which will be submitted to DMA engine.
 */
static int prepare_bam_async_desc(struct qcom_nand_controller *nandc,
				  struct dma_chan *chan,
				  unsigned long flags)
{
	struct desc_info *desc;
	struct scatterlist *sgl;
	unsigned int sgl_cnt;
	int ret;
	struct bam_transaction *bam_txn = nandc->bam_txn;
	enum dma_transfer_direction dir_eng;
	struct dma_async_tx_descriptor *dma_desc;

	desc = kzalloc(sizeof(*desc), GFP_KERNEL);
	if (!desc)
		return -ENOMEM;

	if (chan == nandc->cmd_chan) {
		sgl = &bam_txn->cmd_sgl[bam_txn->cmd_sgl_start];
		sgl_cnt = bam_txn->cmd_sgl_pos - bam_txn->cmd_sgl_start;
		bam_txn->cmd_sgl_start = bam_txn->cmd_sgl_pos;
		dir_eng = DMA_MEM_TO_DEV;
		desc->dir = DMA_TO_DEVICE;
	} else if (chan == nandc->tx_chan) {
		sgl = &bam_txn->data_sgl[bam_txn->tx_sgl_start];
		sgl_cnt = bam_txn->tx_sgl_pos - bam_txn->tx_sgl_start;
		bam_txn->tx_sgl_start = bam_txn->tx_sgl_pos;
		dir_eng = DMA_MEM_TO_DEV;
		desc->dir = DMA_TO_DEVICE;
	} else {
		sgl = &bam_txn->data_sgl[bam_txn->rx_sgl_start];
		sgl_cnt = bam_txn->rx_sgl_pos - bam_txn->rx_sgl_start;
		bam_txn->rx_sgl_start = bam_txn->rx_sgl_pos;
		dir_eng = DMA_DEV_TO_MEM;
		desc->dir = DMA_FROM_DEVICE;
	}

	sg_mark_end(sgl + sgl_cnt - 1);
	ret = dma_map_sg(nandc->dev, sgl, sgl_cnt, desc->dir);
	if (ret == 0) {
		dev_err(nandc->dev, "failure in mapping desc\n");
		kfree(desc);
		return -ENOMEM;
	}

	desc->sgl_cnt = sgl_cnt;
	desc->bam_sgl = sgl;

	dma_desc = dmaengine_prep_slave_sg(chan, sgl, sgl_cnt, dir_eng,
					   flags);

	if (!dma_desc) {
		dev_err(nandc->dev, "failure in prep desc\n");
		dma_unmap_sg(nandc->dev, sgl, sgl_cnt, desc->dir);
		kfree(desc);
		return -EINVAL;
	}

	desc->dma_desc = dma_desc;

	list_add_tail(&desc->node, &nandc->desc_list);

	return 0;
}

/*
 * Prepares the command descriptor for BAM DMA which will be used for NAND
 * register reads and writes. The command descriptor requires the command
 * to be formed in command element type so this function uses the command
 * element from bam transaction ce array and fills the same with required
 * data. A single SGL can contain multiple command elements so
 * NAND_BAM_NEXT_SGL will be used for starting the separate SGL
 * after the current command element.
 */
static int prep_bam_dma_desc_cmd(struct qcom_nand_controller *nandc, bool read,
				 int reg_off, const void *vaddr,
				 int size, unsigned int flags)
{
	int bam_ce_size;
	int i, ret;
	struct bam_cmd_element *bam_ce_buffer;
	struct bam_transaction *bam_txn = nandc->bam_txn;

	bam_ce_buffer = &bam_txn->bam_ce[bam_txn->bam_ce_pos];

	/* fill the command desc */
	for (i = 0; i < size; i++) {
		if (read)
			bam_prep_ce(&bam_ce_buffer[i],
				    nandc_reg_phys(nandc, reg_off + 4 * i),
				    BAM_READ_COMMAND,
				    reg_buf_dma_addr(nandc,
						     (__le32 *)vaddr + i));
		else
			bam_prep_ce_le32(&bam_ce_buffer[i],
					 nandc_reg_phys(nandc, reg_off + 4 * i),
					 BAM_WRITE_COMMAND,
					 *((__le32 *)vaddr + i));
	}

	bam_txn->bam_ce_pos += size;

	/* use the separate sgl after this command */
	if (flags & NAND_BAM_NEXT_SGL) {
		bam_ce_buffer = &bam_txn->bam_ce[bam_txn->bam_ce_start];
		bam_ce_size = (bam_txn->bam_ce_pos -
				bam_txn->bam_ce_start) *
				sizeof(struct bam_cmd_element);
		sg_set_buf(&bam_txn->cmd_sgl[bam_txn->cmd_sgl_pos],
			   bam_ce_buffer, bam_ce_size);
		bam_txn->cmd_sgl_pos++;
		bam_txn->bam_ce_start = bam_txn->bam_ce_pos;

		if (flags & NAND_BAM_NWD) {
			ret = prepare_bam_async_desc(nandc, nandc->cmd_chan,
						     DMA_PREP_FENCE |
						     DMA_PREP_CMD);
			if (ret)
				return ret;
		}
	}

	return 0;
}

/*
 * Prepares the data descriptor for BAM DMA which will be used for NAND
 * data reads and writes.
 */
static int prep_bam_dma_desc_data(struct qcom_nand_controller *nandc, bool read,
				  const void *vaddr,
				  int size, unsigned int flags)
{
	int ret;
	struct bam_transaction *bam_txn = nandc->bam_txn;

	if (read) {
		sg_set_buf(&bam_txn->data_sgl[bam_txn->rx_sgl_pos],
			   vaddr, size);
		bam_txn->rx_sgl_pos++;
	} else {
		sg_set_buf(&bam_txn->data_sgl[bam_txn->tx_sgl_pos],
			   vaddr, size);
		bam_txn->tx_sgl_pos++;

		/*
		 * BAM will only set EOT for DMA_PREP_INTERRUPT so if this flag
		 * is not set, form the DMA descriptor
		 */
		if (!(flags & NAND_BAM_NO_EOT)) {
			ret = prepare_bam_async_desc(nandc, nandc->tx_chan,
						     DMA_PREP_INTERRUPT);
			if (ret)
				return ret;
		}
	}

	return 0;
}

static int prep_adm_dma_desc(struct qcom_nand_controller *nandc, bool read,
			     int reg_off, const void *vaddr, int size,
			     bool flow_control)
{
	struct desc_info *desc;
	struct dma_async_tx_descriptor *dma_desc;
	struct scatterlist *sgl;
	struct dma_slave_config slave_conf;
	enum dma_transfer_direction dir_eng;
	int ret;

	desc = kzalloc(sizeof(*desc), GFP_KERNEL);
	if (!desc)
		return -ENOMEM;

	sgl = &desc->adm_sgl;

	sg_init_one(sgl, vaddr, size);

	if (read) {
		dir_eng = DMA_DEV_TO_MEM;
		desc->dir = DMA_FROM_DEVICE;
	} else {
		dir_eng = DMA_MEM_TO_DEV;
		desc->dir = DMA_TO_DEVICE;
	}

	ret = dma_map_sg(nandc->dev, sgl, 1, desc->dir);
	if (ret == 0) {
		ret = -ENOMEM;
		goto err;
	}

	memset(&slave_conf, 0x00, sizeof(slave_conf));

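	/*
	 * ADM flow control: when device_fc is set, reads are presumably
	 * paced by the data CRCI and register/command writes by the
	 * command CRCI (see the CRCI fields in qcom_nand_controller)
	 */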
	slave_conf.device_fc = flow_control;
	if (read) {
		slave_conf.src_maxburst = 16;
		slave_conf.src_addr = nandc->base_dma + reg_off;
		slave_conf.slave_id = nandc->data_crci;
	} else {
		slave_conf.dst_maxburst = 16;
		slave_conf.dst_addr = nandc->base_dma + reg_off;
		slave_conf.slave_id = nandc->cmd_crci;
	}

	ret = dmaengine_slave_config(nandc->chan, &slave_conf);
	if (ret) {
		dev_err(nandc->dev, "failed to configure dma channel\n");
		goto err;
	}

	dma_desc = dmaengine_prep_slave_sg(nandc->chan, sgl, 1, dir_eng, 0);
	if (!dma_desc) {
		dev_err(nandc->dev, "failed to prepare desc\n");
		ret = -EINVAL;
		goto err;
	}

	desc->dma_desc = dma_desc;

	list_add_tail(&desc->node, &nandc->desc_list);

	return 0;
err:
	kfree(desc);

	return ret;
}

/*
 * read_reg_dma:	prepares a descriptor to read a given number of
 *			contiguous registers to the reg_read_buf pointer
 *
 * @first:		offset of the first register in the contiguous block
 * @num_regs:		number of registers to read
 * @flags:		flags to control DMA descriptor preparation
 */
static int read_reg_dma(struct qcom_nand_controller *nandc, int first,
			int num_regs, unsigned int flags)
{
	bool flow_control = false;
	void *vaddr;

	vaddr = nandc->reg_read_buf + nandc->reg_read_pos;
	nandc->reg_read_pos += num_regs;

	if (first == NAND_DEV_CMD_VLD || first == NAND_DEV_CMD1)
		first = dev_cmd_reg_addr(nandc, first);

	if (nandc->props->is_bam)
		return prep_bam_dma_desc_cmd(nandc, true, first, vaddr,
					     num_regs, flags);

	if (first == NAND_READ_ID || first == NAND_FLASH_STATUS)
		flow_control = true;

	return prep_adm_dma_desc(nandc, true, first, vaddr,
				 num_regs * sizeof(u32), flow_control);
}

/*
 * write_reg_dma:	prepares a descriptor to write a given number of
 *			contiguous registers
 *
 * @first:		offset of the first register in the contiguous block
 * @num_regs:		number of registers to write
 * @flags:		flags to control DMA descriptor preparation
 */
static int write_reg_dma(struct qcom_nand_controller *nandc, int first,
			 int num_regs, unsigned int flags)
{
	bool flow_control = false;
	struct nandc_regs *regs = nandc->regs;
	void *vaddr;

	vaddr = offset_to_nandc_reg(regs, first);

	if (first == NAND_ERASED_CW_DETECT_CFG) {
		if (flags & NAND_ERASED_CW_SET)
			vaddr = &regs->erased_cw_detect_cfg_set;
		else
			vaddr = &regs->erased_cw_detect_cfg_clr;
	}

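	/*
	 * the EXEC_CMD write is what actually kicks off the operation, so
	 * request the NWD flag (see NAND_BAM_NWD above) for its descriptor
	 */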
	if (first == NAND_EXEC_CMD)
		flags |= NAND_BAM_NWD;

	if (first == NAND_DEV_CMD1_RESTORE || first == NAND_DEV_CMD1)
		first = dev_cmd_reg_addr(nandc, NAND_DEV_CMD1);

	if (first == NAND_DEV_CMD_VLD_RESTORE || first == NAND_DEV_CMD_VLD)
		first = dev_cmd_reg_addr(nandc, NAND_DEV_CMD_VLD);

	if (nandc->props->is_bam)
		return prep_bam_dma_desc_cmd(nandc, false, first, vaddr,
					     num_regs, flags);

	if (first == NAND_FLASH_CMD)
		flow_control = true;

	return prep_adm_dma_desc(nandc, false, first, vaddr,
				 num_regs * sizeof(u32), flow_control);
}

/*
 * read_data_dma:	prepares a DMA descriptor to transfer data from the
 *			controller's internal buffer to the buffer 'vaddr'
 *
 * @reg_off:		offset within the controller's data buffer
 * @vaddr:		virtual address of the buffer we want to write to
 * @size:		DMA transaction size in bytes
 * @flags:		flags to control DMA descriptor preparation
 */
static int read_data_dma(struct qcom_nand_controller *nandc, int reg_off,
			 const u8 *vaddr, int size, unsigned int flags)
{
	if (nandc->props->is_bam)
		return prep_bam_dma_desc_data(nandc, true, vaddr, size, flags);

	return prep_adm_dma_desc(nandc, true, reg_off, vaddr, size, false);
}

/*
 * write_data_dma:	prepares a DMA descriptor to transfer data from
 *			'vaddr' to the controller's internal buffer
 *
 * @reg_off:		offset within the controller's data buffer
 * @vaddr:		virtual address of the buffer we want to read from
 * @size:		DMA transaction size in bytes
 * @flags:		flags to control DMA descriptor preparation
 */
static int write_data_dma(struct qcom_nand_controller *nandc, int reg_off,
			  const u8 *vaddr, int size, unsigned int flags)
{
	if (nandc->props->is_bam)
		return prep_bam_dma_desc_data(nandc, false, vaddr, size, flags);

	return prep_adm_dma_desc(nandc, false, reg_off, vaddr, size, false);
}

/*
 * Helper to prepare DMA descriptors for configuring registers
 * before reading a NAND page.
 */
static void config_nand_page_read(struct qcom_nand_controller *nandc)
{
	write_reg_dma(nandc, NAND_ADDR0, 2, 0);
	write_reg_dma(nandc, NAND_DEV0_CFG0, 3, 0);
	write_reg_dma(nandc, NAND_EBI2_ECC_BUF_CFG, 1, 0);
	write_reg_dma(nandc, NAND_ERASED_CW_DETECT_CFG, 1, 0);
	write_reg_dma(nandc, NAND_ERASED_CW_DETECT_CFG, 1,
		      NAND_ERASED_CW_SET | NAND_BAM_NEXT_SGL);
}

/*
 * Helper to prepare DMA descriptors for configuring registers
 * before reading each codeword in NAND page.
 */
static void config_nand_cw_read(struct qcom_nand_controller *nandc)
{
	if (nandc->props->is_bam)
		write_reg_dma(nandc, NAND_READ_LOCATION_0, 4,
			      NAND_BAM_NEXT_SGL);

	write_reg_dma(nandc, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
	write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);

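	/*
	 * each codeword read yields three status registers; they land
	 * back-to-back in reg_read_buf and are parsed later as one
	 * read_stats record (flash, buffer and erased-CW status)
	 */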
	read_reg_dma(nandc, NAND_FLASH_STATUS, 2, 0);
	read_reg_dma(nandc, NAND_ERASED_CW_DETECT_STATUS, 1,
		     NAND_BAM_NEXT_SGL);
}

/*
 * Helper to prepare dma descriptors to configure registers needed for reading a
 * single codeword in page
 */
static void config_nand_single_cw_page_read(struct qcom_nand_controller *nandc)
{
	config_nand_page_read(nandc);
	config_nand_cw_read(nandc);
}

/*
 * Helper to prepare DMA descriptors used to configure registers needed
 * before writing a NAND page.
 */
static void config_nand_page_write(struct qcom_nand_controller *nandc)
{
	write_reg_dma(nandc, NAND_ADDR0, 2, 0);
	write_reg_dma(nandc, NAND_DEV0_CFG0, 3, 0);
	write_reg_dma(nandc, NAND_EBI2_ECC_BUF_CFG, 1,
		      NAND_BAM_NEXT_SGL);
}

/*
 * Helper to prepare DMA descriptors for configuring registers
 * before writing each codeword in NAND page.
 */
static void config_nand_cw_write(struct qcom_nand_controller *nandc)
{
	write_reg_dma(nandc, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
	write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);

	read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);

	write_reg_dma(nandc, NAND_FLASH_STATUS, 1, 0);
	write_reg_dma(nandc, NAND_READ_STATUS, 1, NAND_BAM_NEXT_SGL);
}

/*
 * the following functions are used within chip->cmdfunc() to perform different
 * NAND_CMD_* commands
 */

/* sets up descriptors for NAND_CMD_PARAM */
static int nandc_param(struct qcom_nand_host *host)
{
	struct nand_chip *chip = &host->chip;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);

	/*
	 * NAND_CMD_PARAM is called before we know much about the FLASH chip
	 * in use. we configure the controller to perform a raw read of 512
	 * bytes to read onfi params
	 */
	nandc_set_reg(nandc, NAND_FLASH_CMD, PAGE_READ | PAGE_ACC | LAST_PAGE);
	nandc_set_reg(nandc, NAND_ADDR0, 0);
	nandc_set_reg(nandc, NAND_ADDR1, 0);
	nandc_set_reg(nandc, NAND_DEV0_CFG0, 0 << CW_PER_PAGE
					| 512 << UD_SIZE_BYTES
					| 5 << NUM_ADDR_CYCLES
					| 0 << SPARE_SIZE_BYTES);
	nandc_set_reg(nandc, NAND_DEV0_CFG1, 7 << NAND_RECOVERY_CYCLES
					| 0 << CS_ACTIVE_BSY
					| 17 << BAD_BLOCK_BYTE_NUM
					| 1 << BAD_BLOCK_IN_SPARE_AREA
					| 2 << WR_RD_BSY_GAP
					| 0 << WIDE_FLASH
					| 1 << DEV0_CFG1_ECC_DISABLE);
	nandc_set_reg(nandc, NAND_EBI2_ECC_BUF_CFG, 1 << ECC_CFG_ECC_DISABLE);

	/* configure CMD1 and VLD for ONFI param probing */
	nandc_set_reg(nandc, NAND_DEV_CMD_VLD,
		      (nandc->vld & ~READ_START_VLD));
	nandc_set_reg(nandc, NAND_DEV_CMD1,
		      (nandc->cmd1 & ~(0xFF << READ_ADDR))
		      | NAND_CMD_PARAM << READ_ADDR);

	nandc_set_reg(nandc, NAND_EXEC_CMD, 1);

	nandc_set_reg(nandc, NAND_DEV_CMD1_RESTORE, nandc->cmd1);
	nandc_set_reg(nandc, NAND_DEV_CMD_VLD_RESTORE, nandc->vld);
	nandc_set_read_loc(nandc, 0, 0, 512, 1);

	write_reg_dma(nandc, NAND_DEV_CMD_VLD, 1, 0);
	write_reg_dma(nandc, NAND_DEV_CMD1, 1, NAND_BAM_NEXT_SGL);

	nandc->buf_count = 512;
	memset(nandc->data_buffer, 0xff, nandc->buf_count);

	config_nand_single_cw_page_read(nandc);

	read_data_dma(nandc, FLASH_BUF_ACC, nandc->data_buffer,
		      nandc->buf_count, 0);

	/* restore CMD1 and VLD regs */
	write_reg_dma(nandc, NAND_DEV_CMD1_RESTORE, 1, 0);
	write_reg_dma(nandc, NAND_DEV_CMD_VLD_RESTORE, 1, NAND_BAM_NEXT_SGL);

	return 0;
}

/* sets up descriptors for NAND_CMD_ERASE1 */
static int erase_block(struct qcom_nand_host *host, int page_addr)
{
	struct nand_chip *chip = &host->chip;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);

	nandc_set_reg(nandc, NAND_FLASH_CMD,
		      BLOCK_ERASE | PAGE_ACC | LAST_PAGE);
	nandc_set_reg(nandc, NAND_ADDR0, page_addr);
	nandc_set_reg(nandc, NAND_ADDR1, 0);
	nandc_set_reg(nandc, NAND_DEV0_CFG0,
		      host->cfg0_raw & ~(7 << CW_PER_PAGE));
	nandc_set_reg(nandc, NAND_DEV0_CFG1, host->cfg1_raw);
	nandc_set_reg(nandc, NAND_EXEC_CMD, 1);
	nandc_set_reg(nandc, NAND_FLASH_STATUS, host->clrflashstatus);
	nandc_set_reg(nandc, NAND_READ_STATUS, host->clrreadstatus);

	write_reg_dma(nandc, NAND_FLASH_CMD, 3, NAND_BAM_NEXT_SGL);
	write_reg_dma(nandc, NAND_DEV0_CFG0, 2, NAND_BAM_NEXT_SGL);
	write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);

	read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);

	write_reg_dma(nandc, NAND_FLASH_STATUS, 1, 0);
	write_reg_dma(nandc, NAND_READ_STATUS, 1, NAND_BAM_NEXT_SGL);

	return 0;
}

/* sets up descriptors for NAND_CMD_READID */
static int read_id(struct qcom_nand_host *host, int column)
{
	struct nand_chip *chip = &host->chip;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);

	if (column == -1)
		return 0;

	nandc_set_reg(nandc, NAND_FLASH_CMD, FETCH_ID);
	nandc_set_reg(nandc, NAND_ADDR0, column);
	nandc_set_reg(nandc, NAND_ADDR1, 0);
	nandc_set_reg(nandc, NAND_FLASH_CHIP_SELECT,
		      nandc->props->is_bam ? 0 : DM_EN);
	nandc_set_reg(nandc, NAND_EXEC_CMD, 1);

	write_reg_dma(nandc, NAND_FLASH_CMD, 4, NAND_BAM_NEXT_SGL);
	write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);

	read_reg_dma(nandc, NAND_READ_ID, 1, NAND_BAM_NEXT_SGL);

	return 0;
}

/* sets up descriptors for NAND_CMD_RESET */
static int reset(struct qcom_nand_host *host)
{
	struct nand_chip *chip = &host->chip;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);

	nandc_set_reg(nandc, NAND_FLASH_CMD, RESET_DEVICE);
	nandc_set_reg(nandc, NAND_EXEC_CMD, 1);

	write_reg_dma(nandc, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
	write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);

	read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);

	return 0;
}

/* helpers to submit/free our list of dma descriptors */
static int submit_descs(struct qcom_nand_controller *nandc)
{
	struct desc_info *desc;
	dma_cookie_t cookie = 0;
	struct bam_transaction *bam_txn = nandc->bam_txn;
	int r;

	if (nandc->props->is_bam) {
		if (bam_txn->rx_sgl_pos > bam_txn->rx_sgl_start) {
			r = prepare_bam_async_desc(nandc, nandc->rx_chan, 0);
			if (r)
				return r;
		}

		if (bam_txn->tx_sgl_pos > bam_txn->tx_sgl_start) {
			r = prepare_bam_async_desc(nandc, nandc->tx_chan,
						   DMA_PREP_INTERRUPT);
			if (r)
				return r;
		}

		if (bam_txn->cmd_sgl_pos > bam_txn->cmd_sgl_start) {
			r = prepare_bam_async_desc(nandc, nandc->cmd_chan,
						   DMA_PREP_CMD);
			if (r)
				return r;
		}
	}

	list_for_each_entry(desc, &nandc->desc_list, node)
		cookie = dmaengine_submit(desc->dma_desc);

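	/*
	 * the command channel descriptor is flushed last above, so its
	 * cookie is expected to be the last one submitted; waiting on it
	 * (or on the single ADM channel) covers the whole transaction
	 */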
	if (nandc->props->is_bam) {
		dma_async_issue_pending(nandc->tx_chan);
		dma_async_issue_pending(nandc->rx_chan);

		if (dma_sync_wait(nandc->cmd_chan, cookie) != DMA_COMPLETE)
			return -ETIMEDOUT;
	} else {
		if (dma_sync_wait(nandc->chan, cookie) != DMA_COMPLETE)
			return -ETIMEDOUT;
	}

	return 0;
}

static void free_descs(struct qcom_nand_controller *nandc)
{
	struct desc_info *desc, *n;

	list_for_each_entry_safe(desc, n, &nandc->desc_list, node) {
		list_del(&desc->node);

		if (nandc->props->is_bam)
			dma_unmap_sg(nandc->dev, desc->bam_sgl,
				     desc->sgl_cnt, desc->dir);
		else
			dma_unmap_sg(nandc->dev, &desc->adm_sgl, 1,
				     desc->dir);

		kfree(desc);
	}
}

/* reset the register read buffer for next NAND operation */
static void clear_read_regs(struct qcom_nand_controller *nandc)
{
	nandc->reg_read_pos = 0;
	nandc_read_buffer_sync(nandc, false);
}

static void pre_command(struct qcom_nand_host *host, int command)
{
	struct nand_chip *chip = &host->chip;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);

	nandc->buf_count = 0;
	nandc->buf_start = 0;
	host->use_ecc = false;
	host->last_command = command;

	clear_read_regs(nandc);

	if (command == NAND_CMD_RESET || command == NAND_CMD_READID ||
	    command == NAND_CMD_PARAM || command == NAND_CMD_ERASE1)
		clear_bam_transaction(nandc);
}

/*
 * this is called after NAND_CMD_PAGEPROG and NAND_CMD_ERASE1 to set our
 * privately maintained status byte, this status byte can be read after
 * NAND_CMD_STATUS is called
 */
static void parse_erase_write_errors(struct qcom_nand_host *host, int command)
{
	struct nand_chip *chip = &host->chip;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	int num_cw;
	int i;

	num_cw = command == NAND_CMD_PAGEPROG ? ecc->steps : 1;
	nandc_read_buffer_sync(nandc, true);

	for (i = 0; i < num_cw; i++) {
		u32 flash_status = le32_to_cpu(nandc->reg_read_buf[i]);

		if (flash_status & FS_MPU_ERR)
			host->status &= ~NAND_STATUS_WP;

		if (flash_status & FS_OP_ERR || (i == (num_cw - 1) &&
						 (flash_status &
						  FS_DEVICE_STS_ERR)))
			host->status |= NAND_STATUS_FAIL;
	}
}

static void post_command(struct qcom_nand_host *host, int command)
{
	struct nand_chip *chip = &host->chip;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);

	switch (command) {
	case NAND_CMD_READID:
		nandc_read_buffer_sync(nandc, true);
		memcpy(nandc->data_buffer, nandc->reg_read_buf,
		       nandc->buf_count);
		break;
	case NAND_CMD_PAGEPROG:
	case NAND_CMD_ERASE1:
		parse_erase_write_errors(host, command);
		break;
	default:
		break;
	}
}

/*
 * Implements chip->cmdfunc. It's only used for a limited set of commands.
 * The rest of the commands wouldn't be called by upper layers. For example,
 * NAND_CMD_READOOB would never be called because we have our own versions
 * of read_oob ops for nand_ecc_ctrl.
 */
static void qcom_nandc_command(struct mtd_info *mtd, unsigned int command,
			       int column, int page_addr)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct qcom_nand_host *host = to_qcom_nand_host(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	bool wait = false;
	int ret = 0;

	pre_command(host, command);

	switch (command) {
	case NAND_CMD_RESET:
		ret = reset(host);
		wait = true;
		break;

	case NAND_CMD_READID:
		nandc->buf_count = 4;
		ret = read_id(host, column);
		wait = true;
		break;

	case NAND_CMD_PARAM:
		ret = nandc_param(host);
		wait = true;
		break;

	case NAND_CMD_ERASE1:
		ret = erase_block(host, page_addr);
		wait = true;
		break;

	case NAND_CMD_READ0:
		/* we read the entire page for now */
		WARN_ON(column != 0);

		host->use_ecc = true;
		set_address(host, 0, page_addr);
		update_rw_regs(host, ecc->steps, true);
		break;

	case NAND_CMD_SEQIN:
		WARN_ON(column != 0);
		set_address(host, 0, page_addr);
		break;

	case NAND_CMD_PAGEPROG:
	case NAND_CMD_STATUS:
	case NAND_CMD_NONE:
	default:
		break;
	}

	if (ret) {
		dev_err(nandc->dev, "failure executing command %d\n",
			command);
		free_descs(nandc);
		return;
	}

	if (wait) {
		ret = submit_descs(nandc);
		if (ret)
			dev_err(nandc->dev,
				"failure submitting descs for command %d\n",
				command);
	}

	free_descs(nandc);

	post_command(host, command);
}

/*
 * when using BCH ECC, the HW flags an error in NAND_FLASH_STATUS if it read
 * an erased CW, and reports an erased CW in NAND_ERASED_CW_DETECT_STATUS.
 *
 * when using RS ECC, the HW reports the same errors when reading an erased CW,
 * but it notifies that it is an erased CW by placing special characters at
 * certain offsets in the buffer.
 *
 * verify if the page is erased or not, and fix up the page for RS ECC by
 * replacing the special characters with 0xff.
 */
static bool erased_chunk_check_and_fixup(u8 *data_buf, int data_len)
{
	u8 empty1, empty2;

	/*
	 * an erased page flags an error in NAND_FLASH_STATUS, check if the page
	 * is erased by looking for 0x54s at offsets 3 and 175 from the
	 * beginning of each codeword
	 */

	empty1 = data_buf[3];
	empty2 = data_buf[175];

	/*
	 * if the erased codeword markers exist, override them with
	 * 0xffs
	 */
	if ((empty1 == 0x54 && empty2 == 0xff) ||
	    (empty1 == 0xff && empty2 == 0x54)) {
		data_buf[3] = 0xff;
		data_buf[175] = 0xff;
	}

	/*
	 * check if the entire chunk contains 0xffs or not. if it doesn't, then
	 * restore the original values at the special offsets
	 */
	if (memchr_inv(data_buf, 0xff, data_len)) {
		data_buf[3] = empty1;
		data_buf[175] = empty2;

		return false;
	}

	return true;
}

struct read_stats {
	__le32 flash;
	__le32 buffer;
	__le32 erased_cw;
};

/*
 * reads back status registers set by the controller to notify page read
 * errors. this is equivalent to what 'ecc->correct()' would do.
 */
static int parse_read_errors(struct qcom_nand_host *host, u8 *data_buf,
			     u8 *oob_buf)
{
	struct nand_chip *chip = &host->chip;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	unsigned int max_bitflips = 0;
	struct read_stats *buf;
	int i;

	buf = (struct read_stats *)nandc->reg_read_buf;
	nandc_read_buffer_sync(nandc, true);

	for (i = 0; i < ecc->steps; i++, buf++) {
		u32 flash, buffer, erased_cw;
		int data_len, oob_len;

		if (i == (ecc->steps - 1)) {
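			/*
			 * the last codeword is shorter: in this layout the
			 * first steps - 1 codewords each carry 4 extra data
			 * bytes, so the remaining data plus 4 bytes of free
			 * OOB per step end up in the final codeword
			 */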
1538 data_len = ecc->size - ((ecc->steps - 1) << 2);
1539 oob_len = ecc->steps << 2;
1540 } else {
1541 data_len = host->cw_data;
1542 oob_len = 0;
1543 }
1544
1545 flash = le32_to_cpu(buf->flash);
1546 buffer = le32_to_cpu(buf->buffer);
1547 erased_cw = le32_to_cpu(buf->erased_cw);
1548
1549 if (flash & (FS_OP_ERR | FS_MPU_ERR)) {
1550 bool erased;
1551
1552 /* ignore erased codeword errors */
1553 if (host->bch_enabled) {
1554 erased = (erased_cw & ERASED_CW) == ERASED_CW ?
1555 true : false;
1556 } else {
1557 erased = erased_chunk_check_and_fixup(data_buf,
1558 data_len);
1559 }
1560
1561 if (erased) {
1562 data_buf += data_len;
1563 if (oob_buf)
1564 oob_buf += oob_len + ecc->bytes;
1565 continue;
1566 }
1567
1568 if (buffer & BS_UNCORRECTABLE_BIT) {
1569 int ret, ecclen, extraooblen;
1570 void *eccbuf;
1571
1572 eccbuf = oob_buf ? oob_buf + oob_len : NULL;
1573 ecclen = oob_buf ? host->ecc_bytes_hw : 0;
1574 extraooblen = oob_buf ? oob_len : 0;
1575
1576 /*
1577 * make sure it isn't an erased page reported
1578 * as not-erased by HW because of a few bitflips
1579 */
1580 ret = nand_check_erased_ecc_chunk(data_buf,
1581 data_len, eccbuf, ecclen, oob_buf,
1582 extraooblen, ecc->strength);
1583 if (ret < 0) {
1584 mtd->ecc_stats.failed++;
1585 } else {
1586 mtd->ecc_stats.corrected += ret;
1587 max_bitflips =
1588 max_t(unsigned int, max_bitflips, ret);
1589 }
1590 }
1591 } else {
1592 unsigned int stat;
1593
1594 stat = buffer & BS_CORRECTABLE_ERR_MSK;
1595 mtd->ecc_stats.corrected += stat;
1596 max_bitflips = max(max_bitflips, stat);
1597 }
1598
1599 data_buf += data_len;
1600 if (oob_buf)
1601 oob_buf += oob_len + ecc->bytes;
1602 }
1603
1604 return max_bitflips;
1605}
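
/*
 * note on the value returned above: max_bitflips propagates through
 * nand_base up to the mtd layer, which (in generic mtd code, not in this
 * file) turns ecc_stats.failed increments into -EBADMSG and bitflip
 * counts at or above mtd->bitflip_threshold into -EUCLEAN for callers of
 * mtd_read().
 */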
1606
1607/*
1608 * helper to perform the actual page read operation, used by ecc->read_page(),
1609 * ecc->read_oob()
1610 */
1611static int read_page_ecc(struct qcom_nand_host *host, u8 *data_buf,
1612 u8 *oob_buf)
1613{
1614 struct nand_chip *chip = &host->chip;
1615 struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
1616 struct nand_ecc_ctrl *ecc = &chip->ecc;
1617 int i, ret;
1618
Abhishek Sahubde43302017-07-19 17:17:55 +05301619 config_nand_page_read(nandc);
1620
Archit Tanejac76b78d2016-02-03 14:29:50 +05301621 /* queue cmd descs for each codeword */
1622 for (i = 0; i < ecc->steps; i++) {
1623 int data_size, oob_size;
1624
1625 if (i == (ecc->steps - 1)) {
1626 data_size = ecc->size - ((ecc->steps - 1) << 2);
1627 oob_size = (ecc->steps << 2) + host->ecc_bytes_hw +
1628 host->spare_bytes;
1629 } else {
1630 data_size = host->cw_data;
1631 oob_size = host->ecc_bytes_hw + host->spare_bytes;
1632 }
1633
Abhishek Sahu91af95c2017-08-17 17:37:43 +05301634 if (nandc->props->is_bam) {
1635 if (data_buf && oob_buf) {
1636 nandc_set_read_loc(nandc, 0, 0, data_size, 0);
1637 nandc_set_read_loc(nandc, 1, data_size,
1638 oob_size, 1);
1639 } else if (data_buf) {
1640 nandc_set_read_loc(nandc, 0, 0, data_size, 1);
1641 } else {
1642 nandc_set_read_loc(nandc, 0, data_size,
1643 oob_size, 1);
1644 }
1645 }
1646
Abhishek Sahubde43302017-07-19 17:17:55 +05301647 config_nand_cw_read(nandc);
Archit Tanejac76b78d2016-02-03 14:29:50 +05301648
1649 if (data_buf)
1650 read_data_dma(nandc, FLASH_BUF_ACC, data_buf,
Abhishek Sahu67e830a2017-08-17 17:37:42 +05301651 data_size, 0);
Archit Tanejac76b78d2016-02-03 14:29:50 +05301652
1653 /*
1654 * when ecc is enabled, the controller doesn't read the real
1655 * or dummy bad block markers in each chunk. To maintain a
1656 * consistent layout across RAW and ECC reads, we just
1657		 * leave the real/dummy BBM offsets empty (i.e., filled with
1658 * 0xffs)
1659 */
1660 if (oob_buf) {
1661 int j;
1662
1663 for (j = 0; j < host->bbm_size; j++)
1664 *oob_buf++ = 0xff;
1665
1666 read_data_dma(nandc, FLASH_BUF_ACC + data_size,
Abhishek Sahu67e830a2017-08-17 17:37:42 +05301667 oob_buf, oob_size, 0);
Archit Tanejac76b78d2016-02-03 14:29:50 +05301668 }
1669
1670 if (data_buf)
1671 data_buf += data_size;
1672 if (oob_buf)
1673 oob_buf += oob_size;
1674 }
1675
1676 ret = submit_descs(nandc);
1677 if (ret)
1678 dev_err(nandc->dev, "failure to read page/oob\n");
1679
1680 free_descs(nandc);
1681
1682 return ret;
1683}
1684
1685/*
1686 * a helper that copies the last step/codeword of a page (containing free oob)
1687 * into our local buffer
1688 */
1689static int copy_last_cw(struct qcom_nand_host *host, int page)
1690{
1691 struct nand_chip *chip = &host->chip;
1692 struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
1693 struct nand_ecc_ctrl *ecc = &chip->ecc;
1694 int size;
1695 int ret;
1696
1697 clear_read_regs(nandc);
1698
1699 size = host->use_ecc ? host->cw_data : host->cw_size;
1700
1701 /* prepare a clean read buffer */
1702 memset(nandc->data_buffer, 0xff, size);
1703
1704 set_address(host, host->cw_size * (ecc->steps - 1), page);
1705 update_rw_regs(host, 1, true);
1706
Abhishek Sahubde43302017-07-19 17:17:55 +05301707 config_nand_single_cw_page_read(nandc);
Archit Tanejac76b78d2016-02-03 14:29:50 +05301708
Abhishek Sahu67e830a2017-08-17 17:37:42 +05301709 read_data_dma(nandc, FLASH_BUF_ACC, nandc->data_buffer, size, 0);
Archit Tanejac76b78d2016-02-03 14:29:50 +05301710
1711 ret = submit_descs(nandc);
1712 if (ret)
1713 dev_err(nandc->dev, "failed to copy last codeword\n");
1714
1715 free_descs(nandc);
1716
1717 return ret;
1718}
1719
1720/* implements ecc->read_page() */
1721static int qcom_nandc_read_page(struct mtd_info *mtd, struct nand_chip *chip,
1722 uint8_t *buf, int oob_required, int page)
1723{
1724 struct qcom_nand_host *host = to_qcom_nand_host(chip);
1725 struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
1726 u8 *data_buf, *oob_buf = NULL;
1727 int ret;
1728
1729 data_buf = buf;
1730 oob_buf = oob_required ? chip->oob_poi : NULL;
1731
Abhishek Sahu4e2f6c52017-08-17 17:37:46 +05301732 clear_bam_transaction(nandc);
Archit Tanejac76b78d2016-02-03 14:29:50 +05301733 ret = read_page_ecc(host, data_buf, oob_buf);
1734 if (ret) {
1735 dev_err(nandc->dev, "failure to read page\n");
1736 return ret;
1737 }
1738
1739 return parse_read_errors(host, data_buf, oob_buf);
1740}
1741
1742/* implements ecc->read_page_raw() */
1743static int qcom_nandc_read_page_raw(struct mtd_info *mtd,
1744 struct nand_chip *chip, uint8_t *buf,
1745 int oob_required, int page)
1746{
1747 struct qcom_nand_host *host = to_qcom_nand_host(chip);
1748 struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
1749 u8 *data_buf, *oob_buf;
1750 struct nand_ecc_ctrl *ecc = &chip->ecc;
1751 int i, ret;
Abhishek Sahu91af95c2017-08-17 17:37:43 +05301752 int read_loc;
Archit Tanejac76b78d2016-02-03 14:29:50 +05301753
1754 data_buf = buf;
1755 oob_buf = chip->oob_poi;
1756
1757 host->use_ecc = false;
Abhishek Sahu4e2f6c52017-08-17 17:37:46 +05301758
1759 clear_bam_transaction(nandc);
Archit Tanejac76b78d2016-02-03 14:29:50 +05301760 update_rw_regs(host, ecc->steps, true);
Abhishek Sahubde43302017-07-19 17:17:55 +05301761 config_nand_page_read(nandc);
Archit Tanejac76b78d2016-02-03 14:29:50 +05301762
1763 for (i = 0; i < ecc->steps; i++) {
1764 int data_size1, data_size2, oob_size1, oob_size2;
1765 int reg_off = FLASH_BUF_ACC;
1766
1767 data_size1 = mtd->writesize - host->cw_size * (ecc->steps - 1);
1768 oob_size1 = host->bbm_size;
1769
1770 if (i == (ecc->steps - 1)) {
1771 data_size2 = ecc->size - data_size1 -
1772 ((ecc->steps - 1) << 2);
1773 oob_size2 = (ecc->steps << 2) + host->ecc_bytes_hw +
1774 host->spare_bytes;
1775 } else {
1776 data_size2 = host->cw_data - data_size1;
1777 oob_size2 = host->ecc_bytes_hw + host->spare_bytes;
1778 }
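		/*
		 * illustrative check of this split, assuming a 2K page,
		 * x8 bus, 4 bit ECC (cw_size = 528, cw_data = 516, 4
		 * codewords): data_size1 = 2048 - 3 * 528 = 464, so each
		 * codeword transfers data_size1 + oob_size1 + data_size2 +
		 * oob_size2 = 464 + 1 + 52 + 11 = 528 bytes for the first
		 * three steps and 464 + 1 + 36 + 27 = 528 for the last.
		 */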
1779
Abhishek Sahu91af95c2017-08-17 17:37:43 +05301780 if (nandc->props->is_bam) {
1781 read_loc = 0;
1782 nandc_set_read_loc(nandc, 0, read_loc, data_size1, 0);
1783 read_loc += data_size1;
1784
1785 nandc_set_read_loc(nandc, 1, read_loc, oob_size1, 0);
1786 read_loc += oob_size1;
1787
1788 nandc_set_read_loc(nandc, 2, read_loc, data_size2, 0);
1789 read_loc += data_size2;
1790
1791 nandc_set_read_loc(nandc, 3, read_loc, oob_size2, 1);
1792 }
1793
Abhishek Sahubde43302017-07-19 17:17:55 +05301794 config_nand_cw_read(nandc);
Archit Tanejac76b78d2016-02-03 14:29:50 +05301795
Abhishek Sahu67e830a2017-08-17 17:37:42 +05301796 read_data_dma(nandc, reg_off, data_buf, data_size1, 0);
Archit Tanejac76b78d2016-02-03 14:29:50 +05301797 reg_off += data_size1;
1798 data_buf += data_size1;
1799
Abhishek Sahu67e830a2017-08-17 17:37:42 +05301800 read_data_dma(nandc, reg_off, oob_buf, oob_size1, 0);
Archit Tanejac76b78d2016-02-03 14:29:50 +05301801 reg_off += oob_size1;
1802 oob_buf += oob_size1;
1803
Abhishek Sahu67e830a2017-08-17 17:37:42 +05301804 read_data_dma(nandc, reg_off, data_buf, data_size2, 0);
Archit Tanejac76b78d2016-02-03 14:29:50 +05301805 reg_off += data_size2;
1806 data_buf += data_size2;
1807
Abhishek Sahu67e830a2017-08-17 17:37:42 +05301808 read_data_dma(nandc, reg_off, oob_buf, oob_size2, 0);
Archit Tanejac76b78d2016-02-03 14:29:50 +05301809 oob_buf += oob_size2;
1810 }
1811
1812 ret = submit_descs(nandc);
1813 if (ret)
1814 dev_err(nandc->dev, "failure to read raw page\n");
1815
1816 free_descs(nandc);
1817
1818	return ret;
1819}
1820
1821/* implements ecc->read_oob() */
1822static int qcom_nandc_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
1823 int page)
1824{
1825 struct qcom_nand_host *host = to_qcom_nand_host(chip);
1826 struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
1827 struct nand_ecc_ctrl *ecc = &chip->ecc;
1828 int ret;
1829
1830 clear_read_regs(nandc);
Abhishek Sahu4e2f6c52017-08-17 17:37:46 +05301831 clear_bam_transaction(nandc);
Archit Tanejac76b78d2016-02-03 14:29:50 +05301832
1833 host->use_ecc = true;
1834 set_address(host, 0, page);
1835 update_rw_regs(host, ecc->steps, true);
1836
1837 ret = read_page_ecc(host, NULL, chip->oob_poi);
1838 if (ret)
1839 dev_err(nandc->dev, "failure to read oob\n");
1840
1841 return ret;
1842}
1843
1844/* implements ecc->write_page() */
1845static int qcom_nandc_write_page(struct mtd_info *mtd, struct nand_chip *chip,
1846 const uint8_t *buf, int oob_required, int page)
1847{
1848 struct qcom_nand_host *host = to_qcom_nand_host(chip);
1849 struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
1850 struct nand_ecc_ctrl *ecc = &chip->ecc;
1851 u8 *data_buf, *oob_buf;
1852 int i, ret;
1853
1854 clear_read_regs(nandc);
Abhishek Sahu4e2f6c52017-08-17 17:37:46 +05301855 clear_bam_transaction(nandc);
Archit Tanejac76b78d2016-02-03 14:29:50 +05301856
1857 data_buf = (u8 *)buf;
1858 oob_buf = chip->oob_poi;
1859
1860 host->use_ecc = true;
1861 update_rw_regs(host, ecc->steps, false);
Abhishek Sahu77cc5362017-07-19 17:17:56 +05301862 config_nand_page_write(nandc);
Archit Tanejac76b78d2016-02-03 14:29:50 +05301863
1864 for (i = 0; i < ecc->steps; i++) {
1865 int data_size, oob_size;
1866
1867 if (i == (ecc->steps - 1)) {
1868 data_size = ecc->size - ((ecc->steps - 1) << 2);
1869 oob_size = (ecc->steps << 2) + host->ecc_bytes_hw +
1870 host->spare_bytes;
1871 } else {
1872 data_size = host->cw_data;
1873 oob_size = ecc->bytes;
1874 }
1875
Abhishek Sahu67e830a2017-08-17 17:37:42 +05301877 write_data_dma(nandc, FLASH_BUF_ACC, data_buf, data_size,
1878 i == (ecc->steps - 1) ? NAND_BAM_NO_EOT : 0);
Archit Tanejac76b78d2016-02-03 14:29:50 +05301879
1880 /*
1881 * when ECC is enabled, we don't really need to write anything
1882 * to oob for the first n - 1 codewords since these oob regions
1883 * just contain ECC bytes that's written by the controller
1884 * itself. For the last codeword, we skip the bbm positions and
1885 * write to the free oob area.
1886 */
1887 if (i == (ecc->steps - 1)) {
1888 oob_buf += host->bbm_size;
1889
1890 write_data_dma(nandc, FLASH_BUF_ACC + data_size,
Abhishek Sahu67e830a2017-08-17 17:37:42 +05301891 oob_buf, oob_size, 0);
Archit Tanejac76b78d2016-02-03 14:29:50 +05301892 }
1893
Abhishek Sahu77cc5362017-07-19 17:17:56 +05301894 config_nand_cw_write(nandc);
Archit Tanejac76b78d2016-02-03 14:29:50 +05301895
1896 data_buf += data_size;
1897 oob_buf += oob_size;
1898 }
1899
1900 ret = submit_descs(nandc);
1901 if (ret)
1902 dev_err(nandc->dev, "failure to write page\n");
1903
1904 free_descs(nandc);
1905
1906 return ret;
1907}
1908
1909/* implements ecc->write_page_raw() */
1910static int qcom_nandc_write_page_raw(struct mtd_info *mtd,
1911 struct nand_chip *chip, const uint8_t *buf,
1912 int oob_required, int page)
1913{
1914 struct qcom_nand_host *host = to_qcom_nand_host(chip);
1915 struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
1916 struct nand_ecc_ctrl *ecc = &chip->ecc;
1917 u8 *data_buf, *oob_buf;
1918 int i, ret;
1919
1920 clear_read_regs(nandc);
Abhishek Sahu4e2f6c52017-08-17 17:37:46 +05301921 clear_bam_transaction(nandc);
Archit Tanejac76b78d2016-02-03 14:29:50 +05301922
1923 data_buf = (u8 *)buf;
1924 oob_buf = chip->oob_poi;
1925
1926 host->use_ecc = false;
1927 update_rw_regs(host, ecc->steps, false);
Abhishek Sahu77cc5362017-07-19 17:17:56 +05301928 config_nand_page_write(nandc);
Archit Tanejac76b78d2016-02-03 14:29:50 +05301929
1930 for (i = 0; i < ecc->steps; i++) {
1931 int data_size1, data_size2, oob_size1, oob_size2;
1932 int reg_off = FLASH_BUF_ACC;
1933
1934 data_size1 = mtd->writesize - host->cw_size * (ecc->steps - 1);
1935 oob_size1 = host->bbm_size;
1936
1937 if (i == (ecc->steps - 1)) {
1938 data_size2 = ecc->size - data_size1 -
1939 ((ecc->steps - 1) << 2);
1940 oob_size2 = (ecc->steps << 2) + host->ecc_bytes_hw +
1941 host->spare_bytes;
1942 } else {
1943 data_size2 = host->cw_data - data_size1;
1944 oob_size2 = host->ecc_bytes_hw + host->spare_bytes;
1945 }
1946
Abhishek Sahu67e830a2017-08-17 17:37:42 +05301947 write_data_dma(nandc, reg_off, data_buf, data_size1,
1948 NAND_BAM_NO_EOT);
Archit Tanejac76b78d2016-02-03 14:29:50 +05301949 reg_off += data_size1;
1950 data_buf += data_size1;
1951
Abhishek Sahu67e830a2017-08-17 17:37:42 +05301952 write_data_dma(nandc, reg_off, oob_buf, oob_size1,
1953 NAND_BAM_NO_EOT);
Archit Tanejac76b78d2016-02-03 14:29:50 +05301954 reg_off += oob_size1;
1955 oob_buf += oob_size1;
1956
Abhishek Sahu67e830a2017-08-17 17:37:42 +05301957 write_data_dma(nandc, reg_off, data_buf, data_size2,
1958 NAND_BAM_NO_EOT);
Archit Tanejac76b78d2016-02-03 14:29:50 +05301959 reg_off += data_size2;
1960 data_buf += data_size2;
1961
Abhishek Sahu67e830a2017-08-17 17:37:42 +05301962 write_data_dma(nandc, reg_off, oob_buf, oob_size2, 0);
Archit Tanejac76b78d2016-02-03 14:29:50 +05301963 oob_buf += oob_size2;
1964
Abhishek Sahu77cc5362017-07-19 17:17:56 +05301965 config_nand_cw_write(nandc);
Archit Tanejac76b78d2016-02-03 14:29:50 +05301966 }
1967
1968 ret = submit_descs(nandc);
1969 if (ret)
1970 dev_err(nandc->dev, "failure to write raw page\n");
1971
1972 free_descs(nandc);
1973
1974 return ret;
1975}
1976
1977/*
1978 * implements ecc->write_oob()
1979 *
1980 * the NAND controller cannot write only data or only oob within a codeword,
1981 * since ecc is calculated for the combined codeword. we first copy the
1982 * entire contents for the last codeword(data + oob), replace the old oob
1983 * with the new one in chip->oob_poi, and then write the entire codeword.
1984 * this read-copy-write operation results in a slight performance loss.
1985 */
1986static int qcom_nandc_write_oob(struct mtd_info *mtd, struct nand_chip *chip,
1987 int page)
1988{
1989 struct qcom_nand_host *host = to_qcom_nand_host(chip);
1990 struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
1991 struct nand_ecc_ctrl *ecc = &chip->ecc;
1992 u8 *oob = chip->oob_poi;
Archit Tanejac76b78d2016-02-03 14:29:50 +05301993 int data_size, oob_size;
1994 int ret, status = 0;
1995
1996 host->use_ecc = true;
1997
Abhishek Sahu4e2f6c52017-08-17 17:37:46 +05301998 clear_bam_transaction(nandc);
Archit Tanejac76b78d2016-02-03 14:29:50 +05301999 ret = copy_last_cw(host, page);
2000 if (ret)
2001 return ret;
2002
2003 clear_read_regs(nandc);
Abhishek Sahu4e2f6c52017-08-17 17:37:46 +05302004 clear_bam_transaction(nandc);
Archit Tanejac76b78d2016-02-03 14:29:50 +05302005
2006 /* calculate the data and oob size for the last codeword/step */
2007 data_size = ecc->size - ((ecc->steps - 1) << 2);
Boris Brezillonaa02fcf2016-03-18 17:53:31 +01002008 oob_size = mtd->oobavail;
Archit Tanejac76b78d2016-02-03 14:29:50 +05302009
2010 /* override new oob content to last codeword */
Boris Brezillonaa02fcf2016-03-18 17:53:31 +01002011 mtd_ooblayout_get_databytes(mtd, nandc->data_buffer + data_size, oob,
2012 0, mtd->oobavail);
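	/*
	 * illustrative numbers for a 2K page with 4 codewords:
	 * data_size = 512 - 12 = 500 and oob_size = mtd->oobavail = 16, so
	 * the buffer written below holds exactly one codeword's worth of
	 * ECC protected data (516 bytes).
	 */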
Archit Tanejac76b78d2016-02-03 14:29:50 +05302013
2014 set_address(host, host->cw_size * (ecc->steps - 1), page);
2015 update_rw_regs(host, 1, false);
2016
Abhishek Sahu77cc5362017-07-19 17:17:56 +05302017 config_nand_page_write(nandc);
Abhishek Sahu67e830a2017-08-17 17:37:42 +05302018 write_data_dma(nandc, FLASH_BUF_ACC,
2019 nandc->data_buffer, data_size + oob_size, 0);
Abhishek Sahu77cc5362017-07-19 17:17:56 +05302020 config_nand_cw_write(nandc);
Archit Tanejac76b78d2016-02-03 14:29:50 +05302021
2022 ret = submit_descs(nandc);
2023
2024 free_descs(nandc);
2025
2026 if (ret) {
2027 dev_err(nandc->dev, "failure to write oob\n");
2028 return -EIO;
2029 }
2030
2031 chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
2032
2033 status = chip->waitfunc(mtd, chip);
2034
2035 return status & NAND_STATUS_FAIL ? -EIO : 0;
2036}
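
/*
 * for reference, a typical caller path into the hook above, summarised
 * from generic mtd code (the buffer name and offset are made up):
 *
 *	struct mtd_oob_ops ops = {
 *		.mode	= MTD_OPS_AUTO_OOB,
 *		.ooblen	= mtd->oobavail,
 *		.oobbuf	= my_oob,
 *	};
 *
 *	mtd_write_oob(mtd, page_offs, &ops);
 *
 * nand_base arranges my_oob into chip->oob_poi according to the free
 * ooblayout before ecc->write_oob() is called.
 */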
2037
2038static int qcom_nandc_block_bad(struct mtd_info *mtd, loff_t ofs)
2039{
2040 struct nand_chip *chip = mtd_to_nand(mtd);
2041 struct qcom_nand_host *host = to_qcom_nand_host(chip);
2042 struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
2043 struct nand_ecc_ctrl *ecc = &chip->ecc;
2044 int page, ret, bbpos, bad = 0;
2045 u32 flash_status;
2046
2047 page = (int)(ofs >> chip->page_shift) & chip->pagemask;
2048
2049 /*
2050	 * configure registers for a raw sub page read. the address is set to
2051	 * the beginning of the last codeword; we don't care about reading the
2052	 * ecc portion of oob, we just want the first few bytes of this
2053	 * codeword, which contain the BBM
2054 */
2055 host->use_ecc = false;
2056
Abhishek Sahu4e2f6c52017-08-17 17:37:46 +05302057 clear_bam_transaction(nandc);
Archit Tanejac76b78d2016-02-03 14:29:50 +05302058 ret = copy_last_cw(host, page);
2059 if (ret)
2060 goto err;
2061
2062 flash_status = le32_to_cpu(nandc->reg_read_buf[0]);
2063
2064 if (flash_status & (FS_OP_ERR | FS_MPU_ERR)) {
2065 dev_warn(nandc->dev, "error when trying to read BBM\n");
2066 goto err;
2067 }
2068
2069 bbpos = mtd->writesize - host->cw_size * (ecc->steps - 1);
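	/*
	 * e.g. 2048 - 3 * 528 = 464 for a 2K page with 4 bit ECC
	 * (illustrative numbers): the BBM lands right after the first
	 * data_size1 bytes of the raw last codeword copied into
	 * nandc->data_buffer.
	 */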
2070
2071 bad = nandc->data_buffer[bbpos] != 0xff;
2072
2073 if (chip->options & NAND_BUSWIDTH_16)
2074 bad = bad || (nandc->data_buffer[bbpos + 1] != 0xff);
2075err:
2076 return bad;
2077}
2078
2079static int qcom_nandc_block_markbad(struct mtd_info *mtd, loff_t ofs)
2080{
2081 struct nand_chip *chip = mtd_to_nand(mtd);
2082 struct qcom_nand_host *host = to_qcom_nand_host(chip);
2083 struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
2084 struct nand_ecc_ctrl *ecc = &chip->ecc;
2085 int page, ret, status = 0;
2086
2087 clear_read_regs(nandc);
Abhishek Sahu4e2f6c52017-08-17 17:37:46 +05302088 clear_bam_transaction(nandc);
Archit Tanejac76b78d2016-02-03 14:29:50 +05302089
2090 /*
2091	 * to mark the block as bad, we flash the entire last codeword with 0s.
2092 * we don't care about the rest of the content in the codeword since
2093 * we aren't going to use this block again
2094 */
2095 memset(nandc->data_buffer, 0x00, host->cw_size);
2096
2097 page = (int)(ofs >> chip->page_shift) & chip->pagemask;
2098
2099 /* prepare write */
2100 host->use_ecc = false;
2101 set_address(host, host->cw_size * (ecc->steps - 1), page);
2102 update_rw_regs(host, 1, false);
2103
Abhishek Sahu77cc5362017-07-19 17:17:56 +05302104 config_nand_page_write(nandc);
Abhishek Sahu67e830a2017-08-17 17:37:42 +05302105 write_data_dma(nandc, FLASH_BUF_ACC,
2106 nandc->data_buffer, host->cw_size, 0);
Abhishek Sahu77cc5362017-07-19 17:17:56 +05302107 config_nand_cw_write(nandc);
Archit Tanejac76b78d2016-02-03 14:29:50 +05302108
2109 ret = submit_descs(nandc);
2110
2111 free_descs(nandc);
2112
2113 if (ret) {
2114 dev_err(nandc->dev, "failure to update BBM\n");
2115 return -EIO;
2116 }
2117
2118 chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
2119
2120 status = chip->waitfunc(mtd, chip);
2121
2122 return status & NAND_STATUS_FAIL ? -EIO : 0;
2123}
2124
2125/*
2126 * the three functions below implement chip->read_byte(), chip->read_buf()
2127 * and chip->write_buf() respectively. these aren't used for
2128 * reading/writing page data; they are used for smaller transfers like
2129 * reading the id, status, etc.
2130 */
2131static uint8_t qcom_nandc_read_byte(struct mtd_info *mtd)
2132{
2133 struct nand_chip *chip = mtd_to_nand(mtd);
2134 struct qcom_nand_host *host = to_qcom_nand_host(chip);
2135 struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
2136 u8 *buf = nandc->data_buffer;
2137 u8 ret = 0x0;
2138
2139 if (host->last_command == NAND_CMD_STATUS) {
2140 ret = host->status;
2141
2142 host->status = NAND_STATUS_READY | NAND_STATUS_WP;
2143
2144 return ret;
2145 }
2146
2147 if (nandc->buf_start < nandc->buf_count)
2148 ret = buf[nandc->buf_start++];
2149
2150 return ret;
2151}
2152
2153static void qcom_nandc_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
2154{
2155 struct nand_chip *chip = mtd_to_nand(mtd);
2156 struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
2157 int real_len = min_t(size_t, len, nandc->buf_count - nandc->buf_start);
2158
2159 memcpy(buf, nandc->data_buffer + nandc->buf_start, real_len);
2160 nandc->buf_start += real_len;
2161}
2162
2163static void qcom_nandc_write_buf(struct mtd_info *mtd, const uint8_t *buf,
2164 int len)
2165{
2166 struct nand_chip *chip = mtd_to_nand(mtd);
2167 struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
2168 int real_len = min_t(size_t, len, nandc->buf_count - nandc->buf_start);
2169
2170 memcpy(nandc->data_buffer + nandc->buf_start, buf, real_len);
2171
2172 nandc->buf_start += real_len;
2173}
2174
2175/* we support only one external chip for now */
2176static void qcom_nandc_select_chip(struct mtd_info *mtd, int chipnr)
2177{
2178 struct nand_chip *chip = mtd_to_nand(mtd);
2179 struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
2180
2181 if (chipnr <= 0)
2182 return;
2183
2184 dev_warn(nandc->dev, "invalid chip select\n");
2185}
2186
2187/*
2188 * NAND controller page layout info
2189 *
2190 * Layout with ECC enabled:
2191 *
2192 * |----------------------| |---------------------------------|
2193 * | xx.......yy| | *********xx.......yy|
2194 * | DATA xx..ECC..yy| | DATA **SPARE**xx..ECC..yy|
2195 * | (516) xx.......yy| | (516-n*4) **(n*4)**xx.......yy|
2196 * | xx.......yy| | *********xx.......yy|
2197 * |----------------------| |---------------------------------|
2198 * codeword 1,2..n-1 codeword n
2199 * <---(528/532 Bytes)--> <-------(528/532 Bytes)--------->
2200 *
2201 * n = Number of codewords in the page
2202 * . = ECC bytes
2203 * * = Spare/free bytes
2204 * x = Unused byte(s)
2205 * y = Reserved byte(s)
2206 *
2207 * 2K page: n = 4, spare = 16 bytes
2208 * 4K page: n = 8, spare = 32 bytes
2209 * 8K page: n = 16, spare = 64 bytes
2210 *
2211 * the qcom nand controller operates at a sub page/codeword level. each
2212 * codeword is 528 and 532 bytes for 4 bit and 8 bit ECC modes respectively.
2213 * the number of ECC bytes vary based on the ECC strength and the bus width.
2214 *
2215 * the first n - 1 codewords contain 516 bytes of user data; the remaining
2216 * 12/16 bytes consist of ECC and reserved data. The nth codeword contains
2217 * both user data and spare (oobavail) bytes that sum up to 516 bytes.
2218 *
2219 * When we access a page with ECC enabled, the reserved byte(s) are not
2220 * accessible at all. When reading, we fill up these unreadable positions
2221 * with 0xffs. When writing, the controller skips writing the inaccessible
2222 * bytes.
2223 *
2224 * Layout with ECC disabled:
2225 *
2226 * |------------------------------| |---------------------------------------|
2227 * | yy xx.......| | bb *********xx.......|
2228 * | DATA1 yy DATA2 xx..ECC..| | DATA1 bb DATA2 **SPARE**xx..ECC..|
2229 * | (size1) yy (size2) xx.......| | (size1) bb (size2) **(n*4)**xx.......|
2230 * | yy xx.......| | bb *********xx.......|
2231 * |------------------------------| |---------------------------------------|
2232 * codeword 1,2..n-1 codeword n
2233 * <-------(528/532 Bytes)------> <-----------(528/532 Bytes)----------->
2234 *
2235 * n = Number of codewords in the page
2236 * . = ECC bytes
2237 * * = Spare/free bytes
2238 * x = Unused byte(s)
2239 * y = Dummy Bad Block byte(s)
2240 * b = Real Bad Block byte(s)
2241 * size1/size2 = function of codeword size and 'n'
2242 *
2243 * when the ECC block is disabled, one reserved byte (or two for 16 bit bus
2244 * width) is now accessible. For the first n - 1 codewords, these are dummy Bad
2245 * Block Markers. In the last codeword, this position contains the real BBM
2246 *
2247 * In order to have a consistent layout between RAW and ECC modes, we assume
2248 * the following OOB layout arrangement:
2249 *
2250 * |-----------| |--------------------|
2251 * |yyxx.......| |bb*********xx.......|
2252 * |yyxx..ECC..| |bb*FREEOOB*xx..ECC..|
2253 * |yyxx.......| |bb*********xx.......|
2254 * |yyxx.......| |bb*********xx.......|
2255 * |-----------| |--------------------|
2256 * first n - 1 nth OOB region
2257 * OOB regions
2258 *
2259 * n = Number of codewords in the page
2260 * . = ECC bytes
2261 * * = FREE OOB bytes
2262 * y = Dummy bad block byte(s) (inaccessible when ECC enabled)
2263 * x = Unused byte(s)
2264 * b = Real bad block byte(s) (inaccessible when ECC enabled)
2265 *
2266 * This layout is read as is when ECC is disabled. When ECC is enabled, the
2267 * inaccessible Bad Block byte(s) are ignored when we write to a page/oob,
2268 * and assumed to be 0xffs when we read a page/oob. The ECC, unused and
Boris Brezillon421e81c2016-03-18 17:54:27 +01002269 * dummy/real bad block bytes are grouped as ecc bytes (i.e., ecc->bytes is
2270 * the sum of the three).
Archit Tanejac76b78d2016-02-03 14:29:50 +05302271 */
Boris Brezillon421e81c2016-03-18 17:54:27 +01002272static int qcom_nand_ooblayout_ecc(struct mtd_info *mtd, int section,
2273 struct mtd_oob_region *oobregion)
Archit Tanejac76b78d2016-02-03 14:29:50 +05302274{
Boris Brezillon421e81c2016-03-18 17:54:27 +01002275 struct nand_chip *chip = mtd_to_nand(mtd);
2276 struct qcom_nand_host *host = to_qcom_nand_host(chip);
Archit Tanejac76b78d2016-02-03 14:29:50 +05302277 struct nand_ecc_ctrl *ecc = &chip->ecc;
Archit Tanejac76b78d2016-02-03 14:29:50 +05302278
Boris Brezillon421e81c2016-03-18 17:54:27 +01002279 if (section > 1)
2280 return -ERANGE;
Archit Tanejac76b78d2016-02-03 14:29:50 +05302281
Boris Brezillon421e81c2016-03-18 17:54:27 +01002282 if (!section) {
2283 oobregion->length = (ecc->bytes * (ecc->steps - 1)) +
2284 host->bbm_size;
2285 oobregion->offset = 0;
2286 } else {
2287 oobregion->length = host->ecc_bytes_hw + host->spare_bytes;
2288 oobregion->offset = mtd->oobsize - oobregion->length;
Archit Tanejac76b78d2016-02-03 14:29:50 +05302289 }
2290
Boris Brezillon421e81c2016-03-18 17:54:27 +01002291 return 0;
Archit Tanejac76b78d2016-02-03 14:29:50 +05302292}
2293
Boris Brezillon421e81c2016-03-18 17:54:27 +01002294static int qcom_nand_ooblayout_free(struct mtd_info *mtd, int section,
2295 struct mtd_oob_region *oobregion)
2296{
2297 struct nand_chip *chip = mtd_to_nand(mtd);
2298 struct qcom_nand_host *host = to_qcom_nand_host(chip);
2299 struct nand_ecc_ctrl *ecc = &chip->ecc;
2300
2301 if (section)
2302 return -ERANGE;
2303
2304 oobregion->length = ecc->steps * 4;
2305 oobregion->offset = ((ecc->steps - 1) * ecc->bytes) + host->bbm_size;
2306
2307 return 0;
2308}
2309
2310static const struct mtd_ooblayout_ops qcom_nand_ooblayout_ops = {
2311 .ecc = qcom_nand_ooblayout_ecc,
2312 .free = qcom_nand_ooblayout_free,
2313};
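
/*
 * worked example for the two sections above, assuming a 2K + 64 byte
 * page, x8 bus, 4 bit BCH (ecc->steps = 4, ecc->bytes = 12,
 * host->bbm_size = 1, host->ecc_bytes_hw = 7, host->spare_bytes = 4):
 *
 *	ECC section 0:  offset 0,  length 3 * 12 + 1 = 37
 *	free section:   offset 37, length 4 * 4      = 16
 *	ECC section 1:  offset 53, length 7 + 4      = 11
 *
 * which accounts for all 64 OOB bytes.
 */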
2314
Archit Tanejac76b78d2016-02-03 14:29:50 +05302315static int qcom_nand_host_setup(struct qcom_nand_host *host)
2316{
2317 struct nand_chip *chip = &host->chip;
2318 struct mtd_info *mtd = nand_to_mtd(chip);
2319 struct nand_ecc_ctrl *ecc = &chip->ecc;
2320 struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
2321 int cwperpage, bad_block_byte;
2322 bool wide_bus;
2323 int ecc_mode = 1;
2324
2325 /*
2326	 * the controller requires each step to consist of 512 bytes of data.
2327 * bail out if DT has populated a wrong step size.
2328 */
2329 if (ecc->size != NANDC_STEP_SIZE) {
2330 dev_err(nandc->dev, "invalid ecc size\n");
2331 return -EINVAL;
2332 }
2333
2334 wide_bus = chip->options & NAND_BUSWIDTH_16 ? true : false;
2335
2336 if (ecc->strength >= 8) {
2337 /* 8 bit ECC defaults to BCH ECC on all platforms */
2338 host->bch_enabled = true;
2339 ecc_mode = 1;
2340
2341 if (wide_bus) {
2342 host->ecc_bytes_hw = 14;
2343 host->spare_bytes = 0;
2344 host->bbm_size = 2;
2345 } else {
2346 host->ecc_bytes_hw = 13;
2347 host->spare_bytes = 2;
2348 host->bbm_size = 1;
2349 }
2350 } else {
2351 /*
2352 * if the controller supports BCH for 4 bit ECC, the controller
2353		 * uses fewer bytes for ECC. If RS is used, the ECC parity is
2354		 * always 10 bytes
2355 */
Abhishek Sahu58f1f222017-08-11 17:09:17 +05302356 if (nandc->props->ecc_modes & ECC_BCH_4BIT) {
Archit Tanejac76b78d2016-02-03 14:29:50 +05302357 /* BCH */
2358 host->bch_enabled = true;
2359 ecc_mode = 0;
2360
2361 if (wide_bus) {
2362 host->ecc_bytes_hw = 8;
2363 host->spare_bytes = 2;
2364 host->bbm_size = 2;
2365 } else {
2366 host->ecc_bytes_hw = 7;
2367 host->spare_bytes = 4;
2368 host->bbm_size = 1;
2369 }
2370 } else {
2371 /* RS */
2372 host->ecc_bytes_hw = 10;
2373
2374 if (wide_bus) {
2375 host->spare_bytes = 0;
2376 host->bbm_size = 2;
2377 } else {
2378 host->spare_bytes = 1;
2379 host->bbm_size = 1;
2380 }
2381 }
2382 }
2383
2384 /*
2385 * we consider ecc->bytes as the sum of all the non-data content in a
2386 * step. It gives us a clean representation of the oob area (even if
2387	 * all the bytes aren't used for ECC). It is always 16 bytes for 8 bit
2388 * ECC and 12 bytes for 4 bit ECC
2389 */
2390 ecc->bytes = host->ecc_bytes_hw + host->spare_bytes + host->bbm_size;
2391
2392 ecc->read_page = qcom_nandc_read_page;
2393 ecc->read_page_raw = qcom_nandc_read_page_raw;
2394 ecc->read_oob = qcom_nandc_read_oob;
2395 ecc->write_page = qcom_nandc_write_page;
2396 ecc->write_page_raw = qcom_nandc_write_page_raw;
2397 ecc->write_oob = qcom_nandc_write_oob;
2398
2399 ecc->mode = NAND_ECC_HW;
2400
Boris Brezillon421e81c2016-03-18 17:54:27 +01002401 mtd_set_ooblayout(mtd, &qcom_nand_ooblayout_ops);
Archit Tanejac76b78d2016-02-03 14:29:50 +05302402
2403 cwperpage = mtd->writesize / ecc->size;
Abhishek Sahucb80f112017-08-17 17:37:40 +05302404 nandc->max_cwperpage = max_t(unsigned int, nandc->max_cwperpage,
2405 cwperpage);
Archit Tanejac76b78d2016-02-03 14:29:50 +05302406
2407 /*
2408 * DATA_UD_BYTES varies based on whether the read/write command protects
2409 * spare data with ECC too. We protect spare data by default, so we set
2410 * it to main + spare data, which are 512 and 4 bytes respectively.
2411 */
2412 host->cw_data = 516;
2413
2414 /*
2415 * total bytes in a step, either 528 bytes for 4 bit ECC, or 532 bytes
2416 * for 8 bit ECC
2417 */
2418 host->cw_size = host->cw_data + ecc->bytes;
2419
2420 if (ecc->bytes * (mtd->writesize / ecc->size) > mtd->oobsize) {
2421 dev_err(nandc->dev, "ecc data doesn't fit in OOB area\n");
2422 return -EINVAL;
2423 }
2424
2425 bad_block_byte = mtd->writesize - host->cw_size * (cwperpage - 1) + 1;
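	/*
	 * e.g. 2048 - 3 * 528 + 1 = 465 for a 2K page with 4 bit ECC
	 * (illustrative numbers): the position of the bad block marker
	 * relative to the start of the last codeword.
	 */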
2426
2427 host->cfg0 = (cwperpage - 1) << CW_PER_PAGE
2428 | host->cw_data << UD_SIZE_BYTES
2429 | 0 << DISABLE_STATUS_AFTER_WRITE
2430 | 5 << NUM_ADDR_CYCLES
2431 | host->ecc_bytes_hw << ECC_PARITY_SIZE_BYTES_RS
2432 | 0 << STATUS_BFR_READ
2433 | 1 << SET_RD_MODE_AFTER_STATUS
2434 | host->spare_bytes << SPARE_SIZE_BYTES;
2435
2436 host->cfg1 = 7 << NAND_RECOVERY_CYCLES
2437 | 0 << CS_ACTIVE_BSY
2438 | bad_block_byte << BAD_BLOCK_BYTE_NUM
2439 | 0 << BAD_BLOCK_IN_SPARE_AREA
2440 | 2 << WR_RD_BSY_GAP
2441 | wide_bus << WIDE_FLASH
2442 | host->bch_enabled << ENABLE_BCH_ECC;
2443
2444 host->cfg0_raw = (cwperpage - 1) << CW_PER_PAGE
2445 | host->cw_size << UD_SIZE_BYTES
2446 | 5 << NUM_ADDR_CYCLES
2447 | 0 << SPARE_SIZE_BYTES;
2448
2449 host->cfg1_raw = 7 << NAND_RECOVERY_CYCLES
2450 | 0 << CS_ACTIVE_BSY
2451 | 17 << BAD_BLOCK_BYTE_NUM
2452 | 1 << BAD_BLOCK_IN_SPARE_AREA
2453 | 2 << WR_RD_BSY_GAP
2454 | wide_bus << WIDE_FLASH
2455 | 1 << DEV0_CFG1_ECC_DISABLE;
2456
Abhishek Sahu10777de2017-08-03 17:56:39 +02002457 host->ecc_bch_cfg = !host->bch_enabled << ECC_CFG_ECC_DISABLE
Archit Tanejac76b78d2016-02-03 14:29:50 +05302458 | 0 << ECC_SW_RESET
2459 | host->cw_data << ECC_NUM_DATA_BYTES
2460 | 1 << ECC_FORCE_CLK_OPEN
2461 | ecc_mode << ECC_MODE
2462 | host->ecc_bytes_hw << ECC_PARITY_SIZE_BYTES_BCH;
2463
2464 host->ecc_buf_cfg = 0x203 << NUM_STEPS;
2465
2466 host->clrflashstatus = FS_READY_BSY_N;
2467 host->clrreadstatus = 0xc0;
Abhishek Sahua86b9c42017-08-17 17:37:44 +05302468 nandc->regs->erased_cw_detect_cfg_clr =
2469 cpu_to_le32(CLR_ERASED_PAGE_DET);
2470 nandc->regs->erased_cw_detect_cfg_set =
2471 cpu_to_le32(SET_ERASED_PAGE_DET);
Archit Tanejac76b78d2016-02-03 14:29:50 +05302472
2473 dev_dbg(nandc->dev,
2474 "cfg0 %x cfg1 %x ecc_buf_cfg %x ecc_bch cfg %x cw_size %d cw_data %d strength %d parity_bytes %d steps %d\n",
2475 host->cfg0, host->cfg1, host->ecc_buf_cfg, host->ecc_bch_cfg,
2476 host->cw_size, host->cw_data, ecc->strength, ecc->bytes,
2477 cwperpage);
2478
2479 return 0;
2480}
2481
2482static int qcom_nandc_alloc(struct qcom_nand_controller *nandc)
2483{
2484 int ret;
2485
2486 ret = dma_set_coherent_mask(nandc->dev, DMA_BIT_MASK(32));
2487 if (ret) {
2488 dev_err(nandc->dev, "failed to set DMA mask\n");
2489 return ret;
2490 }
2491
2492 /*
2493 * we use the internal buffer for reading ONFI params, reading small
2494	 * data like ID and status, and performing read-copy-write operations
2495 * when writing to a codeword partially. 532 is the maximum possible
2496 * size of a codeword for our nand controller
2497 */
2498 nandc->buf_size = 532;
2499
2500 nandc->data_buffer = devm_kzalloc(nandc->dev, nandc->buf_size,
2501 GFP_KERNEL);
2502 if (!nandc->data_buffer)
2503 return -ENOMEM;
2504
2505 nandc->regs = devm_kzalloc(nandc->dev, sizeof(*nandc->regs),
2506 GFP_KERNEL);
2507 if (!nandc->regs)
2508 return -ENOMEM;
2509
2510 nandc->reg_read_buf = devm_kzalloc(nandc->dev,
2511 MAX_REG_RD * sizeof(*nandc->reg_read_buf),
2512 GFP_KERNEL);
2513 if (!nandc->reg_read_buf)
2514 return -ENOMEM;
2515
Abhishek Sahu497d7d82017-08-11 17:09:19 +05302516 if (nandc->props->is_bam) {
Abhishek Sahu6192ff72017-08-17 17:37:39 +05302517 nandc->reg_read_dma =
2518 dma_map_single(nandc->dev, nandc->reg_read_buf,
2519 MAX_REG_RD *
2520 sizeof(*nandc->reg_read_buf),
2521 DMA_FROM_DEVICE);
2522 if (dma_mapping_error(nandc->dev, nandc->reg_read_dma)) {
2523 dev_err(nandc->dev, "failed to DMA MAP reg buffer\n");
2524 return -EIO;
2525 }
2526
Abhishek Sahu497d7d82017-08-11 17:09:19 +05302527 nandc->tx_chan = dma_request_slave_channel(nandc->dev, "tx");
2528 if (!nandc->tx_chan) {
2529 dev_err(nandc->dev, "failed to request tx channel\n");
2530 return -ENODEV;
2531 }
2532
2533 nandc->rx_chan = dma_request_slave_channel(nandc->dev, "rx");
2534 if (!nandc->rx_chan) {
2535 dev_err(nandc->dev, "failed to request rx channel\n");
2536 return -ENODEV;
2537 }
2538
2539 nandc->cmd_chan = dma_request_slave_channel(nandc->dev, "cmd");
2540 if (!nandc->cmd_chan) {
2541 dev_err(nandc->dev, "failed to request cmd channel\n");
2542 return -ENODEV;
2543 }
Abhishek Sahucb80f112017-08-17 17:37:40 +05302544
2545 /*
2546 * Initially allocate BAM transaction to read ONFI param page.
2547 * After detecting all the devices, this BAM transaction will
2548		 * be freed and the next BAM transaction will be allocated with
2549 * maximum codeword size
2550 */
2551 nandc->max_cwperpage = 1;
2552 nandc->bam_txn = alloc_bam_transaction(nandc);
2553 if (!nandc->bam_txn) {
2554 dev_err(nandc->dev,
2555 "failed to allocate bam transaction\n");
2556 return -ENOMEM;
2557 }
Abhishek Sahu497d7d82017-08-11 17:09:19 +05302558 } else {
2559 nandc->chan = dma_request_slave_channel(nandc->dev, "rxtx");
2560 if (!nandc->chan) {
2561 dev_err(nandc->dev,
2562 "failed to request slave channel\n");
2563 return -ENODEV;
2564 }
Archit Tanejac76b78d2016-02-03 14:29:50 +05302565 }
2566
2567 INIT_LIST_HEAD(&nandc->desc_list);
2568 INIT_LIST_HEAD(&nandc->host_list);
2569
Marc Gonzalezd45bc582016-07-27 11:23:52 +02002570 nand_hw_control_init(&nandc->controller);
Archit Tanejac76b78d2016-02-03 14:29:50 +05302571
2572 return 0;
2573}
2574
2575static void qcom_nandc_unalloc(struct qcom_nand_controller *nandc)
2576{
Abhishek Sahu497d7d82017-08-11 17:09:19 +05302577 if (nandc->props->is_bam) {
Abhishek Sahu6192ff72017-08-17 17:37:39 +05302578 if (!dma_mapping_error(nandc->dev, nandc->reg_read_dma))
2579 dma_unmap_single(nandc->dev, nandc->reg_read_dma,
2580 MAX_REG_RD *
2581 sizeof(*nandc->reg_read_buf),
2582 DMA_FROM_DEVICE);
2583
Abhishek Sahu497d7d82017-08-11 17:09:19 +05302584 if (nandc->tx_chan)
2585 dma_release_channel(nandc->tx_chan);
2586
2587 if (nandc->rx_chan)
2588 dma_release_channel(nandc->rx_chan);
2589
2590 if (nandc->cmd_chan)
2591 dma_release_channel(nandc->cmd_chan);
2592 } else {
2593 if (nandc->chan)
2594 dma_release_channel(nandc->chan);
2595 }
Archit Tanejac76b78d2016-02-03 14:29:50 +05302596}
2597
2598/* one time setup of a few nand controller registers */
2599static int qcom_nandc_setup(struct qcom_nand_controller *nandc)
2600{
Abhishek Sahu9d43f912017-08-17 17:37:45 +05302601 u32 nand_ctrl;
2602
Archit Tanejac76b78d2016-02-03 14:29:50 +05302603 /* kill onenand */
2604 nandc_write(nandc, SFLASHC_BURST_CFG, 0);
Abhishek Sahucc409b92017-08-17 17:37:47 +05302605 nandc_write(nandc, dev_cmd_reg_addr(nandc, NAND_DEV_CMD_VLD),
2606 NAND_DEV_CMD_VLD_VAL);
Archit Tanejac76b78d2016-02-03 14:29:50 +05302607
Abhishek Sahu9d43f912017-08-17 17:37:45 +05302608 /* enable ADM or BAM DMA */
2609 if (nandc->props->is_bam) {
2610 nand_ctrl = nandc_read(nandc, NAND_CTRL);
2611 nandc_write(nandc, NAND_CTRL, nand_ctrl | BAM_MODE_EN);
2612 } else {
2613 nandc_write(nandc, NAND_FLASH_CHIP_SELECT, DM_EN);
2614 }
Archit Tanejac76b78d2016-02-03 14:29:50 +05302615
2616 /* save the original values of these registers */
Abhishek Sahucc409b92017-08-17 17:37:47 +05302617 nandc->cmd1 = nandc_read(nandc, dev_cmd_reg_addr(nandc, NAND_DEV_CMD1));
Abhishek Sahud8a9b322017-08-11 17:09:16 +05302618 nandc->vld = NAND_DEV_CMD_VLD_VAL;
Archit Tanejac76b78d2016-02-03 14:29:50 +05302619
2620 return 0;
2621}
2622
2623static int qcom_nand_host_init(struct qcom_nand_controller *nandc,
2624 struct qcom_nand_host *host,
2625 struct device_node *dn)
2626{
2627 struct nand_chip *chip = &host->chip;
2628 struct mtd_info *mtd = nand_to_mtd(chip);
2629 struct device *dev = nandc->dev;
2630 int ret;
2631
2632 ret = of_property_read_u32(dn, "reg", &host->cs);
2633 if (ret) {
2634 dev_err(dev, "can't get chip-select\n");
2635 return -ENXIO;
2636 }
2637
2638 nand_set_flash_node(chip, dn);
2639 mtd->name = devm_kasprintf(dev, GFP_KERNEL, "qcom_nand.%d", host->cs);
2640 mtd->owner = THIS_MODULE;
2641 mtd->dev.parent = dev;
2642
2643 chip->cmdfunc = qcom_nandc_command;
2644 chip->select_chip = qcom_nandc_select_chip;
2645 chip->read_byte = qcom_nandc_read_byte;
2646 chip->read_buf = qcom_nandc_read_buf;
2647 chip->write_buf = qcom_nandc_write_buf;
Boris Brezillon4a78cc62017-05-26 17:10:15 +02002648 chip->onfi_set_features = nand_onfi_get_set_features_notsupp;
2649 chip->onfi_get_features = nand_onfi_get_set_features_notsupp;
Archit Tanejac76b78d2016-02-03 14:29:50 +05302650
2651 /*
2652 * the bad block marker is readable only when we read the last codeword
2653 * of a page with ECC disabled. currently, the nand_base and nand_bbt
2654 * helpers don't allow us to read BB from a nand chip with ECC
2655 * disabled (MTD_OPS_PLACE_OOB is set by default). use the block_bad
2656 * and block_markbad helpers until we permanently switch to using
2657 * MTD_OPS_RAW for all drivers (with the help of badblockbits)
2658 */
2659 chip->block_bad = qcom_nandc_block_bad;
2660 chip->block_markbad = qcom_nandc_block_markbad;
2661
2662 chip->controller = &nandc->controller;
2663 chip->options |= NAND_NO_SUBPAGE_WRITE | NAND_USE_BOUNCE_BUFFER |
2664 NAND_SKIP_BBTSCAN;
2665
2666 /* set up initial status value */
2667 host->status = NAND_STATUS_READY | NAND_STATUS_WP;
2668
2669 ret = nand_scan_ident(mtd, 1, NULL);
2670 if (ret)
2671 return ret;
2672
2673 ret = qcom_nand_host_setup(host);
Abhishek Sahu89f51272017-07-19 17:17:58 +05302674
2675 return ret;
2676}
2677
2678static int qcom_nand_mtd_register(struct qcom_nand_controller *nandc,
2679 struct qcom_nand_host *host,
2680 struct device_node *dn)
2681{
2682 struct nand_chip *chip = &host->chip;
2683 struct mtd_info *mtd = nand_to_mtd(chip);
2684 int ret;
Archit Tanejac76b78d2016-02-03 14:29:50 +05302685
2686 ret = nand_scan_tail(mtd);
2687 if (ret)
2688 return ret;
2689
Abhishek Sahu89f51272017-07-19 17:17:58 +05302690 ret = mtd_device_register(mtd, NULL, 0);
2691 if (ret)
2692 nand_cleanup(mtd_to_nand(mtd));
2693
2694 return ret;
2695}
2696
2697static int qcom_probe_nand_devices(struct qcom_nand_controller *nandc)
2698{
2699 struct device *dev = nandc->dev;
2700 struct device_node *dn = dev->of_node, *child;
2701 struct qcom_nand_host *host, *tmp;
2702 int ret;
2703
2704 for_each_available_child_of_node(dn, child) {
2705 host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
2706 if (!host) {
2707 of_node_put(child);
2708 return -ENOMEM;
2709 }
2710
2711 ret = qcom_nand_host_init(nandc, host, child);
2712 if (ret) {
2713 devm_kfree(dev, host);
2714 continue;
2715 }
2716
2717 list_add_tail(&host->node, &nandc->host_list);
2718 }
2719
2720 if (list_empty(&nandc->host_list))
2721 return -ENODEV;
2722
Abhishek Sahucb80f112017-08-17 17:37:40 +05302723 if (nandc->props->is_bam) {
2724 free_bam_transaction(nandc);
2725 nandc->bam_txn = alloc_bam_transaction(nandc);
2726 if (!nandc->bam_txn) {
2727 dev_err(nandc->dev,
2728 "failed to allocate bam transaction\n");
2729 return -ENOMEM;
2730 }
2731 }
2732
Abhishek Sahu89f51272017-07-19 17:17:58 +05302733 list_for_each_entry_safe(host, tmp, &nandc->host_list, node) {
2734 ret = qcom_nand_mtd_register(nandc, host, child);
2735 if (ret) {
2736 list_del(&host->node);
2737 devm_kfree(dev, host);
2738 }
2739 }
2740
2741 if (list_empty(&nandc->host_list))
2742 return -ENODEV;
2743
2744 return 0;
Archit Tanejac76b78d2016-02-03 14:29:50 +05302745}
2746
2747/* parse custom DT properties here */
2748static int qcom_nandc_parse_dt(struct platform_device *pdev)
2749{
2750 struct qcom_nand_controller *nandc = platform_get_drvdata(pdev);
2751 struct device_node *np = nandc->dev->of_node;
2752 int ret;
2753
Abhishek Sahu497d7d82017-08-11 17:09:19 +05302754 if (!nandc->props->is_bam) {
2755 ret = of_property_read_u32(np, "qcom,cmd-crci",
2756 &nandc->cmd_crci);
2757 if (ret) {
2758 dev_err(nandc->dev, "command CRCI unspecified\n");
2759 return ret;
2760 }
Archit Tanejac76b78d2016-02-03 14:29:50 +05302761
Abhishek Sahu497d7d82017-08-11 17:09:19 +05302762 ret = of_property_read_u32(np, "qcom,data-crci",
2763 &nandc->data_crci);
2764 if (ret) {
2765 dev_err(nandc->dev, "data CRCI unspecified\n");
2766 return ret;
2767 }
Archit Tanejac76b78d2016-02-03 14:29:50 +05302768 }
2769
2770 return 0;
2771}
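
/*
 * illustrative DT fragment for the ADM (non-BAM) case parsed above; the
 * unit address and CRCI numbers are made up for the example, only the
 * compatible string and the property names come from this driver:
 *
 *	nand-controller@1ac00000 {
 *		compatible = "qcom,ipq806x-nand";
 *		...
 *		qcom,cmd-crci = <15>;
 *		qcom,data-crci = <3>;
 *	};
 */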
2772
2773static int qcom_nandc_probe(struct platform_device *pdev)
2774{
2775 struct qcom_nand_controller *nandc;
Archit Tanejac76b78d2016-02-03 14:29:50 +05302776 const void *dev_data;
2777 struct device *dev = &pdev->dev;
Archit Tanejac76b78d2016-02-03 14:29:50 +05302778 struct resource *res;
2779 int ret;
2780
2781 nandc = devm_kzalloc(&pdev->dev, sizeof(*nandc), GFP_KERNEL);
2782 if (!nandc)
2783 return -ENOMEM;
2784
2785 platform_set_drvdata(pdev, nandc);
2786 nandc->dev = dev;
2787
2788 dev_data = of_device_get_match_data(dev);
2789 if (!dev_data) {
2790 dev_err(&pdev->dev, "failed to get device data\n");
2791 return -ENODEV;
2792 }
2793
Abhishek Sahu58f1f222017-08-11 17:09:17 +05302794 nandc->props = dev_data;
Archit Tanejac76b78d2016-02-03 14:29:50 +05302795
2796 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2797 nandc->base = devm_ioremap_resource(dev, res);
2798 if (IS_ERR(nandc->base))
2799 return PTR_ERR(nandc->base);
2800
Abhishek Sahu8d6b6d72017-09-25 13:21:26 +05302801 nandc->base_phys = res->start;
Archit Tanejac76b78d2016-02-03 14:29:50 +05302802 nandc->base_dma = phys_to_dma(dev, (phys_addr_t)res->start);
2803
2804 nandc->core_clk = devm_clk_get(dev, "core");
2805 if (IS_ERR(nandc->core_clk))
2806 return PTR_ERR(nandc->core_clk);
2807
2808 nandc->aon_clk = devm_clk_get(dev, "aon");
2809 if (IS_ERR(nandc->aon_clk))
2810 return PTR_ERR(nandc->aon_clk);
2811
2812 ret = qcom_nandc_parse_dt(pdev);
2813 if (ret)
2814 return ret;
2815
2816 ret = qcom_nandc_alloc(nandc);
2817 if (ret)
Abhishek Sahu497d7d82017-08-11 17:09:19 +05302818 goto err_core_clk;
Archit Tanejac76b78d2016-02-03 14:29:50 +05302819
2820 ret = clk_prepare_enable(nandc->core_clk);
2821 if (ret)
2822 goto err_core_clk;
2823
2824 ret = clk_prepare_enable(nandc->aon_clk);
2825 if (ret)
2826 goto err_aon_clk;
2827
2828 ret = qcom_nandc_setup(nandc);
2829 if (ret)
2830 goto err_setup;
2831
Abhishek Sahu89f51272017-07-19 17:17:58 +05302832 ret = qcom_probe_nand_devices(nandc);
2833 if (ret)
2834 goto err_setup;
Archit Tanejac76b78d2016-02-03 14:29:50 +05302835
2836 return 0;
2837
Archit Tanejac76b78d2016-02-03 14:29:50 +05302838err_setup:
2839 clk_disable_unprepare(nandc->aon_clk);
2840err_aon_clk:
2841 clk_disable_unprepare(nandc->core_clk);
2842err_core_clk:
2843 qcom_nandc_unalloc(nandc);
2844
2845 return ret;
2846}
2847
2848static int qcom_nandc_remove(struct platform_device *pdev)
2849{
2850 struct qcom_nand_controller *nandc = platform_get_drvdata(pdev);
2851 struct qcom_nand_host *host;
2852
2853 list_for_each_entry(host, &nandc->host_list, node)
2854 nand_release(nand_to_mtd(&host->chip));
2855
2856 qcom_nandc_unalloc(nandc);
2857
2858 clk_disable_unprepare(nandc->aon_clk);
2859 clk_disable_unprepare(nandc->core_clk);
2860
2861 return 0;
2862}
2863
Abhishek Sahu58f1f222017-08-11 17:09:17 +05302864static const struct qcom_nandc_props ipq806x_nandc_props = {
2865 .ecc_modes = (ECC_RS_4BIT | ECC_BCH_8BIT),
Abhishek Sahu8c5d5d62017-08-11 17:09:18 +05302866 .is_bam = false,
Abhishek Sahucc409b92017-08-17 17:37:47 +05302867 .dev_cmd_reg_start = 0x0,
Abhishek Sahu58f1f222017-08-11 17:09:17 +05302868};
Archit Tanejac76b78d2016-02-03 14:29:50 +05302869
Abhishek Sahua0637832017-08-17 17:37:53 +05302870static const struct qcom_nandc_props ipq4019_nandc_props = {
2871 .ecc_modes = (ECC_BCH_4BIT | ECC_BCH_8BIT),
2872 .is_bam = true,
2873 .dev_cmd_reg_start = 0x0,
2874};
2875
Abhishek Sahudce84762017-08-17 17:37:54 +05302876static const struct qcom_nandc_props ipq8074_nandc_props = {
2877 .ecc_modes = (ECC_BCH_4BIT | ECC_BCH_8BIT),
2878 .is_bam = true,
2879 .dev_cmd_reg_start = 0x7000,
2880};
2881
Archit Tanejac76b78d2016-02-03 14:29:50 +05302882/*
2883 * the .data field holds a qcom_nandc_props pointer describing the
2884 * differences between the supported controller variants
2885 */
2886static const struct of_device_id qcom_nandc_of_match[] = {
Abhishek Sahu58f1f222017-08-11 17:09:17 +05302887 {
2888 .compatible = "qcom,ipq806x-nand",
2889 .data = &ipq806x_nandc_props,
Archit Tanejac76b78d2016-02-03 14:29:50 +05302890 },
Abhishek Sahua0637832017-08-17 17:37:53 +05302891 {
2892 .compatible = "qcom,ipq4019-nand",
2893 .data = &ipq4019_nandc_props,
2894 },
Abhishek Sahudce84762017-08-17 17:37:54 +05302895 {
2896 .compatible = "qcom,ipq8074-nand",
2897 .data = &ipq8074_nandc_props,
2898 },
Archit Tanejac76b78d2016-02-03 14:29:50 +05302899 {}
2900};
2901MODULE_DEVICE_TABLE(of, qcom_nandc_of_match);
2902
2903static struct platform_driver qcom_nandc_driver = {
2904 .driver = {
2905 .name = "qcom-nandc",
2906 .of_match_table = qcom_nandc_of_match,
2907 },
2908 .probe = qcom_nandc_probe,
2909 .remove = qcom_nandc_remove,
2910};
2911module_platform_driver(qcom_nandc_driver);
2912
2913MODULE_AUTHOR("Archit Taneja <architt@codeaurora.org>");
2914MODULE_DESCRIPTION("Qualcomm NAND Controller driver");
2915MODULE_LICENSE("GPL v2");