blob: 6d4a42e393801cf9aa5a9557b2d41308bab27342 [file] [log] [blame]
Vimal Singh67ce04b2009-05-12 13:47:03 -07001/*
2 * Copyright © 2004 Texas Instruments, Jian Zhang <jzhang@ti.com>
3 * Copyright © 2004 Micron Technology Inc.
4 * Copyright © 2004 David Brownell
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10
11#include <linux/platform_device.h>
12#include <linux/dma-mapping.h>
13#include <linux/delay.h>
Sukumar Ghorai4e070372011-01-28 15:42:06 +053014#include <linux/interrupt.h>
vimal singhc276aca2009-06-27 11:07:06 +053015#include <linux/jiffies.h>
16#include <linux/sched.h>
Vimal Singh67ce04b2009-05-12 13:47:03 -070017#include <linux/mtd/mtd.h>
18#include <linux/mtd/nand.h>
19#include <linux/mtd/partitions.h>
20#include <linux/io.h>
Tejun Heo5a0e3ad2010-03-24 17:04:11 +090021#include <linux/slab.h>
Vimal Singh67ce04b2009-05-12 13:47:03 -070022
Tony Lindgrence491cf2009-10-20 09:40:47 -070023#include <plat/dma.h>
24#include <plat/gpmc.h>
25#include <plat/nand.h>
Vimal Singh67ce04b2009-05-12 13:47:03 -070026
#define	DRIVER_NAME	"omap2-nand"
/* Upper bound, in milliseconds, for the busy-waits that drain the GPMC
 * prefetch engine (converted to a loops_per_jiffy-scaled spin count). */
#define	OMAP_NAND_TIMEOUT_MS	5000

/* Single-bit masks for the hardware ECC parity bits once they have been
 * packed into one 32-bit word by gen_true_ecc(): the "even" parity bits
 * (P1e..P2048e) occupy bits 0..11 and the "odd" parity bits
 * (P1o..P2048o) occupy bits 16..27. */
#define NAND_Ecc_P1e		(1 << 0)
#define NAND_Ecc_P2e		(1 << 1)
#define NAND_Ecc_P4e		(1 << 2)
#define NAND_Ecc_P8e		(1 << 3)
#define NAND_Ecc_P16e		(1 << 4)
#define NAND_Ecc_P32e		(1 << 5)
#define NAND_Ecc_P64e		(1 << 6)
#define NAND_Ecc_P128e		(1 << 7)
#define NAND_Ecc_P256e		(1 << 8)
#define NAND_Ecc_P512e		(1 << 9)
#define NAND_Ecc_P1024e		(1 << 10)
#define NAND_Ecc_P2048e		(1 << 11)

#define NAND_Ecc_P1o		(1 << 16)
#define NAND_Ecc_P2o		(1 << 17)
#define NAND_Ecc_P4o		(1 << 18)
#define NAND_Ecc_P8o		(1 << 19)
#define NAND_Ecc_P16o		(1 << 20)
#define NAND_Ecc_P32o		(1 << 21)
#define NAND_Ecc_P64o		(1 << 22)
#define NAND_Ecc_P128o		(1 << 23)
#define NAND_Ecc_P256o		(1 << 24)
#define NAND_Ecc_P512o		(1 << 25)
#define NAND_Ecc_P1024o		(1 << 26)
#define NAND_Ecc_P2048o		(1 << 27)

/* TF: truth flag - collapse any non-zero value to exactly 1 so the
 * extracted parity bit can be shifted into position. */
#define TF(value)	(value ? 1 : 0)

/* Each of the following three groups extracts eight parity bits from the
 * packed word and assembles them into one byte of the "true" ECC; see
 * gen_true_ecc() for where each group is used. */
#define P2048e(a)	(TF(a & NAND_Ecc_P2048e)	<< 0)
#define P2048o(a)	(TF(a & NAND_Ecc_P2048o)	<< 1)
#define P1e(a)		(TF(a & NAND_Ecc_P1e)		<< 2)
#define P1o(a)		(TF(a & NAND_Ecc_P1o)		<< 3)
#define P2e(a)		(TF(a & NAND_Ecc_P2e)		<< 4)
#define P2o(a)		(TF(a & NAND_Ecc_P2o)		<< 5)
#define P4e(a)		(TF(a & NAND_Ecc_P4e)		<< 6)
#define P4o(a)		(TF(a & NAND_Ecc_P4o)		<< 7)

#define P8e(a)		(TF(a & NAND_Ecc_P8e)		<< 0)
#define P8o(a)		(TF(a & NAND_Ecc_P8o)		<< 1)
#define P16e(a)		(TF(a & NAND_Ecc_P16e)		<< 2)
#define P16o(a)		(TF(a & NAND_Ecc_P16o)		<< 3)
#define P32e(a)		(TF(a & NAND_Ecc_P32e)		<< 4)
#define P32o(a)		(TF(a & NAND_Ecc_P32o)		<< 5)
#define P64e(a)		(TF(a & NAND_Ecc_P64e)		<< 6)
#define P64o(a)		(TF(a & NAND_Ecc_P64o)		<< 7)

#define P128e(a)	(TF(a & NAND_Ecc_P128e)		<< 0)
#define P128o(a)	(TF(a & NAND_Ecc_P128o)		<< 1)
#define P256e(a)	(TF(a & NAND_Ecc_P256e)		<< 2)
#define P256o(a)	(TF(a & NAND_Ecc_P256o)		<< 3)
#define P512e(a)	(TF(a & NAND_Ecc_P512e)		<< 4)
#define P512o(a)	(TF(a & NAND_Ecc_P512o)		<< 5)
#define P1024e(a)	(TF(a & NAND_Ecc_P1024e)	<< 6)
#define P1024o(a)	(TF(a & NAND_Ecc_P1024o)	<< 7)

/* "_s" (swapped) variants place the same parity bits at alternative
 * positions; defined here but the positions differ from the groups
 * above - NOTE(review): no user visible in this chunk, presumably for a
 * swapped-ECC layout; confirm against the rest of the file. */
#define P8e_s(a)	(TF(a & NAND_Ecc_P8e)		<< 0)
#define P8o_s(a)	(TF(a & NAND_Ecc_P8o)		<< 1)
#define P16e_s(a)	(TF(a & NAND_Ecc_P16e)		<< 2)
#define P16o_s(a)	(TF(a & NAND_Ecc_P16o)		<< 3)
#define P1e_s(a)	(TF(a & NAND_Ecc_P1e)		<< 4)
#define P1o_s(a)	(TF(a & NAND_Ecc_P1o)		<< 5)
#define P2e_s(a)	(TF(a & NAND_Ecc_P2e)		<< 6)
#define P2o_s(a)	(TF(a & NAND_Ecc_P2o)		<< 7)

#define P4e_s(a)	(TF(a & NAND_Ecc_P4e)		<< 0)
#define P4o_s(a)	(TF(a & NAND_Ecc_P4o)		<< 1)

#ifdef CONFIG_MTD_PARTITIONS
/* partition parsers tried in order when MTD partitioning is enabled */
static const char *part_probes[] = { "cmdlinepart", NULL };
#endif
100
/* Per-device driver state, one instance per probed NAND chip. */
struct omap_nand_info {
	struct nand_hw_control		controller;	/* shared MTD controller lock/wq */
	struct omap_nand_platform_data	*pdata;		/* board-supplied configuration */
	struct mtd_info			mtd;		/* embedded MTD device */
	struct mtd_partition		*parts;		/* parsed partition table */
	struct nand_chip		nand;		/* embedded NAND chip descriptor */
	struct platform_device		*pdev;		/* owning platform device */

	int				gpmc_cs;	/* GPMC chip-select this chip sits on */
	unsigned long			phys_base;	/* physical base of the NAND region */
	struct completion		comp;		/* signalled by DMA callback / GPMC irq */
	int				dma_ch;		/* allocated system-DMA channel */
	int				gpmc_irq;	/* GPMC interrupt number */
	/* direction of the transfer currently serviced by omap_nand_irq() */
	enum {
		OMAP_NAND_IO_READ = 0,	/* read */
		OMAP_NAND_IO_WRITE,	/* write */
	} iomode;
	u_char				*buf;		/* cursor into caller buffer (irq mode) */
	int				buf_len;	/* bytes remaining (irq write mode) */
};
121
122/**
Vimal Singh67ce04b2009-05-12 13:47:03 -0700123 * omap_hwcontrol - hardware specific access to control-lines
124 * @mtd: MTD device structure
125 * @cmd: command to device
126 * @ctrl:
127 * NAND_NCE: bit 0 -> don't care
128 * NAND_CLE: bit 1 -> Command Latch
129 * NAND_ALE: bit 2 -> Address Latch
130 *
131 * NOTE: boards may use different bits for these!!
132 */
133static void omap_hwcontrol(struct mtd_info *mtd, int cmd, unsigned int ctrl)
134{
135 struct omap_nand_info *info = container_of(mtd,
136 struct omap_nand_info, mtd);
Vimal Singh67ce04b2009-05-12 13:47:03 -0700137
Sukumar Ghorai2c01946c2010-07-09 09:14:45 +0000138 if (cmd != NAND_CMD_NONE) {
139 if (ctrl & NAND_CLE)
140 gpmc_nand_write(info->gpmc_cs, GPMC_NAND_COMMAND, cmd);
Vimal Singh67ce04b2009-05-12 13:47:03 -0700141
Sukumar Ghorai2c01946c2010-07-09 09:14:45 +0000142 else if (ctrl & NAND_ALE)
143 gpmc_nand_write(info->gpmc_cs, GPMC_NAND_ADDRESS, cmd);
144
145 else /* NAND_NCE */
146 gpmc_nand_write(info->gpmc_cs, GPMC_NAND_DATA, cmd);
Vimal Singh67ce04b2009-05-12 13:47:03 -0700147 }
Vimal Singh67ce04b2009-05-12 13:47:03 -0700148}
149
150/**
vimal singh59e9c5a2009-07-13 16:26:24 +0530151 * omap_read_buf8 - read data from NAND controller into buffer
152 * @mtd: MTD device structure
153 * @buf: buffer to store date
154 * @len: number of bytes to read
155 */
156static void omap_read_buf8(struct mtd_info *mtd, u_char *buf, int len)
157{
158 struct nand_chip *nand = mtd->priv;
159
160 ioread8_rep(nand->IO_ADDR_R, buf, len);
161}
162
163/**
164 * omap_write_buf8 - write buffer to NAND controller
165 * @mtd: MTD device structure
166 * @buf: data buffer
167 * @len: number of bytes to write
168 */
169static void omap_write_buf8(struct mtd_info *mtd, const u_char *buf, int len)
170{
171 struct omap_nand_info *info = container_of(mtd,
172 struct omap_nand_info, mtd);
173 u_char *p = (u_char *)buf;
Sukumar Ghorai2c01946c2010-07-09 09:14:45 +0000174 u32 status = 0;
vimal singh59e9c5a2009-07-13 16:26:24 +0530175
176 while (len--) {
177 iowrite8(*p++, info->nand.IO_ADDR_W);
Sukumar Ghorai2c01946c2010-07-09 09:14:45 +0000178 /* wait until buffer is available for write */
179 do {
180 status = gpmc_read_status(GPMC_STATUS_BUFFER);
181 } while (!status);
vimal singh59e9c5a2009-07-13 16:26:24 +0530182 }
183}
184
185/**
Vimal Singh67ce04b2009-05-12 13:47:03 -0700186 * omap_read_buf16 - read data from NAND controller into buffer
187 * @mtd: MTD device structure
188 * @buf: buffer to store date
189 * @len: number of bytes to read
190 */
191static void omap_read_buf16(struct mtd_info *mtd, u_char *buf, int len)
192{
193 struct nand_chip *nand = mtd->priv;
194
vimal singh59e9c5a2009-07-13 16:26:24 +0530195 ioread16_rep(nand->IO_ADDR_R, buf, len / 2);
Vimal Singh67ce04b2009-05-12 13:47:03 -0700196}
197
198/**
199 * omap_write_buf16 - write buffer to NAND controller
200 * @mtd: MTD device structure
201 * @buf: data buffer
202 * @len: number of bytes to write
203 */
204static void omap_write_buf16(struct mtd_info *mtd, const u_char * buf, int len)
205{
206 struct omap_nand_info *info = container_of(mtd,
207 struct omap_nand_info, mtd);
208 u16 *p = (u16 *) buf;
Sukumar Ghorai2c01946c2010-07-09 09:14:45 +0000209 u32 status = 0;
Vimal Singh67ce04b2009-05-12 13:47:03 -0700210 /* FIXME try bursts of writesw() or DMA ... */
211 len >>= 1;
212
213 while (len--) {
vimal singh59e9c5a2009-07-13 16:26:24 +0530214 iowrite16(*p++, info->nand.IO_ADDR_W);
Sukumar Ghorai2c01946c2010-07-09 09:14:45 +0000215 /* wait until buffer is available for write */
216 do {
217 status = gpmc_read_status(GPMC_STATUS_BUFFER);
218 } while (!status);
Vimal Singh67ce04b2009-05-12 13:47:03 -0700219 }
220}
vimal singh59e9c5a2009-07-13 16:26:24 +0530221
222/**
223 * omap_read_buf_pref - read data from NAND controller into buffer
224 * @mtd: MTD device structure
225 * @buf: buffer to store date
226 * @len: number of bytes to read
227 */
228static void omap_read_buf_pref(struct mtd_info *mtd, u_char *buf, int len)
229{
230 struct omap_nand_info *info = container_of(mtd,
231 struct omap_nand_info, mtd);
Sukumar Ghorai2c01946c2010-07-09 09:14:45 +0000232 uint32_t r_count = 0;
vimal singh59e9c5a2009-07-13 16:26:24 +0530233 int ret = 0;
234 u32 *p = (u32 *)buf;
235
236 /* take care of subpage reads */
Vimal Singhc3341d02010-01-07 12:16:26 +0530237 if (len % 4) {
238 if (info->nand.options & NAND_BUSWIDTH_16)
239 omap_read_buf16(mtd, buf, len % 4);
240 else
241 omap_read_buf8(mtd, buf, len % 4);
242 p = (u32 *) (buf + len % 4);
243 len -= len % 4;
vimal singh59e9c5a2009-07-13 16:26:24 +0530244 }
vimal singh59e9c5a2009-07-13 16:26:24 +0530245
246 /* configure and start prefetch transfer */
Sukumar Ghorai317379a2011-01-28 15:42:07 +0530247 ret = gpmc_prefetch_enable(info->gpmc_cs,
248 PREFETCH_FIFOTHRESHOLD_MAX, 0x0, len, 0x0);
vimal singh59e9c5a2009-07-13 16:26:24 +0530249 if (ret) {
250 /* PFPW engine is busy, use cpu copy method */
251 if (info->nand.options & NAND_BUSWIDTH_16)
252 omap_read_buf16(mtd, buf, len);
253 else
254 omap_read_buf8(mtd, buf, len);
255 } else {
Sukumar Ghorai2c01946c2010-07-09 09:14:45 +0000256 p = (u32 *) buf;
vimal singh59e9c5a2009-07-13 16:26:24 +0530257 do {
Sukumar Ghorai2c01946c2010-07-09 09:14:45 +0000258 r_count = gpmc_read_status(GPMC_PREFETCH_FIFO_CNT);
259 r_count = r_count >> 2;
260 ioread32_rep(info->nand.IO_ADDR_R, p, r_count);
vimal singh59e9c5a2009-07-13 16:26:24 +0530261 p += r_count;
262 len -= r_count << 2;
263 } while (len);
vimal singh59e9c5a2009-07-13 16:26:24 +0530264 /* disable and stop the PFPW engine */
Sukumar Ghorai948d38e2010-07-09 09:14:44 +0000265 gpmc_prefetch_reset(info->gpmc_cs);
vimal singh59e9c5a2009-07-13 16:26:24 +0530266 }
267}
268
/**
 * omap_write_buf_pref - write buffer to NAND controller
 * @mtd: MTD device structure
 * @buf: data buffer
 * @len: number of bytes to write
 *
 * Streams data through the GPMC prefetch (PFPW) engine in polled write
 * mode, feeding the FIFO with 16-bit writes; falls back to the plain
 * CPU copy helpers when the engine is busy.
 */
static void omap_write_buf_pref(struct mtd_info *mtd,
					const u_char *buf, int len)
{
	struct omap_nand_info *info = container_of(mtd,
						struct omap_nand_info, mtd);
	uint32_t w_count = 0;
	int i = 0, ret = 0;
	u16 *p;
	unsigned long tim, limit;

	/* take care of subpage writes: push one byte so the remaining
	 * length is even and can go out as 16-bit words */
	if (len % 2 != 0) {
		writeb(*buf, info->nand.IO_ADDR_W);
		p = (u16 *)(buf + 1);
		len--;
	}

	/* configure and start prefetch transfer */
	ret = gpmc_prefetch_enable(info->gpmc_cs,
			PREFETCH_FIFOTHRESHOLD_MAX, 0x0, len, 0x1);
	if (ret) {
		/* PFPW engine is busy, use cpu copy method */
		if (info->nand.options & NAND_BUSWIDTH_16)
			omap_write_buf16(mtd, buf, len);
		else
			omap_write_buf8(mtd, buf, len);
	} else {
		/* NOTE(review): this rewinds p to buf even when the odd head
		 * byte above advanced it to buf + 1 - looks like it would
		 * resend the head byte; confirm against current mainline. */
		p = (u16 *) buf;
		while (len) {
			/* write as many halfwords as the FIFO can accept */
			w_count = gpmc_read_status(GPMC_PREFETCH_FIFO_CNT);
			w_count = w_count >> 1;
			for (i = 0; (i < w_count) && len; i++, len -= 2)
				iowrite16(*p++, info->nand.IO_ADDR_W);
		}
		/* wait for data to flushed-out before reset the prefetch */
		tim = 0;
		limit = (loops_per_jiffy *
					msecs_to_jiffies(OMAP_NAND_TIMEOUT_MS));
		while (gpmc_read_status(GPMC_PREFETCH_COUNT) && (tim++ < limit))
			cpu_relax();

		/* disable and stop the PFPW engine */
		gpmc_prefetch_reset(info->gpmc_cs);
	}
}
320
vimal singhdfe32892009-07-13 16:29:16 +0530321/*
322 * omap_nand_dma_cb: callback on the completion of dma transfer
323 * @lch: logical channel
324 * @ch_satuts: channel status
325 * @data: pointer to completion data structure
326 */
327static void omap_nand_dma_cb(int lch, u16 ch_status, void *data)
328{
329 complete((struct completion *) data);
330}
331
332/*
333 * omap_nand_dma_transfer: configer and start dma transfer
334 * @mtd: MTD device structure
335 * @addr: virtual address in RAM of source/destination
336 * @len: number of data bytes to be transferred
337 * @is_write: flag for read/write operation
338 */
339static inline int omap_nand_dma_transfer(struct mtd_info *mtd, void *addr,
340 unsigned int len, int is_write)
341{
342 struct omap_nand_info *info = container_of(mtd,
343 struct omap_nand_info, mtd);
vimal singhdfe32892009-07-13 16:29:16 +0530344 enum dma_data_direction dir = is_write ? DMA_TO_DEVICE :
345 DMA_FROM_DEVICE;
346 dma_addr_t dma_addr;
347 int ret;
Sukumar Ghorai4e070372011-01-28 15:42:06 +0530348 unsigned long tim, limit;
vimal singhdfe32892009-07-13 16:29:16 +0530349
Sukumar Ghorai317379a2011-01-28 15:42:07 +0530350 /* The fifo depth is 64 bytes max.
351 * But configure the FIFO-threahold to 32 to get a sync at each frame
352 * and frame length is 32 bytes.
vimal singhdfe32892009-07-13 16:29:16 +0530353 */
354 int buf_len = len >> 6;
355
356 if (addr >= high_memory) {
357 struct page *p1;
358
359 if (((size_t)addr & PAGE_MASK) !=
360 ((size_t)(addr + len - 1) & PAGE_MASK))
361 goto out_copy;
362 p1 = vmalloc_to_page(addr);
363 if (!p1)
364 goto out_copy;
365 addr = page_address(p1) + ((size_t)addr & ~PAGE_MASK);
366 }
367
368 dma_addr = dma_map_single(&info->pdev->dev, addr, len, dir);
369 if (dma_mapping_error(&info->pdev->dev, dma_addr)) {
370 dev_err(&info->pdev->dev,
371 "Couldn't DMA map a %d byte buffer\n", len);
372 goto out_copy;
373 }
374
375 if (is_write) {
376 omap_set_dma_dest_params(info->dma_ch, 0, OMAP_DMA_AMODE_CONSTANT,
377 info->phys_base, 0, 0);
378 omap_set_dma_src_params(info->dma_ch, 0, OMAP_DMA_AMODE_POST_INC,
379 dma_addr, 0, 0);
380 omap_set_dma_transfer_params(info->dma_ch, OMAP_DMA_DATA_TYPE_S32,
381 0x10, buf_len, OMAP_DMA_SYNC_FRAME,
382 OMAP24XX_DMA_GPMC, OMAP_DMA_DST_SYNC);
383 } else {
384 omap_set_dma_src_params(info->dma_ch, 0, OMAP_DMA_AMODE_CONSTANT,
385 info->phys_base, 0, 0);
386 omap_set_dma_dest_params(info->dma_ch, 0, OMAP_DMA_AMODE_POST_INC,
387 dma_addr, 0, 0);
388 omap_set_dma_transfer_params(info->dma_ch, OMAP_DMA_DATA_TYPE_S32,
389 0x10, buf_len, OMAP_DMA_SYNC_FRAME,
390 OMAP24XX_DMA_GPMC, OMAP_DMA_SRC_SYNC);
391 }
392 /* configure and start prefetch transfer */
Sukumar Ghorai317379a2011-01-28 15:42:07 +0530393 ret = gpmc_prefetch_enable(info->gpmc_cs,
394 PREFETCH_FIFOTHRESHOLD_MAX, 0x1, len, is_write);
vimal singhdfe32892009-07-13 16:29:16 +0530395 if (ret)
Sukumar Ghorai4e070372011-01-28 15:42:06 +0530396 /* PFPW engine is busy, use cpu copy method */
vimal singhdfe32892009-07-13 16:29:16 +0530397 goto out_copy;
398
399 init_completion(&info->comp);
400
401 omap_start_dma(info->dma_ch);
402
403 /* setup and start DMA using dma_addr */
404 wait_for_completion(&info->comp);
Sukumar Ghorai4e070372011-01-28 15:42:06 +0530405 tim = 0;
406 limit = (loops_per_jiffy * msecs_to_jiffies(OMAP_NAND_TIMEOUT_MS));
407 while (gpmc_read_status(GPMC_PREFETCH_COUNT) && (tim++ < limit))
408 cpu_relax();
vimal singhdfe32892009-07-13 16:29:16 +0530409
vimal singhdfe32892009-07-13 16:29:16 +0530410 /* disable and stop the PFPW engine */
Daniel J Bluemanf12f6622010-09-29 21:01:55 +0100411 gpmc_prefetch_reset(info->gpmc_cs);
vimal singhdfe32892009-07-13 16:29:16 +0530412
413 dma_unmap_single(&info->pdev->dev, dma_addr, len, dir);
414 return 0;
415
416out_copy:
417 if (info->nand.options & NAND_BUSWIDTH_16)
418 is_write == 0 ? omap_read_buf16(mtd, (u_char *) addr, len)
419 : omap_write_buf16(mtd, (u_char *) addr, len);
420 else
421 is_write == 0 ? omap_read_buf8(mtd, (u_char *) addr, len)
422 : omap_write_buf8(mtd, (u_char *) addr, len);
423 return 0;
424}
vimal singhdfe32892009-07-13 16:29:16 +0530425
426/**
427 * omap_read_buf_dma_pref - read data from NAND controller into buffer
428 * @mtd: MTD device structure
429 * @buf: buffer to store date
430 * @len: number of bytes to read
431 */
432static void omap_read_buf_dma_pref(struct mtd_info *mtd, u_char *buf, int len)
433{
434 if (len <= mtd->oobsize)
435 omap_read_buf_pref(mtd, buf, len);
436 else
437 /* start transfer in DMA mode */
438 omap_nand_dma_transfer(mtd, buf, len, 0x0);
439}
440
441/**
442 * omap_write_buf_dma_pref - write buffer to NAND controller
443 * @mtd: MTD device structure
444 * @buf: data buffer
445 * @len: number of bytes to write
446 */
447static void omap_write_buf_dma_pref(struct mtd_info *mtd,
448 const u_char *buf, int len)
449{
450 if (len <= mtd->oobsize)
451 omap_write_buf_pref(mtd, buf, len);
452 else
453 /* start transfer in DMA mode */
Vimal Singhbdaefc42010-01-05 12:49:24 +0530454 omap_nand_dma_transfer(mtd, (u_char *) buf, len, 0x1);
vimal singhdfe32892009-07-13 16:29:16 +0530455}
456
Sukumar Ghorai4e070372011-01-28 15:42:06 +0530457/*
458 * omap_nand_irq - GMPC irq handler
459 * @this_irq: gpmc irq number
460 * @dev: omap_nand_info structure pointer is passed here
461 */
462static irqreturn_t omap_nand_irq(int this_irq, void *dev)
463{
464 struct omap_nand_info *info = (struct omap_nand_info *) dev;
465 u32 bytes;
466 u32 irq_stat;
467
468 irq_stat = gpmc_read_status(GPMC_GET_IRQ_STATUS);
469 bytes = gpmc_read_status(GPMC_PREFETCH_FIFO_CNT);
470 bytes = bytes & 0xFFFC; /* io in multiple of 4 bytes */
471 if (info->iomode == OMAP_NAND_IO_WRITE) { /* checks for write io */
472 if (irq_stat & 0x2)
473 goto done;
474
475 if (info->buf_len && (info->buf_len < bytes))
476 bytes = info->buf_len;
477 else if (!info->buf_len)
478 bytes = 0;
479 iowrite32_rep(info->nand.IO_ADDR_W,
480 (u32 *)info->buf, bytes >> 2);
481 info->buf = info->buf + bytes;
482 info->buf_len -= bytes;
483
484 } else {
485 ioread32_rep(info->nand.IO_ADDR_R,
486 (u32 *)info->buf, bytes >> 2);
487 info->buf = info->buf + bytes;
488
489 if (irq_stat & 0x2)
490 goto done;
491 }
492 gpmc_cs_configure(info->gpmc_cs, GPMC_SET_IRQ_STATUS, irq_stat);
493
494 return IRQ_HANDLED;
495
496done:
497 complete(&info->comp);
498 /* disable irq */
499 gpmc_cs_configure(info->gpmc_cs, GPMC_ENABLE_IRQ, 0);
500
501 /* clear status */
502 gpmc_cs_configure(info->gpmc_cs, GPMC_SET_IRQ_STATUS, irq_stat);
503
504 return IRQ_HANDLED;
505}
506
/*
 * omap_read_buf_irq_pref - read data from NAND controller into buffer
 * @mtd: MTD device structure
 * @buf: buffer to store date
 * @len: number of bytes to read
 *
 * Interrupt-driven prefetch read: arms the GPMC FIFO/count events and
 * lets omap_nand_irq() drain the FIFO, sleeping until it signals
 * completion. Falls back to CPU copy when the engine is busy.
 */
static void omap_read_buf_irq_pref(struct mtd_info *mtd, u_char *buf, int len)
{
	struct omap_nand_info *info = container_of(mtd,
						struct omap_nand_info, mtd);
	int ret = 0;

	/* small transfers: polled path is cheaper than irq setup */
	if (len <= mtd->oobsize) {
		omap_read_buf_pref(mtd, buf, len);
		return;
	}

	/* state consumed by omap_nand_irq() */
	info->iomode = OMAP_NAND_IO_READ;
	info->buf = buf;
	init_completion(&info->comp);

	/* configure and start prefetch transfer */
	ret = gpmc_prefetch_enable(info->gpmc_cs,
			PREFETCH_FIFOTHRESHOLD_MAX/2, 0x0, len, 0x0);
	if (ret)
		/* PFPW engine is busy, use cpu copy method */
		goto out_copy;

	info->buf_len = len;
	/* enable irq */
	gpmc_cs_configure(info->gpmc_cs, GPMC_ENABLE_IRQ,
		(GPMC_IRQ_FIFOEVENTENABLE | GPMC_IRQ_COUNT_EVENT));

	/* waiting for read to complete */
	wait_for_completion(&info->comp);

	/* disable and stop the PFPW engine */
	gpmc_prefetch_reset(info->gpmc_cs);
	return;

out_copy:
	if (info->nand.options & NAND_BUSWIDTH_16)
		omap_read_buf16(mtd, buf, len);
	else
		omap_read_buf8(mtd, buf, len);
}
553
/*
 * omap_write_buf_irq_pref - write buffer to NAND controller
 * @mtd: MTD device structure
 * @buf: data buffer
 * @len: number of bytes to write
 *
 * Interrupt-driven prefetch write: arms the GPMC FIFO/count events and
 * lets omap_nand_irq() feed the FIFO, sleeping until it signals
 * completion. Falls back to CPU copy when the engine is busy.
 */
static void omap_write_buf_irq_pref(struct mtd_info *mtd,
					const u_char *buf, int len)
{
	struct omap_nand_info *info = container_of(mtd,
						struct omap_nand_info, mtd);
	int ret = 0;
	unsigned long tim, limit;

	/* small transfers: polled path is cheaper than irq setup */
	if (len <= mtd->oobsize) {
		omap_write_buf_pref(mtd, buf, len);
		return;
	}

	/* state consumed by omap_nand_irq() */
	info->iomode = OMAP_NAND_IO_WRITE;
	info->buf = (u_char *) buf;
	init_completion(&info->comp);

	/* configure and start prefetch transfer : size=24 */
	ret = gpmc_prefetch_enable(info->gpmc_cs,
		(PREFETCH_FIFOTHRESHOLD_MAX * 3) / 8, 0x0, len, 0x1);
	if (ret)
		/* PFPW engine is busy, use cpu copy method */
		goto out_copy;

	info->buf_len = len;
	/* enable irq */
	gpmc_cs_configure(info->gpmc_cs, GPMC_ENABLE_IRQ,
			(GPMC_IRQ_FIFOEVENTENABLE | GPMC_IRQ_COUNT_EVENT));

	/* waiting for write to complete */
	wait_for_completion(&info->comp);
	/* wait for data to flushed-out before reset the prefetch */
	tim = 0;
	limit = (loops_per_jiffy * msecs_to_jiffies(OMAP_NAND_TIMEOUT_MS));
	while (gpmc_read_status(GPMC_PREFETCH_COUNT) && (tim++ < limit))
		cpu_relax();

	/* disable and stop the PFPW engine */
	gpmc_prefetch_reset(info->gpmc_cs);
	return;

out_copy:
	if (info->nand.options & NAND_BUSWIDTH_16)
		omap_write_buf16(mtd, buf, len);
	else
		omap_write_buf8(mtd, buf, len);
}
607
Vimal Singh67ce04b2009-05-12 13:47:03 -0700608/**
609 * omap_verify_buf - Verify chip data against buffer
610 * @mtd: MTD device structure
611 * @buf: buffer containing the data to compare
612 * @len: number of bytes to compare
613 */
614static int omap_verify_buf(struct mtd_info *mtd, const u_char * buf, int len)
615{
616 struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
617 mtd);
618 u16 *p = (u16 *) buf;
619
620 len >>= 1;
621 while (len--) {
622 if (*p++ != cpu_to_le16(readw(info->nand.IO_ADDR_R)))
623 return -EFAULT;
624 }
625
626 return 0;
627}
628
Vimal Singh67ce04b2009-05-12 13:47:03 -0700629/**
630 * gen_true_ecc - This function will generate true ECC value
631 * @ecc_buf: buffer to store ecc code
632 *
633 * This generated true ECC value can be used when correcting
634 * data read from NAND flash memory core
635 */
636static void gen_true_ecc(u8 *ecc_buf)
637{
638 u32 tmp = ecc_buf[0] | (ecc_buf[1] << 16) |
639 ((ecc_buf[2] & 0xF0) << 20) | ((ecc_buf[2] & 0x0F) << 8);
640
641 ecc_buf[0] = ~(P64o(tmp) | P64e(tmp) | P32o(tmp) | P32e(tmp) |
642 P16o(tmp) | P16e(tmp) | P8o(tmp) | P8e(tmp));
643 ecc_buf[1] = ~(P1024o(tmp) | P1024e(tmp) | P512o(tmp) | P512e(tmp) |
644 P256o(tmp) | P256e(tmp) | P128o(tmp) | P128e(tmp));
645 ecc_buf[2] = ~(P4o(tmp) | P4e(tmp) | P2o(tmp) | P2e(tmp) | P1o(tmp) |
646 P1e(tmp) | P2048o(tmp) | P2048e(tmp));
647}
648
/**
 * omap_compare_ecc - Detect (2 bits) and correct (1 bit) error in data
 * @ecc_data1: ecc code from nand spare area
 * @ecc_data2: ecc code from hardware register obtained from hardware ecc
 * @page_data: page data
 *
 * This function compares two ECC's and indicates if there is an error.
 * If the error can be corrected it will be corrected to the buffer.
 *
 * Note: both ECC buffers are modified in place (gen_true_ecc() plus the
 * bit-decomposition below destroys their original contents).
 * Returns 0 if no error remains / a single bit was corrected, -1 if the
 * error is uncorrectable.
 */
static int omap_compare_ecc(u8 *ecc_data1,	/* read from NAND memory */
			    u8 *ecc_data2,	/* read from register */
			    u8 *page_data)
{
	uint	i;
	u8	tmp0_bit[8], tmp1_bit[8], tmp2_bit[8];
	u8	comp0_bit[8], comp1_bit[8], comp2_bit[8];
	u8	ecc_bit[24];
	u8	ecc_sum = 0;
	u8	find_bit = 0;
	uint	find_byte = 0;
	int	isEccFF;

	/* all-0xFF stored ECC usually means an erased page; remembered so
	 * the default branch can treat that case as "no error" */
	isEccFF = ((*(u32 *)ecc_data1 & 0xFFFFFF) == 0xFFFFFF);

	gen_true_ecc(ecc_data1);
	gen_true_ecc(ecc_data2);

	for (i = 0; i <= 2; i++) {
		*(ecc_data1 + i) = ~(*(ecc_data1 + i));
		*(ecc_data2 + i) = ~(*(ecc_data2 + i));
	}

	/* Decompose each ECC byte into its individual bits (LSB first);
	 * the divisions destroy the byte as they go. */
	for (i = 0; i < 8; i++) {
		tmp0_bit[i]	= *ecc_data1 % 2;
		*ecc_data1	= *ecc_data1 / 2;
	}

	for (i = 0; i < 8; i++) {
		tmp1_bit[i]	= *(ecc_data1 + 1) % 2;
		*(ecc_data1 + 1) = *(ecc_data1 + 1) / 2;
	}

	for (i = 0; i < 8; i++) {
		tmp2_bit[i]	= *(ecc_data1 + 2) % 2;
		*(ecc_data1 + 2) = *(ecc_data1 + 2) / 2;
	}

	for (i = 0; i < 8; i++) {
		comp0_bit[i]	= *ecc_data2 % 2;
		*ecc_data2	= *ecc_data2 / 2;
	}

	for (i = 0; i < 8; i++) {
		comp1_bit[i]	= *(ecc_data2 + 1) % 2;
		*(ecc_data2 + 1) = *(ecc_data2 + 1) / 2;
	}

	for (i = 0; i < 8; i++) {
		comp2_bit[i]	= *(ecc_data2 + 2) % 2;
		*(ecc_data2 + 2) = *(ecc_data2 + 2) / 2;
	}

	/* XOR the two decomposed ECCs into a 24-entry syndrome vector */
	for (i = 0; i < 6; i++)
		ecc_bit[i] = tmp2_bit[i + 2] ^ comp2_bit[i + 2];

	for (i = 0; i < 8; i++)
		ecc_bit[i + 6] = tmp0_bit[i] ^ comp0_bit[i];

	for (i = 0; i < 8; i++)
		ecc_bit[i + 14] = tmp1_bit[i] ^ comp1_bit[i];

	ecc_bit[22] = tmp2_bit[0] ^ comp2_bit[0];
	ecc_bit[23] = tmp2_bit[1] ^ comp2_bit[1];

	/* number of differing parity bits classifies the error */
	for (i = 0; i < 24; i++)
		ecc_sum += ecc_bit[i];

	switch (ecc_sum) {
	case 0:
		/* Not reached because this function is not called if
		 *  ECC values are equal
		 */
		return 0;

	case 1:
		/* Uncorrectable error */
		DEBUG(MTD_DEBUG_LEVEL0, "ECC UNCORRECTED_ERROR 1\n");
		return -1;

	case 11:
		/* UN-Correctable error */
		DEBUG(MTD_DEBUG_LEVEL0, "ECC UNCORRECTED_ERROR B\n");
		return -1;

	case 12:
		/* Correctable error: odd syndrome bits encode the failing
		 * byte offset, even ones the failing bit position */
		find_byte = (ecc_bit[23] << 8) +
			    (ecc_bit[21] << 7) +
			    (ecc_bit[19] << 6) +
			    (ecc_bit[17] << 5) +
			    (ecc_bit[15] << 4) +
			    (ecc_bit[13] << 3) +
			    (ecc_bit[11] << 2) +
			    (ecc_bit[9]  << 1) +
			    ecc_bit[7];

		find_bit = (ecc_bit[5] << 2) + (ecc_bit[3] << 1) + ecc_bit[1];

		DEBUG(MTD_DEBUG_LEVEL0, "Correcting single bit ECC error at "
				"offset: %d, bit: %d\n", find_byte, find_bit);

		/* flip the single bad bit back */
		page_data[find_byte] ^= (1 << find_bit);

		return 0;
	default:
		if (isEccFF) {
			/* erased page with all-zero computed ECC: clean */
			if (ecc_data2[0] == 0 &&
			    ecc_data2[1] == 0 &&
			    ecc_data2[2] == 0)
				return 0;
		}
		DEBUG(MTD_DEBUG_LEVEL0, "UNCORRECTED_ERROR default\n");
		return -1;
	}
}
774
775/**
776 * omap_correct_data - Compares the ECC read with HW generated ECC
777 * @mtd: MTD device structure
778 * @dat: page data
779 * @read_ecc: ecc read from nand flash
780 * @calc_ecc: ecc read from HW ECC registers
781 *
782 * Compares the ecc read from nand spare area with ECC registers values
783 * and if ECC's mismached, it will call 'omap_compare_ecc' for error detection
784 * and correction.
785 */
786static int omap_correct_data(struct mtd_info *mtd, u_char *dat,
787 u_char *read_ecc, u_char *calc_ecc)
788{
789 struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
790 mtd);
791 int blockCnt = 0, i = 0, ret = 0;
792
793 /* Ex NAND_ECC_HW12_2048 */
794 if ((info->nand.ecc.mode == NAND_ECC_HW) &&
795 (info->nand.ecc.size == 2048))
796 blockCnt = 4;
797 else
798 blockCnt = 1;
799
800 for (i = 0; i < blockCnt; i++) {
801 if (memcmp(read_ecc, calc_ecc, 3) != 0) {
802 ret = omap_compare_ecc(read_ecc, calc_ecc, dat);
803 if (ret < 0)
804 return ret;
805 }
806 read_ecc += 3;
807 calc_ecc += 3;
808 dat += 512;
809 }
810 return 0;
811}
812
/**
 * omap_calcuate_ecc - Generate non-inverted ECC bytes.
 * @mtd: MTD device structure
 * @dat: The pointer to data on which ecc is computed
 * @ecc_code: The ecc_code buffer
 *
 * Using noninverted ECC can be considered ugly since writing a blank
 * page ie. padding will clear the ECC bytes. This is no problem as long
 * nobody is trying to write data on the seemingly unused page. Reading
 * an erased page will produce an ECC mismatch between generated and read
 * ECC bytes that has to be dealt with separately.
 *
 * Thin wrapper: delegates to the GPMC's hardware ECC engine for this
 * chip-select and propagates its return value.
 */
static int omap_calculate_ecc(struct mtd_info *mtd, const u_char *dat,
				u_char *ecc_code)
{
	struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
							mtd);
	return gpmc_calculate_ecc(info->gpmc_cs, dat, ecc_code);
}
832
833/**
834 * omap_enable_hwecc - This function enables the hardware ecc functionality
835 * @mtd: MTD device structure
836 * @mode: Read/Write mode
837 */
838static void omap_enable_hwecc(struct mtd_info *mtd, int mode)
839{
840 struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
841 mtd);
842 struct nand_chip *chip = mtd->priv;
843 unsigned int dev_width = (chip->options & NAND_BUSWIDTH_16) ? 1 : 0;
Vimal Singh67ce04b2009-05-12 13:47:03 -0700844
Sukumar Ghorai2c01946c2010-07-09 09:14:45 +0000845 gpmc_enable_hwecc(info->gpmc_cs, mode, dev_width, info->nand.ecc.size);
Vimal Singh67ce04b2009-05-12 13:47:03 -0700846}
Sukumar Ghorai2c01946c2010-07-09 09:14:45 +0000847
/**
 * omap_wait - wait until the command is done
 * @mtd: MTD device structure
 * @chip: NAND Chip structure
 *
 * Wait function is called during Program and erase operations and
 * the way it is called from MTD layer, we should wait till the NAND
 * chip is ready after the programming/erase operation has completed.
 *
 * Erase can take up to 400ms and program up to 20ms according to
 * general NAND and SmartMedia specs
 *
 * Returns the last NAND status byte read; NAND_STATUS_FAIL if the
 * status was never read before the timeout.
 */
static int omap_wait(struct mtd_info *mtd, struct nand_chip *chip)
{
	struct nand_chip *this = mtd->priv;
	struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
							mtd);
	unsigned long timeo = jiffies;
	int status = NAND_STATUS_FAIL, state = this->state;

	/* deadline: 400ms for erase, 20ms otherwise */
	if (state == FL_ERASING)
		timeo += (HZ * 400) / 1000;
	else
		timeo += (HZ * 20) / 1000;

	/* issue READ STATUS, then poll the data register for ready */
	gpmc_nand_write(info->gpmc_cs,
			GPMC_NAND_COMMAND, (NAND_CMD_STATUS & 0xFF));
	while (time_before(jiffies, timeo)) {
		status = gpmc_nand_read(info->gpmc_cs, GPMC_NAND_DATA);
		if (status & NAND_STATUS_READY)
			break;
		/* polling loop may run for many ms - let others run */
		cond_resched();
	}
	return status;
}
883
/**
 * omap_dev_ready - calls the platform specific dev_ready function
 * @mtd: MTD device structure
 *
 * Polls the GPMC IRQ status for the wait-pin event (bit 0x100).
 * Returns 1 when the device is considered ready.
 */
static int omap_dev_ready(struct mtd_info *mtd)
{
	unsigned int val = 0;
	struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
							mtd);

	val = gpmc_read_status(GPMC_GET_IRQ_STATUS);
	if ((val & 0x100) == 0x100) {
		/* Clear IRQ Interrupt */
		val |= 0x100;
		val &= ~(0x0);	/* NOTE(review): &= ~0 is a no-op; presumably
				 * leftover from masking other bits */
		gpmc_cs_configure(info->gpmc_cs, GPMC_SET_IRQ_STATUS, val);
	} else {
		/* NOTE(review): this loop re-polls the status but returns 0
		 * ("not ready") once the bit is seen, and falls through to
		 * return 1 after 0x1FF polls - the inverted-looking logic
		 * should be confirmed against the GPMC wait-pin semantics */
		unsigned int cnt = 0;
		while (cnt++ < 0x1FF) {
			if ((val & 0x100) == 0x100)
				return 0;
			val = gpmc_read_status(GPMC_GET_IRQ_STATUS);
		}
	}

	return 1;
}
911
912static int __devinit omap_nand_probe(struct platform_device *pdev)
913{
914 struct omap_nand_info *info;
915 struct omap_nand_platform_data *pdata;
916 int err;
Vimal Singh67ce04b2009-05-12 13:47:03 -0700917
918 pdata = pdev->dev.platform_data;
919 if (pdata == NULL) {
920 dev_err(&pdev->dev, "platform data missing\n");
921 return -ENODEV;
922 }
923
924 info = kzalloc(sizeof(struct omap_nand_info), GFP_KERNEL);
925 if (!info)
926 return -ENOMEM;
927
928 platform_set_drvdata(pdev, info);
929
930 spin_lock_init(&info->controller.lock);
931 init_waitqueue_head(&info->controller.wq);
932
933 info->pdev = pdev;
934
935 info->gpmc_cs = pdata->cs;
Vimal Singh2f70a1e2010-02-15 10:03:33 -0800936 info->phys_base = pdata->phys_base;
Vimal Singh67ce04b2009-05-12 13:47:03 -0700937
938 info->mtd.priv = &info->nand;
939 info->mtd.name = dev_name(&pdev->dev);
940 info->mtd.owner = THIS_MODULE;
941
Sukumar Ghoraid5ce2b62011-01-28 15:42:03 +0530942 info->nand.options = pdata->devsize;
Vimal Singh2f70a1e2010-02-15 10:03:33 -0800943 info->nand.options |= NAND_SKIP_BBTSCAN;
Vimal Singh67ce04b2009-05-12 13:47:03 -0700944
945 /* NAND write protect off */
Sukumar Ghorai2c01946c2010-07-09 09:14:45 +0000946 gpmc_cs_configure(info->gpmc_cs, GPMC_CONFIG_WP, 0);
Vimal Singh67ce04b2009-05-12 13:47:03 -0700947
948 if (!request_mem_region(info->phys_base, NAND_IO_SIZE,
949 pdev->dev.driver->name)) {
950 err = -EBUSY;
Vimal Singh2f70a1e2010-02-15 10:03:33 -0800951 goto out_free_info;
Vimal Singh67ce04b2009-05-12 13:47:03 -0700952 }
953
954 info->nand.IO_ADDR_R = ioremap(info->phys_base, NAND_IO_SIZE);
955 if (!info->nand.IO_ADDR_R) {
956 err = -ENOMEM;
957 goto out_release_mem_region;
958 }
vimal singh59e9c5a2009-07-13 16:26:24 +0530959
Vimal Singh67ce04b2009-05-12 13:47:03 -0700960 info->nand.controller = &info->controller;
961
962 info->nand.IO_ADDR_W = info->nand.IO_ADDR_R;
963 info->nand.cmd_ctrl = omap_hwcontrol;
964
Vimal Singh67ce04b2009-05-12 13:47:03 -0700965 /*
966 * If RDY/BSY line is connected to OMAP then use the omap ready
967 * funcrtion and the generic nand_wait function which reads the status
968 * register after monitoring the RDY/BSY line.Otherwise use a standard
969 * chip delay which is slightly more than tR (AC Timing) of the NAND
970 * device and read status register until you get a failure or success
971 */
972 if (pdata->dev_ready) {
973 info->nand.dev_ready = omap_dev_ready;
974 info->nand.chip_delay = 0;
975 } else {
976 info->nand.waitfunc = omap_wait;
977 info->nand.chip_delay = 50;
978 }
979
Sukumar Ghorai1b0b323c2011-01-28 15:42:04 +0530980 switch (pdata->xfer_type) {
981 case NAND_OMAP_PREFETCH_POLLED:
vimal singh59e9c5a2009-07-13 16:26:24 +0530982 info->nand.read_buf = omap_read_buf_pref;
983 info->nand.write_buf = omap_write_buf_pref;
Sukumar Ghorai1b0b323c2011-01-28 15:42:04 +0530984 break;
vimal singhdfe32892009-07-13 16:29:16 +0530985
Sukumar Ghorai1b0b323c2011-01-28 15:42:04 +0530986 case NAND_OMAP_POLLED:
vimal singh59e9c5a2009-07-13 16:26:24 +0530987 if (info->nand.options & NAND_BUSWIDTH_16) {
988 info->nand.read_buf = omap_read_buf16;
989 info->nand.write_buf = omap_write_buf16;
990 } else {
991 info->nand.read_buf = omap_read_buf8;
992 info->nand.write_buf = omap_write_buf8;
993 }
Sukumar Ghorai1b0b323c2011-01-28 15:42:04 +0530994 break;
995
996 case NAND_OMAP_PREFETCH_DMA:
997 err = omap_request_dma(OMAP24XX_DMA_GPMC, "NAND",
998 omap_nand_dma_cb, &info->comp, &info->dma_ch);
999 if (err < 0) {
1000 info->dma_ch = -1;
1001 dev_err(&pdev->dev, "DMA request failed!\n");
1002 goto out_release_mem_region;
1003 } else {
1004 omap_set_dma_dest_burst_mode(info->dma_ch,
1005 OMAP_DMA_DATA_BURST_16);
1006 omap_set_dma_src_burst_mode(info->dma_ch,
1007 OMAP_DMA_DATA_BURST_16);
1008
1009 info->nand.read_buf = omap_read_buf_dma_pref;
1010 info->nand.write_buf = omap_write_buf_dma_pref;
1011 }
1012 break;
1013
Sukumar Ghorai4e070372011-01-28 15:42:06 +05301014 case NAND_OMAP_PREFETCH_IRQ:
1015 err = request_irq(pdata->gpmc_irq,
1016 omap_nand_irq, IRQF_SHARED, "gpmc-nand", info);
1017 if (err) {
1018 dev_err(&pdev->dev, "requesting irq(%d) error:%d",
1019 pdata->gpmc_irq, err);
1020 goto out_release_mem_region;
1021 } else {
1022 info->gpmc_irq = pdata->gpmc_irq;
1023 info->nand.read_buf = omap_read_buf_irq_pref;
1024 info->nand.write_buf = omap_write_buf_irq_pref;
1025 }
1026 break;
1027
Sukumar Ghorai1b0b323c2011-01-28 15:42:04 +05301028 default:
1029 dev_err(&pdev->dev,
1030 "xfer_type(%d) not supported!\n", pdata->xfer_type);
1031 err = -EINVAL;
1032 goto out_release_mem_region;
vimal singh59e9c5a2009-07-13 16:26:24 +05301033 }
Sukumar Ghorai1b0b323c2011-01-28 15:42:04 +05301034
vimal singh59e9c5a2009-07-13 16:26:24 +05301035 info->nand.verify_buf = omap_verify_buf;
1036
Sukumar Ghoraif3d73f32011-01-28 15:42:08 +05301037 /* selsect the ecc type */
1038 if (pdata->ecc_opt == OMAP_ECC_HAMMING_CODE_DEFAULT)
1039 info->nand.ecc.mode = NAND_ECC_SOFT;
1040 else if (pdata->ecc_opt == OMAP_ECC_HAMMING_CODE_HW) {
1041 info->nand.ecc.bytes = 3;
1042 info->nand.ecc.size = 512;
1043 info->nand.ecc.calculate = omap_calculate_ecc;
1044 info->nand.ecc.hwctl = omap_enable_hwecc;
1045 info->nand.ecc.correct = omap_correct_data;
1046 info->nand.ecc.mode = NAND_ECC_HW;
1047 }
Vimal Singh67ce04b2009-05-12 13:47:03 -07001048
1049 /* DIP switches on some boards change between 8 and 16 bit
1050 * bus widths for flash. Try the other width if the first try fails.
1051 */
1052 if (nand_scan(&info->mtd, 1)) {
1053 info->nand.options ^= NAND_BUSWIDTH_16;
1054 if (nand_scan(&info->mtd, 1)) {
1055 err = -ENXIO;
1056 goto out_release_mem_region;
1057 }
1058 }
1059
Sukumar Ghorai1b0b323c2011-01-28 15:42:04 +05301060
Vimal Singh67ce04b2009-05-12 13:47:03 -07001061#ifdef CONFIG_MTD_PARTITIONS
1062 err = parse_mtd_partitions(&info->mtd, part_probes, &info->parts, 0);
1063 if (err > 0)
1064 add_mtd_partitions(&info->mtd, info->parts, err);
1065 else if (pdata->parts)
1066 add_mtd_partitions(&info->mtd, pdata->parts, pdata->nr_parts);
1067 else
1068#endif
1069 add_mtd_device(&info->mtd);
1070
1071 platform_set_drvdata(pdev, &info->mtd);
1072
1073 return 0;
1074
1075out_release_mem_region:
1076 release_mem_region(info->phys_base, NAND_IO_SIZE);
Vimal Singh67ce04b2009-05-12 13:47:03 -07001077out_free_info:
1078 kfree(info);
1079
1080 return err;
1081}
1082
1083static int omap_nand_remove(struct platform_device *pdev)
1084{
1085 struct mtd_info *mtd = platform_get_drvdata(pdev);
Vimal Singhf35b6ed2010-01-05 16:01:08 +05301086 struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
1087 mtd);
Vimal Singh67ce04b2009-05-12 13:47:03 -07001088
1089 platform_set_drvdata(pdev, NULL);
Sukumar Ghorai1b0b323c2011-01-28 15:42:04 +05301090 if (info->dma_ch != -1)
vimal singhdfe32892009-07-13 16:29:16 +05301091 omap_free_dma(info->dma_ch);
1092
Sukumar Ghorai4e070372011-01-28 15:42:06 +05301093 if (info->gpmc_irq)
1094 free_irq(info->gpmc_irq, info);
1095
Vimal Singh67ce04b2009-05-12 13:47:03 -07001096 /* Release NAND device, its internal structures and partitions */
1097 nand_release(&info->mtd);
Sukumar Ghorai2c01946c2010-07-09 09:14:45 +00001098 iounmap(info->nand.IO_ADDR_R);
Vimal Singh67ce04b2009-05-12 13:47:03 -07001099 kfree(&info->mtd);
1100 return 0;
1101}
1102
/* Platform driver glue: binds probe/remove to the "omap2-nand" device */
static struct platform_driver omap_nand_driver = {
	.probe		= omap_nand_probe,
	.remove		= omap_nand_remove,
	.driver		= {
		.name	= DRIVER_NAME,
		.owner	= THIS_MODULE,
	},
};
1111
1112static int __init omap_nand_init(void)
1113{
Sukumar Ghorai1b0b323c2011-01-28 15:42:04 +05301114 pr_info("%s driver initializing\n", DRIVER_NAME);
vimal singhdfe32892009-07-13 16:29:16 +05301115
Vimal Singh67ce04b2009-05-12 13:47:03 -07001116 return platform_driver_register(&omap_nand_driver);
1117}
1118
/* Module exit point: unregister the platform driver. */
static void __exit omap_nand_exit(void)
{
	platform_driver_unregister(&omap_nand_driver);
}
1123
/* Module registration and metadata */
module_init(omap_nand_init);
module_exit(omap_nand_exit);

MODULE_ALIAS(DRIVER_NAME);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Glue layer for NAND flash on TI OMAP boards");