/*
 * GPMC support functions
 *
 * Copyright (C) 2005-2006 Nokia Corporation
 *
 * Author: Juha Yrjola
 *
 * Copyright (C) 2009 Texas Instruments
 * Added OMAP4 support - Santosh Shilimkar <santosh.shilimkar@ti.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#undef DEBUG

#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/ioport.h>
#include <linux/spinlock.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/interrupt.h>

#include <asm/mach-types.h>
#include <plat/gpmc.h>

#include <plat/cpu.h>
#include <plat/sdrc.h>

#include "soc.h"
#include "common.h"

/* GPMC register offsets */
#define GPMC_REVISION		0x00
#define GPMC_SYSCONFIG		0x10
#define GPMC_SYSSTATUS		0x14
#define GPMC_IRQSTATUS		0x18
#define GPMC_IRQENABLE		0x1c
#define GPMC_TIMEOUT_CONTROL	0x40
#define GPMC_ERR_ADDRESS	0x44
#define GPMC_ERR_TYPE		0x48
#define GPMC_CONFIG		0x50
#define GPMC_STATUS		0x54
#define GPMC_PREFETCH_CONFIG1	0x1e0
#define GPMC_PREFETCH_CONFIG2	0x1e4
#define GPMC_PREFETCH_CONTROL	0x1ec
#define GPMC_PREFETCH_STATUS	0x1f0
#define GPMC_ECC_CONFIG		0x1f4
#define GPMC_ECC_CONTROL	0x1f8
#define GPMC_ECC_SIZE_CONFIG	0x1fc
#define GPMC_ECC1_RESULT	0x200
#define GPMC_ECC_BCH_RESULT_0	0x240	/* not available on OMAP2 */

/* GPMC ECC control settings */
#define GPMC_ECC_CTRL_ECCCLEAR		0x100
#define GPMC_ECC_CTRL_ECCDISABLE	0x000
#define GPMC_ECC_CTRL_ECCREG1		0x001
#define GPMC_ECC_CTRL_ECCREG2		0x002
#define GPMC_ECC_CTRL_ECCREG3		0x003
#define GPMC_ECC_CTRL_ECCREG4		0x004
#define GPMC_ECC_CTRL_ECCREG5		0x005
#define GPMC_ECC_CTRL_ECCREG6		0x006
#define GPMC_ECC_CTRL_ECCREG7		0x007
#define GPMC_ECC_CTRL_ECCREG8		0x008
#define GPMC_ECC_CTRL_ECCREG9		0x009

#define GPMC_CS0_OFFSET		0x60
#define GPMC_CS_SIZE		0x30

#define GPMC_MEM_START		0x00000000
#define GPMC_MEM_END		0x3FFFFFFF
#define BOOT_ROM_SPACE		0x100000	/* 1MB */

#define GPMC_CHUNK_SHIFT	24		/* 16 MB */
#define GPMC_SECTION_SHIFT	28		/* 256 MB */

#define CS_NUM_SHIFT		24
#define ENABLE_PREFETCH		(0x1 << 7)
#define DMA_MPU_MODE		2

/*
 * XXX: Only the NAND IRQs have been considered; currently these are the
 * only ones used.
 */
#define GPMC_NR_IRQ		2

struct gpmc_client_irq	{
	unsigned		irq;
	u32			bitmask;
};

/* Structure to save gpmc cs context */
struct gpmc_cs_config {
	u32 config1;
	u32 config2;
	u32 config3;
	u32 config4;
	u32 config5;
	u32 config6;
	u32 config7;
	int is_valid;
};

/*
 * Structure to save/restore gpmc context
 * to support core off on OMAP3
 */
struct omap3_gpmc_regs {
	u32 sysconfig;
	u32 irqenable;
	u32 timeout_ctrl;
	u32 config;
	u32 prefetch_config1;
	u32 prefetch_config2;
	u32 prefetch_control;
	struct gpmc_cs_config cs_context[GPMC_CS_NUM];
};

static struct gpmc_client_irq	gpmc_client_irq[GPMC_NR_IRQ];
static struct irq_chip		gpmc_irq_chip;
static unsigned			gpmc_irq_start;

static struct resource	gpmc_mem_root;
static struct resource	gpmc_cs_mem[GPMC_CS_NUM];
static DEFINE_SPINLOCK(gpmc_mem_lock);
static unsigned int gpmc_cs_map;	/* flag for chip selects that are initialized */
static int gpmc_ecc_used = -EINVAL;	/* cs using the ecc engine */

static void __iomem *gpmc_base;

static struct clk *gpmc_l3_clk;

static irqreturn_t gpmc_handle_irq(int irq, void *dev);

static void gpmc_write_reg(int idx, u32 val)
{
	__raw_writel(val, gpmc_base + idx);
}

static u32 gpmc_read_reg(int idx)
{
	return __raw_readl(gpmc_base + idx);
}

static void gpmc_cs_write_byte(int cs, int idx, u8 val)
{
	void __iomem *reg_addr;

	reg_addr = gpmc_base + GPMC_CS0_OFFSET + (cs * GPMC_CS_SIZE) + idx;
	__raw_writeb(val, reg_addr);
}

static u8 gpmc_cs_read_byte(int cs, int idx)
{
	void __iomem *reg_addr;

	reg_addr = gpmc_base + GPMC_CS0_OFFSET + (cs * GPMC_CS_SIZE) + idx;
	return __raw_readb(reg_addr);
}

void gpmc_cs_write_reg(int cs, int idx, u32 val)
{
	void __iomem *reg_addr;

	reg_addr = gpmc_base + GPMC_CS0_OFFSET + (cs * GPMC_CS_SIZE) + idx;
	__raw_writel(val, reg_addr);
}

u32 gpmc_cs_read_reg(int cs, int idx)
{
	void __iomem *reg_addr;

	reg_addr = gpmc_base + GPMC_CS0_OFFSET + (cs * GPMC_CS_SIZE) + idx;
	return __raw_readl(reg_addr);
}

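/*
 * Note added for clarity (not in the original sources): each chip select
 * has its own bank of registers, GPMC_CS_SIZE (0x30) bytes apart, starting
 * at GPMC_CS0_OFFSET (0x60).  For example, gpmc_cs_read_reg(2, idx)
 * dereferences gpmc_base + 0x60 + 2 * 0x30 + idx, i.e. gpmc_base + 0xc0 + idx.
 */
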
/* TODO: Add support for gpmc_fck to clock framework and use it */
unsigned long gpmc_get_fclk_period(void)
{
	unsigned long rate = clk_get_rate(gpmc_l3_clk);

	if (rate == 0) {
		printk(KERN_WARNING "gpmc_l3_clk not enabled\n");
		return 0;
	}

	rate /= 1000;
	rate = 1000000000 / rate;	/* In picoseconds */

	return rate;
}

unsigned int gpmc_ns_to_ticks(unsigned int time_ns)
{
	unsigned long tick_ps;

	/* Calculate in picosecs to yield more exact results */
	tick_ps = gpmc_get_fclk_period();

	return (time_ns * 1000 + tick_ps - 1) / tick_ps;
}

unsigned int gpmc_ps_to_ticks(unsigned int time_ps)
{
	unsigned long tick_ps;

	/* Calculate in picosecs to yield more exact results */
	tick_ps = gpmc_get_fclk_period();

	return (time_ps + tick_ps - 1) / tick_ps;
}

unsigned int gpmc_ticks_to_ns(unsigned int ticks)
{
	return ticks * gpmc_get_fclk_period() / 1000;
}

unsigned int gpmc_round_ns_to_ticks(unsigned int time_ns)
{
	unsigned long ticks = gpmc_ns_to_ticks(time_ns);

	return ticks * gpmc_get_fclk_period() / 1000;
}

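/*
 * Worked example (illustrative; assumes a 100 MHz GPMC functional clock,
 * i.e. gpmc_get_fclk_period() == 10000 ps):
 *
 *	gpmc_ns_to_ticks(36)  = (36000 + 9999) / 10000 = 4 ticks (rounded up)
 *	gpmc_ticks_to_ns(4)   = 4 * 10000 / 1000       = 40 ns
 *	gpmc_round_ns_to_ticks(36) therefore returns 40, the value that will
 *	actually be programmed into the timing field.
 */
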
#ifdef DEBUG
static int set_gpmc_timing_reg(int cs, int reg, int st_bit, int end_bit,
			       int time, const char *name)
#else
static int set_gpmc_timing_reg(int cs, int reg, int st_bit, int end_bit,
			       int time)
#endif
{
	u32 l;
	int ticks, mask, nr_bits;

	if (time == 0)
		ticks = 0;
	else
		ticks = gpmc_ns_to_ticks(time);
	nr_bits = end_bit - st_bit + 1;
	if (ticks >= 1 << nr_bits) {
#ifdef DEBUG
		printk(KERN_INFO "GPMC CS%d: %-10s* %3d ns, %3d ticks >= %d\n",
				cs, name, time, ticks, 1 << nr_bits);
#endif
		return -1;
	}

	mask = (1 << nr_bits) - 1;
	l = gpmc_cs_read_reg(cs, reg);
#ifdef DEBUG
	printk(KERN_INFO
		"GPMC CS%d: %-10s: %3d ticks, %3lu ns (was %3i ticks) %3d ns\n",
	       cs, name, ticks, gpmc_get_fclk_period() * ticks / 1000,
			(l >> st_bit) & mask, time);
#endif
	l &= ~(mask << st_bit);
	l |= ticks << st_bit;
	gpmc_cs_write_reg(cs, reg, l);

	return 0;
}

#ifdef DEBUG
#define GPMC_SET_ONE(reg, st, end, field) \
	if (set_gpmc_timing_reg(cs, (reg), (st), (end),		\
			t->field, #field) < 0)			\
		return -1
#else
#define GPMC_SET_ONE(reg, st, end, field) \
	if (set_gpmc_timing_reg(cs, (reg), (st), (end), t->field) < 0) \
		return -1
#endif

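/*
 * For reference (comment added for clarity): with DEBUG undefined,
 *
 *	GPMC_SET_ONE(GPMC_CS_CONFIG2, 0, 3, cs_on);
 *
 * expands to
 *
 *	if (set_gpmc_timing_reg(cs, (GPMC_CS_CONFIG2), (0), (3), t->cs_on) < 0)
 *		return -1;
 *
 * i.e. bits 3:0 of CONFIG2 receive t->cs_on converted from ns to fclk ticks.
 */
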
int gpmc_cs_calc_divider(int cs, unsigned int sync_clk)
{
	int div;
	u32 l;

	l = sync_clk + (gpmc_get_fclk_period() - 1);
	div = l / gpmc_get_fclk_period();
	if (div > 4)
		return -1;
	if (div <= 0)
		div = 1;

	return div;
}

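/*
 * Worked example (illustrative): with a 10000 ps (100 MHz) fclk and a
 * requested sync_clk period of 25000 ps, the divider becomes
 * (25000 + 9999) / 10000 = 3, i.e. GPMC_CLK runs at fclk / 3.  Dividers
 * above 4 cannot be programmed into CONFIG1 and make this function
 * return -1.
 */
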
int gpmc_cs_set_timings(int cs, const struct gpmc_timings *t)
{
	int div;
	u32 l;

	div = gpmc_cs_calc_divider(cs, t->sync_clk);
	if (div < 0)
		return -1;

	GPMC_SET_ONE(GPMC_CS_CONFIG2, 0, 3, cs_on);
	GPMC_SET_ONE(GPMC_CS_CONFIG2, 8, 12, cs_rd_off);
	GPMC_SET_ONE(GPMC_CS_CONFIG2, 16, 20, cs_wr_off);

	GPMC_SET_ONE(GPMC_CS_CONFIG3, 0, 3, adv_on);
	GPMC_SET_ONE(GPMC_CS_CONFIG3, 8, 12, adv_rd_off);
	GPMC_SET_ONE(GPMC_CS_CONFIG3, 16, 20, adv_wr_off);

	GPMC_SET_ONE(GPMC_CS_CONFIG4, 0, 3, oe_on);
	GPMC_SET_ONE(GPMC_CS_CONFIG4, 8, 12, oe_off);
	GPMC_SET_ONE(GPMC_CS_CONFIG4, 16, 19, we_on);
	GPMC_SET_ONE(GPMC_CS_CONFIG4, 24, 28, we_off);

	GPMC_SET_ONE(GPMC_CS_CONFIG5, 0, 4, rd_cycle);
	GPMC_SET_ONE(GPMC_CS_CONFIG5, 8, 12, wr_cycle);
	GPMC_SET_ONE(GPMC_CS_CONFIG5, 16, 20, access);

	GPMC_SET_ONE(GPMC_CS_CONFIG5, 24, 27, page_burst_access);

	if (cpu_is_omap34xx()) {
		GPMC_SET_ONE(GPMC_CS_CONFIG6, 16, 19, wr_data_mux_bus);
		GPMC_SET_ONE(GPMC_CS_CONFIG6, 24, 28, wr_access);
	}

	/* caller is expected to have initialized CONFIG1 to cover
	 * at least sync vs async
	 */
	l = gpmc_cs_read_reg(cs, GPMC_CS_CONFIG1);
	if (l & (GPMC_CONFIG1_READTYPE_SYNC | GPMC_CONFIG1_WRITETYPE_SYNC)) {
#ifdef DEBUG
		printk(KERN_INFO "GPMC CS%d CLK period is %lu ns (div %d)\n",
				cs, (div * gpmc_get_fclk_period()) / 1000, div);
#endif
		l &= ~0x03;
		l |= (div - 1);
		gpmc_cs_write_reg(cs, GPMC_CS_CONFIG1, l);
	}

	return 0;
}

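/*
 * Illustrative use from a board file (hypothetical timing values, not taken
 * from this file): the caller fills a struct gpmc_timings in nanoseconds
 * and lets gpmc_cs_set_timings() convert and program them:
 *
 *	struct gpmc_timings t;
 *
 *	memset(&t, 0, sizeof(t));
 *	t.cs_on = 10;
 *	t.cs_rd_off = 170;
 *	t.cs_wr_off = 170;
 *	t.adv_on = 10;
 *	t.adv_rd_off = 30;
 *	t.adv_wr_off = 30;
 *	t.oe_on = 40;
 *	t.oe_off = 170;
 *	t.we_on = 40;
 *	t.we_off = 170;
 *	t.rd_cycle = 190;
 *	t.wr_cycle = 190;
 *	t.access = 160;
 *	if (gpmc_cs_set_timings(cs, &t) < 0)
 *		pr_err("gpmc: could not program CS%d timings\n", cs);
 */
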
static void gpmc_cs_enable_mem(int cs, u32 base, u32 size)
{
	u32 l;
	u32 mask;

	mask = (1 << GPMC_SECTION_SHIFT) - size;
	l = gpmc_cs_read_reg(cs, GPMC_CS_CONFIG7);
	l &= ~0x3f;
	l |= (base >> GPMC_CHUNK_SHIFT) & 0x3f;
	l &= ~(0x0f << 8);
	l |= ((mask >> GPMC_CHUNK_SHIFT) & 0x0f) << 8;
	l |= GPMC_CONFIG7_CSVALID;
	gpmc_cs_write_reg(cs, GPMC_CS_CONFIG7, l);
}

static void gpmc_cs_disable_mem(int cs)
{
	u32 l;

	l = gpmc_cs_read_reg(cs, GPMC_CS_CONFIG7);
	l &= ~GPMC_CONFIG7_CSVALID;
	gpmc_cs_write_reg(cs, GPMC_CS_CONFIG7, l);
}

static void gpmc_cs_get_memconf(int cs, u32 *base, u32 *size)
{
	u32 l;
	u32 mask;

	l = gpmc_cs_read_reg(cs, GPMC_CS_CONFIG7);
	*base = (l & 0x3f) << GPMC_CHUNK_SHIFT;
	mask = (l >> 8) & 0x0f;
	*size = (1 << GPMC_SECTION_SHIFT) - (mask << GPMC_CHUNK_SHIFT);
}

static int gpmc_cs_mem_enabled(int cs)
{
	u32 l;

	l = gpmc_cs_read_reg(cs, GPMC_CS_CONFIG7);
	return l & GPMC_CONFIG7_CSVALID;
}

int gpmc_cs_set_reserved(int cs, int reserved)
{
	if (cs >= GPMC_CS_NUM)
		return -ENODEV;

	gpmc_cs_map &= ~(1 << cs);
	gpmc_cs_map |= (reserved ? 1 : 0) << cs;

	return 0;
}

int gpmc_cs_reserved(int cs)
{
	if (cs >= GPMC_CS_NUM)
		return -ENODEV;

	return gpmc_cs_map & (1 << cs);
}

static unsigned long gpmc_mem_align(unsigned long size)
{
	int order;

	size = (size - 1) >> (GPMC_CHUNK_SHIFT - 1);
	order = GPMC_CHUNK_SHIFT - 1;
	do {
		size >>= 1;
		order++;
	} while (size);
	size = 1 << order;
	return size;
}

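/*
 * Worked example (added for clarity): gpmc_mem_align() rounds a request up
 * to the next power of two, with a floor of 1 << GPMC_CHUNK_SHIFT (16 MB),
 * matching the granularity of the CONFIG7 base/mask fields:
 *
 *	gpmc_mem_align(SZ_1M)      -> 16 MB
 *	gpmc_mem_align(SZ_16M)     -> 16 MB
 *	gpmc_mem_align(SZ_16M + 1) -> 32 MB
 */
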
static int gpmc_cs_insert_mem(int cs, unsigned long base, unsigned long size)
{
	struct resource *res = &gpmc_cs_mem[cs];
	int r;

	size = gpmc_mem_align(size);
	spin_lock(&gpmc_mem_lock);
	res->start = base;
	res->end = base + size - 1;
	r = request_resource(&gpmc_mem_root, res);
	spin_unlock(&gpmc_mem_lock);

	return r;
}

int gpmc_cs_request(int cs, unsigned long size, unsigned long *base)
{
	struct resource *res = &gpmc_cs_mem[cs];
	int r = -1;

	if (cs >= GPMC_CS_NUM)
		return -ENODEV;

	size = gpmc_mem_align(size);
	if (size > (1 << GPMC_SECTION_SHIFT))
		return -ENOMEM;

	spin_lock(&gpmc_mem_lock);
	if (gpmc_cs_reserved(cs)) {
		r = -EBUSY;
		goto out;
	}
	if (gpmc_cs_mem_enabled(cs))
		r = adjust_resource(res, res->start & ~(size - 1), size);
	if (r < 0)
		r = allocate_resource(&gpmc_mem_root, res, size, 0, ~0,
				      size, NULL, NULL);
	if (r < 0)
		goto out;

	gpmc_cs_enable_mem(cs, res->start, resource_size(res));
	*base = res->start;
	gpmc_cs_set_reserved(cs, 1);
out:
	spin_unlock(&gpmc_mem_lock);
	return r;
}
EXPORT_SYMBOL(gpmc_cs_request);

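/*
 * Illustrative board-file usage (hypothetical device, not from this file):
 *
 *	unsigned long cs_mem_base;
 *
 *	if (gpmc_cs_request(4, SZ_16M, &cs_mem_base) < 0) {
 *		pr_err("Failed to request GPMC memory for ethernet\n");
 *		return;
 *	}
 *	pr_info("CS4 decodes 16 MB at 0x%08lx\n", cs_mem_base);
 */
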
void gpmc_cs_free(int cs)
{
	spin_lock(&gpmc_mem_lock);
	if (cs >= GPMC_CS_NUM || cs < 0 || !gpmc_cs_reserved(cs)) {
		printk(KERN_ERR "Trying to free non-reserved GPMC CS%d\n", cs);
		BUG();
		spin_unlock(&gpmc_mem_lock);
		return;
	}
	gpmc_cs_disable_mem(cs);
	release_resource(&gpmc_cs_mem[cs]);
	gpmc_cs_set_reserved(cs, 0);
	spin_unlock(&gpmc_mem_lock);
}
EXPORT_SYMBOL(gpmc_cs_free);

/**
 * gpmc_read_status - read back one of the GPMC status values
 * @cmd: command type (which status to read)
 * @return status
 */
int gpmc_read_status(int cmd)
{
	int	status = -EINVAL;
	u32	regval = 0;

	switch (cmd) {
	case GPMC_GET_IRQ_STATUS:
		status = gpmc_read_reg(GPMC_IRQSTATUS);
		break;

	case GPMC_PREFETCH_FIFO_CNT:
		regval = gpmc_read_reg(GPMC_PREFETCH_STATUS);
		status = GPMC_PREFETCH_STATUS_FIFO_CNT(regval);
		break;

	case GPMC_PREFETCH_COUNT:
		regval = gpmc_read_reg(GPMC_PREFETCH_STATUS);
		status = GPMC_PREFETCH_STATUS_COUNT(regval);
		break;

	case GPMC_STATUS_BUFFER:
		regval = gpmc_read_reg(GPMC_STATUS);
		/* 1 : buffer is available to write */
		status = regval & GPMC_STATUS_BUFF_EMPTY;
		break;

	default:
		printk(KERN_ERR "gpmc_read_status: Not supported\n");
	}
	return status;
}
EXPORT_SYMBOL(gpmc_read_status);

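/*
 * Illustrative use (hypothetical caller): a flash driver can poll the write
 * buffer before pushing more data, since GPMC_STATUS_BUFFER reads back
 * non-zero once the buffer can accept a write:
 *
 *	while (!gpmc_read_status(GPMC_STATUS_BUFFER))
 *		cpu_relax();
 */
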
/**
 * gpmc_cs_configure - configure a gpmc chip select or global setting
 * @cs: chip select number
 * @cmd: command type
 * @wval: value to write
 * @return status of the operation
 */
int gpmc_cs_configure(int cs, int cmd, int wval)
{
	int err = 0;
	u32 regval = 0;

	switch (cmd) {
	case GPMC_ENABLE_IRQ:
		gpmc_write_reg(GPMC_IRQENABLE, wval);
		break;

	case GPMC_SET_IRQ_STATUS:
		gpmc_write_reg(GPMC_IRQSTATUS, wval);
		break;

	case GPMC_CONFIG_WP:
		regval = gpmc_read_reg(GPMC_CONFIG);
		if (wval)
			regval &= ~GPMC_CONFIG_WRITEPROTECT; /* WP is ON */
		else
			regval |= GPMC_CONFIG_WRITEPROTECT;  /* WP is OFF */
		gpmc_write_reg(GPMC_CONFIG, regval);
		break;

	case GPMC_CONFIG_RDY_BSY:
		regval = gpmc_cs_read_reg(cs, GPMC_CS_CONFIG1);
		if (wval)
			regval |= WR_RD_PIN_MONITORING;
		else
			regval &= ~WR_RD_PIN_MONITORING;
		gpmc_cs_write_reg(cs, GPMC_CS_CONFIG1, regval);
		break;

	case GPMC_CONFIG_DEV_SIZE:
		regval = gpmc_cs_read_reg(cs, GPMC_CS_CONFIG1);

		/* clear 2 target bits */
		regval &= ~GPMC_CONFIG1_DEVICESIZE(3);

		/* set the proper value */
		regval |= GPMC_CONFIG1_DEVICESIZE(wval);

		gpmc_cs_write_reg(cs, GPMC_CS_CONFIG1, regval);
		break;

	case GPMC_CONFIG_DEV_TYPE:
		regval = gpmc_cs_read_reg(cs, GPMC_CS_CONFIG1);
		regval |= GPMC_CONFIG1_DEVICETYPE(wval);
		if (wval == GPMC_DEVICETYPE_NOR)
			regval |= GPMC_CONFIG1_MUXADDDATA;
		gpmc_cs_write_reg(cs, GPMC_CS_CONFIG1, regval);
		break;

	default:
		printk(KERN_ERR "gpmc_configure_cs: Not supported\n");
		err = -EINVAL;
	}

	return err;
}
EXPORT_SYMBOL(gpmc_cs_configure);

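/*
 * Illustrative use (hypothetical caller): a NAND board file could enable
 * ready/busy pin monitoring and program a 16-bit device width (DEVICESIZE
 * field value 1) on its chip select:
 *
 *	gpmc_cs_configure(cs, GPMC_CONFIG_RDY_BSY, 1);
 *	gpmc_cs_configure(cs, GPMC_CONFIG_DEV_SIZE, 1);
 */
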
/**
 * gpmc_nand_read - nand specific read access request
 * @cs: chip select number
 * @cmd: command type
 */
int gpmc_nand_read(int cs, int cmd)
{
	int rval = -EINVAL;

	switch (cmd) {
	case GPMC_NAND_DATA:
		rval = gpmc_cs_read_byte(cs, GPMC_CS_NAND_DATA);
		break;

	default:
		printk(KERN_ERR "gpmc_read_nand_ctrl: Not supported\n");
	}
	return rval;
}
EXPORT_SYMBOL(gpmc_nand_read);

/**
 * gpmc_nand_write - nand specific write request
 * @cs: chip select number
 * @cmd: command type
 * @wval: value to write
 */
int gpmc_nand_write(int cs, int cmd, int wval)
{
	int err = 0;

	switch (cmd) {
	case GPMC_NAND_COMMAND:
		gpmc_cs_write_byte(cs, GPMC_CS_NAND_COMMAND, wval);
		break;

	case GPMC_NAND_ADDRESS:
		gpmc_cs_write_byte(cs, GPMC_CS_NAND_ADDRESS, wval);
		break;

	case GPMC_NAND_DATA:
		gpmc_cs_write_byte(cs, GPMC_CS_NAND_DATA, wval);
		break;

	default:
		printk(KERN_ERR "gpmc_write_nand_ctrl: Not supported\n");
		err = -EINVAL;
	}
	return err;
}
EXPORT_SYMBOL(gpmc_nand_write);

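/*
 * Illustrative NAND access sequence (hypothetical; a NAND driver would go
 * through these helpers in the same way): issue the READ ID command
 * (opcode 0x90) with a zero address cycle, then pull two ID bytes from the
 * data register:
 *
 *	gpmc_nand_write(cs, GPMC_NAND_COMMAND, 0x90);
 *	gpmc_nand_write(cs, GPMC_NAND_ADDRESS, 0x00);
 *	manuf_id  = gpmc_nand_read(cs, GPMC_NAND_DATA);
 *	device_id = gpmc_nand_read(cs, GPMC_NAND_DATA);
 */
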
/**
 * gpmc_prefetch_enable - configures and starts prefetch transfer
 * @cs: cs (chip select) number
 * @fifo_th: fifo threshold to be used for read/write
 * @dma_mode: dma mode enable (1) or disable (0)
 * @u32_count: number of bytes to be transferred
 * @is_write: prefetch read(0) or write post(1) mode
 */
int gpmc_prefetch_enable(int cs, int fifo_th, int dma_mode,
				unsigned int u32_count, int is_write)
{
	if (fifo_th > PREFETCH_FIFOTHRESHOLD_MAX) {
		pr_err("gpmc: fifo threshold is not supported\n");
		return -1;
	} else if (!(gpmc_read_reg(GPMC_PREFETCH_CONTROL))) {
		/* Set the amount of bytes to be prefetched */
		gpmc_write_reg(GPMC_PREFETCH_CONFIG2, u32_count);

		/* Set dma/mpu mode, the prefetch read / post write and
		 * enable the engine. Set which cs has requested it.
		 */
		gpmc_write_reg(GPMC_PREFETCH_CONFIG1, ((cs << CS_NUM_SHIFT) |
					PREFETCH_FIFOTHRESHOLD(fifo_th) |
					ENABLE_PREFETCH |
					(dma_mode << DMA_MPU_MODE) |
					(0x1 & is_write)));

		/* Start the prefetch engine */
		gpmc_write_reg(GPMC_PREFETCH_CONTROL, 0x1);
	} else {
		return -EBUSY;
	}

	return 0;
}
EXPORT_SYMBOL(gpmc_prefetch_enable);

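/*
 * Illustrative use (hypothetical caller; the OMAP NAND driver uses a
 * similar sequence): start a CPU-driven (non-DMA) prefetch read of 2048
 * bytes with a FIFO threshold of 64, drain the FIFO while
 * gpmc_read_status(GPMC_PREFETCH_FIFO_CNT) reports data, then tear the
 * engine down with gpmc_prefetch_reset():
 *
 *	ret = gpmc_prefetch_enable(cs, 64, 0, 2048, 0);
 *	if (ret < 0)
 *		return ret;
 *	...
 *	gpmc_prefetch_reset(cs);
 */
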
/**
 * gpmc_prefetch_reset - disables and stops the prefetch engine
 * @cs: chip select number the prefetch engine was started for
 */
int gpmc_prefetch_reset(int cs)
{
	u32 config1;

	/* check if the same module/cs is trying to reset */
	config1 = gpmc_read_reg(GPMC_PREFETCH_CONFIG1);
	if (((config1 >> CS_NUM_SHIFT) & 0x7) != cs)
		return -EINVAL;

	/* Stop the PFPW engine */
	gpmc_write_reg(GPMC_PREFETCH_CONTROL, 0x0);

	/* Reset/disable the PFPW engine */
	gpmc_write_reg(GPMC_PREFETCH_CONFIG1, 0x0);

	return 0;
}
EXPORT_SYMBOL(gpmc_prefetch_reset);

void gpmc_update_nand_reg(struct gpmc_nand_regs *reg, int cs)
{
	reg->gpmc_status = gpmc_base + GPMC_STATUS;
	reg->gpmc_nand_command = gpmc_base + GPMC_CS0_OFFSET +
				GPMC_CS_NAND_COMMAND + GPMC_CS_SIZE * cs;
	reg->gpmc_nand_address = gpmc_base + GPMC_CS0_OFFSET +
				GPMC_CS_NAND_ADDRESS + GPMC_CS_SIZE * cs;
	reg->gpmc_nand_data = gpmc_base + GPMC_CS0_OFFSET +
				GPMC_CS_NAND_DATA + GPMC_CS_SIZE * cs;
	reg->gpmc_prefetch_config1 = gpmc_base + GPMC_PREFETCH_CONFIG1;
	reg->gpmc_prefetch_config2 = gpmc_base + GPMC_PREFETCH_CONFIG2;
	reg->gpmc_prefetch_control = gpmc_base + GPMC_PREFETCH_CONTROL;
	reg->gpmc_prefetch_status = gpmc_base + GPMC_PREFETCH_STATUS;
	reg->gpmc_ecc_config = gpmc_base + GPMC_ECC_CONFIG;
	reg->gpmc_ecc_control = gpmc_base + GPMC_ECC_CONTROL;
	reg->gpmc_ecc_size_config = gpmc_base + GPMC_ECC_SIZE_CONFIG;
	reg->gpmc_ecc1_result = gpmc_base + GPMC_ECC1_RESULT;
	reg->gpmc_bch_result0 = gpmc_base + GPMC_ECC_BCH_RESULT_0;
}

int gpmc_get_client_irq(unsigned irq_config)
{
	int i;

	if (hweight32(irq_config) > 1)
		return 0;

	for (i = 0; i < GPMC_NR_IRQ; i++)
		if (gpmc_client_irq[i].bitmask & irq_config)
			return gpmc_client_irq[i].irq;

	return 0;
}

static int gpmc_irq_endis(unsigned irq, bool endis)
{
	int i;
	u32 regval;

	for (i = 0; i < GPMC_NR_IRQ; i++)
		if (irq == gpmc_client_irq[i].irq) {
			regval = gpmc_read_reg(GPMC_IRQENABLE);
			if (endis)
				regval |= gpmc_client_irq[i].bitmask;
			else
				regval &= ~gpmc_client_irq[i].bitmask;
			gpmc_write_reg(GPMC_IRQENABLE, regval);
			break;
		}

	return 0;
}

static void gpmc_irq_disable(struct irq_data *p)
{
	gpmc_irq_endis(p->irq, false);
}

static void gpmc_irq_enable(struct irq_data *p)
{
	gpmc_irq_endis(p->irq, true);
}

static void gpmc_irq_noop(struct irq_data *data) { }

static unsigned int gpmc_irq_noop_ret(struct irq_data *data) { return 0; }

static int gpmc_setup_irq(int gpmc_irq)
{
	int i;
	u32 regval;

	if (!gpmc_irq)
		return -EINVAL;

	gpmc_irq_start = irq_alloc_descs(-1, 0, GPMC_NR_IRQ, 0);
	if (IS_ERR_VALUE(gpmc_irq_start)) {
		pr_err("irq_alloc_descs failed\n");
		return gpmc_irq_start;
	}

	gpmc_irq_chip.name = "gpmc";
	gpmc_irq_chip.irq_startup = gpmc_irq_noop_ret;
	gpmc_irq_chip.irq_enable = gpmc_irq_enable;
	gpmc_irq_chip.irq_disable = gpmc_irq_disable;
	gpmc_irq_chip.irq_shutdown = gpmc_irq_noop;
	gpmc_irq_chip.irq_ack = gpmc_irq_noop;
	gpmc_irq_chip.irq_mask = gpmc_irq_noop;
	gpmc_irq_chip.irq_unmask = gpmc_irq_noop;

	gpmc_client_irq[0].bitmask = GPMC_IRQ_FIFOEVENTENABLE;
	gpmc_client_irq[1].bitmask = GPMC_IRQ_COUNT_EVENT;

	for (i = 0; i < GPMC_NR_IRQ; i++) {
		gpmc_client_irq[i].irq = gpmc_irq_start + i;
		irq_set_chip_and_handler(gpmc_client_irq[i].irq,
					&gpmc_irq_chip, handle_simple_irq);
		set_irq_flags(gpmc_client_irq[i].irq,
				IRQF_VALID | IRQF_NOAUTOEN);
	}

	/* Disable interrupts */
	gpmc_write_reg(GPMC_IRQENABLE, 0);

	/* clear interrupts */
	regval = gpmc_read_reg(GPMC_IRQSTATUS);
	gpmc_write_reg(GPMC_IRQSTATUS, regval);

	return request_irq(gpmc_irq, gpmc_handle_irq, 0, "gpmc", NULL);
}

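/*
 * Illustrative client usage (hypothetical driver code): a NAND driver can
 * map the FIFO event to a Linux IRQ number and register its own handler,
 * where my_fifo_isr and ctx are placeholders:
 *
 *	int irq = gpmc_get_client_irq(GPMC_IRQ_FIFOEVENTENABLE);
 *
 *	if (irq && request_irq(irq, my_fifo_isr, 0, "my-nand-fifo", ctx))
 *		pr_warn("could not request GPMC FIFO event irq\n");
 */
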
static void __init gpmc_mem_init(void)
{
	int cs;
	unsigned long boot_rom_space = 0;

	/* never allocate the first 1 MB (boot ROM space), to facilitate
	 * bug detection, even if we didn't boot from ROM.
	 */
	boot_rom_space = BOOT_ROM_SPACE;
	/* In apollon the CS0 is mapped as 0x0000 0000 */
	if (machine_is_omap_apollon())
		boot_rom_space = 0;
	gpmc_mem_root.start = GPMC_MEM_START + boot_rom_space;
	gpmc_mem_root.end = GPMC_MEM_END;

	/* Reserve all regions that have been set up by the bootloader */
	for (cs = 0; cs < GPMC_CS_NUM; cs++) {
		u32 base, size;

		if (!gpmc_cs_mem_enabled(cs))
			continue;
		gpmc_cs_get_memconf(cs, &base, &size);
		if (gpmc_cs_insert_mem(cs, base, size) < 0)
			BUG();
	}
}

static int __init gpmc_init(void)
{
	u32 l;
	int ret = -EINVAL;
	int gpmc_irq;
	char *ck = NULL;

	if (cpu_is_omap24xx()) {
		ck = "core_l3_ck";
		if (cpu_is_omap2420())
			l = OMAP2420_GPMC_BASE;
		else
			l = OMAP34XX_GPMC_BASE;
		gpmc_irq = 20 + OMAP_INTC_START;
	} else if (cpu_is_omap34xx()) {
		ck = "gpmc_fck";
		l = OMAP34XX_GPMC_BASE;
		gpmc_irq = 20 + OMAP_INTC_START;
	} else if (cpu_is_omap44xx() || soc_is_omap54xx()) {
		/* Base address and irq number are the same for OMAP4/5 */
		ck = "gpmc_ck";
		l = OMAP44XX_GPMC_BASE;
		gpmc_irq = 20 + OMAP44XX_IRQ_GIC_START;
	}

	if (WARN_ON(!ck))
		return ret;

	gpmc_l3_clk = clk_get(NULL, ck);
	if (IS_ERR(gpmc_l3_clk)) {
		printk(KERN_ERR "Could not get GPMC clock %s\n", ck);
		BUG();
	}

	gpmc_base = ioremap(l, SZ_4K);
	if (!gpmc_base) {
		clk_put(gpmc_l3_clk);
		printk(KERN_ERR "Could not get GPMC register memory\n");
		BUG();
	}

	clk_enable(gpmc_l3_clk);

	l = gpmc_read_reg(GPMC_REVISION);
	printk(KERN_INFO "GPMC revision %d.%d\n", (l >> 4) & 0x0f, l & 0x0f);
	/* Set smart idle mode and automatic L3 clock gating */
	l = gpmc_read_reg(GPMC_SYSCONFIG);
	l &= ~(0x03 << 3);
	l |= (0x02 << 3) | (1 << 0);
	gpmc_write_reg(GPMC_SYSCONFIG, l);
	gpmc_mem_init();

	ret = gpmc_setup_irq(gpmc_irq);
	if (ret)
		pr_err("gpmc: could not claim irq %d: err %d\n",
			gpmc_irq, ret);
	return ret;
}
postcore_initcall(gpmc_init);

static irqreturn_t gpmc_handle_irq(int irq, void *dev)
{
	int i;
	u32 regval;

	regval = gpmc_read_reg(GPMC_IRQSTATUS);

	if (!regval)
		return IRQ_NONE;

	for (i = 0; i < GPMC_NR_IRQ; i++)
		if (regval & gpmc_client_irq[i].bitmask)
			generic_handle_irq(gpmc_client_irq[i].irq);

	gpmc_write_reg(GPMC_IRQSTATUS, regval);

	return IRQ_HANDLED;
}

#ifdef CONFIG_ARCH_OMAP3
static struct omap3_gpmc_regs gpmc_context;

void omap3_gpmc_save_context(void)
{
	int i;

	gpmc_context.sysconfig = gpmc_read_reg(GPMC_SYSCONFIG);
	gpmc_context.irqenable = gpmc_read_reg(GPMC_IRQENABLE);
	gpmc_context.timeout_ctrl = gpmc_read_reg(GPMC_TIMEOUT_CONTROL);
	gpmc_context.config = gpmc_read_reg(GPMC_CONFIG);
	gpmc_context.prefetch_config1 = gpmc_read_reg(GPMC_PREFETCH_CONFIG1);
	gpmc_context.prefetch_config2 = gpmc_read_reg(GPMC_PREFETCH_CONFIG2);
	gpmc_context.prefetch_control = gpmc_read_reg(GPMC_PREFETCH_CONTROL);
	for (i = 0; i < GPMC_CS_NUM; i++) {
		gpmc_context.cs_context[i].is_valid = gpmc_cs_mem_enabled(i);
		if (gpmc_context.cs_context[i].is_valid) {
			gpmc_context.cs_context[i].config1 =
				gpmc_cs_read_reg(i, GPMC_CS_CONFIG1);
			gpmc_context.cs_context[i].config2 =
				gpmc_cs_read_reg(i, GPMC_CS_CONFIG2);
			gpmc_context.cs_context[i].config3 =
				gpmc_cs_read_reg(i, GPMC_CS_CONFIG3);
			gpmc_context.cs_context[i].config4 =
				gpmc_cs_read_reg(i, GPMC_CS_CONFIG4);
			gpmc_context.cs_context[i].config5 =
				gpmc_cs_read_reg(i, GPMC_CS_CONFIG5);
			gpmc_context.cs_context[i].config6 =
				gpmc_cs_read_reg(i, GPMC_CS_CONFIG6);
			gpmc_context.cs_context[i].config7 =
				gpmc_cs_read_reg(i, GPMC_CS_CONFIG7);
		}
	}
}

void omap3_gpmc_restore_context(void)
{
	int i;

	gpmc_write_reg(GPMC_SYSCONFIG, gpmc_context.sysconfig);
	gpmc_write_reg(GPMC_IRQENABLE, gpmc_context.irqenable);
	gpmc_write_reg(GPMC_TIMEOUT_CONTROL, gpmc_context.timeout_ctrl);
	gpmc_write_reg(GPMC_CONFIG, gpmc_context.config);
	gpmc_write_reg(GPMC_PREFETCH_CONFIG1, gpmc_context.prefetch_config1);
	gpmc_write_reg(GPMC_PREFETCH_CONFIG2, gpmc_context.prefetch_config2);
	gpmc_write_reg(GPMC_PREFETCH_CONTROL, gpmc_context.prefetch_control);
	for (i = 0; i < GPMC_CS_NUM; i++) {
		if (gpmc_context.cs_context[i].is_valid) {
			gpmc_cs_write_reg(i, GPMC_CS_CONFIG1,
				gpmc_context.cs_context[i].config1);
			gpmc_cs_write_reg(i, GPMC_CS_CONFIG2,
				gpmc_context.cs_context[i].config2);
			gpmc_cs_write_reg(i, GPMC_CS_CONFIG3,
				gpmc_context.cs_context[i].config3);
			gpmc_cs_write_reg(i, GPMC_CS_CONFIG4,
				gpmc_context.cs_context[i].config4);
			gpmc_cs_write_reg(i, GPMC_CS_CONFIG5,
				gpmc_context.cs_context[i].config5);
			gpmc_cs_write_reg(i, GPMC_CS_CONFIG6,
				gpmc_context.cs_context[i].config6);
			gpmc_cs_write_reg(i, GPMC_CS_CONFIG7,
				gpmc_context.cs_context[i].config7);
		}
	}
}
#endif /* CONFIG_ARCH_OMAP3 */

/**
 * gpmc_enable_hwecc - enable hardware ecc functionality
 * @cs: chip select number
 * @mode: read/write mode
 * @dev_width: device bus width (1 for x16, 0 for x8)
 * @ecc_size: bytes for which ECC will be generated
 */
int gpmc_enable_hwecc(int cs, int mode, int dev_width, int ecc_size)
{
	unsigned int val;

	/* check if ecc module is in use */
	if (gpmc_ecc_used != -EINVAL)
		return -EINVAL;

	gpmc_ecc_used = cs;

	/* clear ecc and enable bits */
	gpmc_write_reg(GPMC_ECC_CONTROL,
			GPMC_ECC_CTRL_ECCCLEAR |
			GPMC_ECC_CTRL_ECCREG1);

	/* program ecc and result sizes */
	val = ((((ecc_size >> 1) - 1) << 22) | (0x0000000F));
	gpmc_write_reg(GPMC_ECC_SIZE_CONFIG, val);

	switch (mode) {
	case GPMC_ECC_READ:
	case GPMC_ECC_WRITE:
		gpmc_write_reg(GPMC_ECC_CONTROL,
				GPMC_ECC_CTRL_ECCCLEAR |
				GPMC_ECC_CTRL_ECCREG1);
		break;
	case GPMC_ECC_READSYN:
		gpmc_write_reg(GPMC_ECC_CONTROL,
				GPMC_ECC_CTRL_ECCCLEAR |
				GPMC_ECC_CTRL_ECCDISABLE);
		break;
	default:
		printk(KERN_ERR "Error: Unrecognized Mode[%d]!\n", mode);
		break;
	}

	/* (ECC 16 or 8 bit col) | ( CS ) | ECC Enable */
	val = (dev_width << 7) | (cs << 1) | (0x1);
	gpmc_write_reg(GPMC_ECC_CONFIG, val);
	return 0;
}
EXPORT_SYMBOL_GPL(gpmc_enable_hwecc);

/**
 * gpmc_calculate_ecc - generate non-inverted ecc bytes
 * @cs: chip select number
 * @dat: data pointer over which ecc is computed
 * @ecc_code: ecc code buffer
 *
 * Using non-inverted ECC is considered ugly since writing a blank
 * page (padding) will clear the ECC bytes. This is not a problem as long
 * as no one is trying to write data on the seemingly unused page. Reading
 * an erased page will produce an ECC mismatch between generated and read
 * ECC bytes that has to be dealt with separately.
 */
int gpmc_calculate_ecc(int cs, const u_char *dat, u_char *ecc_code)
{
	unsigned int val = 0x0;

	if (gpmc_ecc_used != cs)
		return -EINVAL;

	/* read ecc result */
	val = gpmc_read_reg(GPMC_ECC1_RESULT);
	*ecc_code++ = val;		/* P128e, ..., P1e */
	*ecc_code++ = val >> 16;	/* P128o, ..., P1o */
	/* P2048o, P1024o, P512o, P256o, P2048e, P1024e, P512e, P256e */
	*ecc_code++ = ((val >> 8) & 0x0f) | ((val >> 20) & 0xf0);

	gpmc_ecc_used = -EINVAL;
	return 0;
}
EXPORT_SYMBOL_GPL(gpmc_calculate_ecc);

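/*
 * Illustrative 1-bit Hamming ECC flow (hypothetical caller; the OMAP NAND
 * driver follows the same arm-then-read-back pattern): enable the engine
 * before transferring a 512-byte step of a page, then collect the three
 * ECC bytes:
 *
 *	gpmc_enable_hwecc(cs, GPMC_ECC_WRITE, 1, 512);
 *	... write 512 bytes of page data to the NAND device ...
 *	gpmc_calculate_ecc(cs, dat, ecc_calc);
 */
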
#ifdef CONFIG_ARCH_OMAP3

/**
 * gpmc_init_hwecc_bch - initialize hardware BCH ecc functionality
 * @cs: chip select number
 * @nsectors: how many 512-byte sectors to process
 * @nerrors: how many errors to correct per sector (4 or 8)
 *
 * This function must be executed before any call to gpmc_enable_hwecc_bch.
 */
int gpmc_init_hwecc_bch(int cs, int nsectors, int nerrors)
{
	/* check if ecc module is in use */
	if (gpmc_ecc_used != -EINVAL)
		return -EINVAL;

	/* support only OMAP3 class */
	if (!cpu_is_omap34xx()) {
		printk(KERN_ERR "BCH ecc is not supported on this CPU\n");
		return -EINVAL;
	}

	/*
	 * For now, assume 4-bit mode is only supported on OMAP3630 ES1.x, x>=1.
	 * Other chips may be added if confirmed to work.
	 */
	if ((nerrors == 4) &&
	    (!cpu_is_omap3630() || (GET_OMAP_REVISION() == 0))) {
		printk(KERN_ERR "BCH 4-bit mode is not supported on this CPU\n");
		return -EINVAL;
	}

	/* sanity check */
	if (nsectors > 8) {
		printk(KERN_ERR "BCH cannot process %d sectors (max is 8)\n",
		       nsectors);
		return -EINVAL;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(gpmc_init_hwecc_bch);

/**
 * gpmc_enable_hwecc_bch - enable hardware BCH ecc functionality
 * @cs: chip select number
 * @mode: read/write mode
 * @dev_width: device bus width (1 for x16, 0 for x8)
 * @nsectors: how many 512-byte sectors to process
 * @nerrors: how many errors to correct per sector (4 or 8)
 */
int gpmc_enable_hwecc_bch(int cs, int mode, int dev_width, int nsectors,
			  int nerrors)
{
	unsigned int val;

	/* check if ecc module is in use */
	if (gpmc_ecc_used != -EINVAL)
		return -EINVAL;

	gpmc_ecc_used = cs;

	/* clear ecc and enable bits */
	gpmc_write_reg(GPMC_ECC_CONTROL, 0x1);

	/*
	 * When using BCH, sector size is hardcoded to 512 bytes.
	 * Here we are using wrapping mode 6 both for reading and writing, with:
	 *  size0 = 0  (no additional protected byte in spare area)
	 *  size1 = 32 (skip 32 nibbles = 16 bytes per sector in spare area)
	 */
	gpmc_write_reg(GPMC_ECC_SIZE_CONFIG, (32 << 22) | (0 << 12));

	/* BCH configuration */
	val = ((1 << 16) |				/* enable BCH */
	       (((nerrors == 8) ? 1 : 0) << 12) |	/* 8 or 4 bits */
	       (0x06 << 8) |				/* wrap mode = 6 */
	       (dev_width << 7) |			/* bus width */
	       (((nsectors - 1) & 0x7) << 4) |		/* number of sectors */
	       (cs << 1) |				/* ECC CS */
	       (0x1));					/* enable ECC */

	gpmc_write_reg(GPMC_ECC_CONFIG, val);
	gpmc_write_reg(GPMC_ECC_CONTROL, 0x101);
	return 0;
}
EXPORT_SYMBOL_GPL(gpmc_enable_hwecc_bch);

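/*
 * Illustrative BCH8 flow (hypothetical caller): check capability once, then
 * arm the engine per page and collect 13 ECC bytes for each 512-byte
 * sector:
 *
 *	if (gpmc_init_hwecc_bch(cs, 4, 8) == 0) {
 *		gpmc_enable_hwecc_bch(cs, GPMC_ECC_WRITE, 1, 4, 8);
 *		... transfer a 4 x 512-byte page ...
 *		gpmc_calculate_ecc_bch8(cs, dat, ecc_calc);
 *	}
 */
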
/**
 * gpmc_calculate_ecc_bch4 - Generate 7 ecc bytes per sector of 512 data bytes
 * @cs: chip select number
 * @dat: The pointer to data on which ecc is computed
 * @ecc: The ecc output buffer
 */
int gpmc_calculate_ecc_bch4(int cs, const u_char *dat, u_char *ecc)
{
	int i;
	unsigned long nsectors, reg, val1, val2;

	if (gpmc_ecc_used != cs)
		return -EINVAL;

	nsectors = ((gpmc_read_reg(GPMC_ECC_CONFIG) >> 4) & 0x7) + 1;

	for (i = 0; i < nsectors; i++) {

		reg = GPMC_ECC_BCH_RESULT_0 + 16 * i;

		/* Read hw-computed remainder */
		val1 = gpmc_read_reg(reg + 0);
		val2 = gpmc_read_reg(reg + 4);

		/*
		 * Add constant polynomial to remainder, in order to get an ecc
		 * sequence of 0xFFs for a buffer filled with 0xFFs; and
		 * left-justify the resulting polynomial.
		 */
		*ecc++ = 0x28 ^ ((val2 >> 12) & 0xFF);
		*ecc++ = 0x13 ^ ((val2 >> 4) & 0xFF);
		*ecc++ = 0xcc ^ (((val2 & 0xF) << 4) | ((val1 >> 28) & 0xF));
		*ecc++ = 0x39 ^ ((val1 >> 20) & 0xFF);
		*ecc++ = 0x96 ^ ((val1 >> 12) & 0xFF);
		*ecc++ = 0xac ^ ((val1 >> 4) & 0xFF);
		*ecc++ = 0x7f ^ ((val1 & 0xF) << 4);
	}

	gpmc_ecc_used = -EINVAL;
	return 0;
}
EXPORT_SYMBOL_GPL(gpmc_calculate_ecc_bch4);

/**
 * gpmc_calculate_ecc_bch8 - Generate 13 ecc bytes per block of 512 data bytes
 * @cs: chip select number
 * @dat: The pointer to data on which ecc is computed
 * @ecc: The ecc output buffer
 */
int gpmc_calculate_ecc_bch8(int cs, const u_char *dat, u_char *ecc)
{
	int i;
	unsigned long nsectors, reg, val1, val2, val3, val4;

	if (gpmc_ecc_used != cs)
		return -EINVAL;

	nsectors = ((gpmc_read_reg(GPMC_ECC_CONFIG) >> 4) & 0x7) + 1;

	for (i = 0; i < nsectors; i++) {

		reg = GPMC_ECC_BCH_RESULT_0 + 16 * i;

		/* Read hw-computed remainder */
		val1 = gpmc_read_reg(reg + 0);
		val2 = gpmc_read_reg(reg + 4);
		val3 = gpmc_read_reg(reg + 8);
		val4 = gpmc_read_reg(reg + 12);

		/*
		 * Add constant polynomial to remainder, in order to get an ecc
		 * sequence of 0xFFs for a buffer filled with 0xFFs.
		 */
		*ecc++ = 0xef ^ (val4 & 0xFF);
		*ecc++ = 0x51 ^ ((val3 >> 24) & 0xFF);
		*ecc++ = 0x2e ^ ((val3 >> 16) & 0xFF);
		*ecc++ = 0x09 ^ ((val3 >> 8) & 0xFF);
		*ecc++ = 0xed ^ (val3 & 0xFF);
		*ecc++ = 0x93 ^ ((val2 >> 24) & 0xFF);
		*ecc++ = 0x9a ^ ((val2 >> 16) & 0xFF);
		*ecc++ = 0xc2 ^ ((val2 >> 8) & 0xFF);
		*ecc++ = 0x97 ^ (val2 & 0xFF);
		*ecc++ = 0x79 ^ ((val1 >> 24) & 0xFF);
		*ecc++ = 0xe5 ^ ((val1 >> 16) & 0xFF);
		*ecc++ = 0x24 ^ ((val1 >> 8) & 0xFF);
		*ecc++ = 0xb5 ^ (val1 & 0xFF);
	}

	gpmc_ecc_used = -EINVAL;
	return 0;
}
EXPORT_SYMBOL_GPL(gpmc_calculate_ecc_bch8);

#endif /* CONFIG_ARCH_OMAP3 */