/*
 * Common Flash Interface support:
 *   AMD & Fujitsu Standard Vendor Command Set (ID 0x0002)
 *
 * Copyright (C) 2000 Crossnet Co. <info@crossnet.co.jp>
 * Copyright (C) 2004 Arcom Control Systems Ltd <linux@arcom.com>
 * Copyright (C) 2005 MontaVista Software Inc. <source@mvista.com>
 *
 * 2_by_8 routines added by Simon Munton
 *
 * 4_by_16 work by Carolyn J. Smith
 *
 * XIP support hooks by Vitaly Wool (based on code for Intel flash
 * by Nicolas Pitre)
 *
 * 25/09/2008 Christopher Moore: TopBottom fixup for many Macronix with CFI V1.0
 *
 * Occasionally maintained by Thayne Harbaugh tharbaugh at lnxi dot com
 *
 * This code is GPL
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <asm/io.h>
#include <asm/byteorder.h>

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/reboot.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/mtd/map.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/cfi.h>
#include <linux/mtd/xip.h>

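/*
 * Build-time tuning knobs:
 * - AMD_BOOTLOC_BUG enables the boot-block fixup below for pre-1.1 CFI
 *   chips whose TopBottom field cannot be trusted.
 * - FORCE_WORD_WRITE, when set to 1, disables the buffered-write fixup so
 *   all programming is done word by word.
 * - MAX_WORD_RETRIES bounds how many times a failed word program is retried.
 */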
#define AMD_BOOTLOC_BUG
#define FORCE_WORD_WRITE 0

#define MAX_WORD_RETRIES 3

#define SST49LF004B		0x0060
#define SST49LF040B		0x0050
#define SST49LF008A		0x005a
#define AT49BV6416		0x00d6

static int cfi_amdstd_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_amdstd_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_amdstd_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_amdstd_erase_chip(struct mtd_info *, struct erase_info *);
static int cfi_amdstd_erase_varsize(struct mtd_info *, struct erase_info *);
static void cfi_amdstd_sync (struct mtd_info *);
static int cfi_amdstd_suspend (struct mtd_info *);
static void cfi_amdstd_resume (struct mtd_info *);
static int cfi_amdstd_reboot(struct notifier_block *, unsigned long, void *);
static int cfi_amdstd_get_fact_prot_info(struct mtd_info *, size_t,
					 size_t *, struct otp_info *);
static int cfi_amdstd_get_user_prot_info(struct mtd_info *, size_t,
					 size_t *, struct otp_info *);
static int cfi_amdstd_secsi_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_amdstd_read_fact_prot_reg(struct mtd_info *, loff_t, size_t,
					 size_t *, u_char *);
static int cfi_amdstd_read_user_prot_reg(struct mtd_info *, loff_t, size_t,
					 size_t *, u_char *);
static int cfi_amdstd_write_user_prot_reg(struct mtd_info *, loff_t, size_t,
					  size_t *, u_char *);
static int cfi_amdstd_lock_user_prot_reg(struct mtd_info *, loff_t, size_t);

static int cfi_amdstd_panic_write(struct mtd_info *mtd, loff_t to, size_t len,
				  size_t *retlen, const u_char *buf);

static void cfi_amdstd_destroy(struct mtd_info *);

struct mtd_info *cfi_cmdset_0002(struct map_info *, int);
static struct mtd_info *cfi_amdstd_setup (struct mtd_info *);

static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr);
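/* fwh_lock.h relies on the get_chip()/put_chip() prototypes above, hence the include here. */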
#include "fwh_lock.h"

static int cfi_atmel_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
static int cfi_atmel_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len);

static int cfi_ppb_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
static int cfi_ppb_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
static int cfi_ppb_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len);

static struct mtd_chip_driver cfi_amdstd_chipdrv = {
	.probe		= NULL, /* Not usable directly */
	.destroy	= cfi_amdstd_destroy,
	.name		= "cfi_cmdset_0002",
	.module		= THIS_MODULE
};


/* #define DEBUG_CFI_FEATURES */
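/* Define DEBUG_CFI_FEATURES to have cfi_tell_features() dump the
   Amd/Fujitsu extended query fields at probe time. */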


#ifdef DEBUG_CFI_FEATURES
static void cfi_tell_features(struct cfi_pri_amdstd *extp)
{
	const char* erase_suspend[3] = {
		"Not supported", "Read only", "Read/write"
	};
	const char* top_bottom[6] = {
		"No WP", "8x8KiB sectors at top & bottom, no WP",
		"Bottom boot", "Top boot",
		"Uniform, Bottom WP", "Uniform, Top WP"
	};

	printk(" Silicon revision: %d\n", extp->SiliconRevision >> 1);
	printk(" Address sensitive unlock: %s\n",
	       (extp->SiliconRevision & 1) ? "Not required" : "Required");

	if (extp->EraseSuspend < ARRAY_SIZE(erase_suspend))
		printk(" Erase Suspend: %s\n", erase_suspend[extp->EraseSuspend]);
	else
		printk(" Erase Suspend: Unknown value %d\n", extp->EraseSuspend);

	if (extp->BlkProt == 0)
		printk(" Block protection: Not supported\n");
	else
		printk(" Block protection: %d sectors per group\n", extp->BlkProt);


	printk(" Temporary block unprotect: %s\n",
	       extp->TmpBlkUnprotect ? "Supported" : "Not supported");
	printk(" Block protect/unprotect scheme: %d\n", extp->BlkProtUnprot);
	printk(" Number of simultaneous operations: %d\n", extp->SimultaneousOps);
	printk(" Burst mode: %s\n",
	       extp->BurstMode ? "Supported" : "Not supported");
	if (extp->PageMode == 0)
		printk(" Page mode: Not supported\n");
	else
		printk(" Page mode: %d word page\n", extp->PageMode << 2);

	printk(" Vpp Supply Minimum Program/Erase Voltage: %d.%d V\n",
	       extp->VppMin >> 4, extp->VppMin & 0xf);
	printk(" Vpp Supply Maximum Program/Erase Voltage: %d.%d V\n",
	       extp->VppMax >> 4, extp->VppMax & 0xf);

	if (extp->TopBottom < ARRAY_SIZE(top_bottom))
		printk(" Top/Bottom Boot Block: %s\n", top_bottom[extp->TopBottom]);
	else
		printk(" Top/Bottom Boot Block: Unknown value %d\n", extp->TopBottom);
}
#endif

#ifdef AMD_BOOTLOC_BUG
/* Wheee. Bring me the head of someone at AMD. */
static void fixup_amd_bootblock(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
	__u8 major = extp->MajorVersion;
	__u8 minor = extp->MinorVersion;

	if (((major << 8) | minor) < 0x3131) {
		/* CFI version 1.0 => don't trust bootloc */

		pr_debug("%s: JEDEC Vendor ID is 0x%02X Device ID is 0x%02X\n",
			map->name, cfi->mfr, cfi->id);

		/* AFAICS all 29LV400 with a bottom boot block have a device ID
		 * of 0x22BA in 16-bit mode and 0xBA in 8-bit mode.
		 * These were badly detected as they have the 0x80 bit set
		 * so treat them as a special case.
		 */
		if (((cfi->id == 0xBA) || (cfi->id == 0x22BA)) &&

			/* Macronix added CFI to their 2nd generation
			 * MX29LV400C B/T but AFAICS no other 29LV400 (AMD,
			 * Fujitsu, Spansion, EON, ESI and older Macronix)
			 * has CFI.
			 *
			 * Therefore also check the manufacturer.
			 * This reduces the risk of false detection due to
			 * the 8-bit device ID.
			 */
			(cfi->mfr == CFI_MFR_MACRONIX)) {
			pr_debug("%s: Macronix MX29LV400C with bottom boot block"
				" detected\n", map->name);
			extp->TopBottom = 2;	/* bottom boot */
		} else
		if (cfi->id & 0x80) {
			printk(KERN_WARNING "%s: JEDEC Device ID is 0x%02X. Assuming broken CFI table.\n", map->name, cfi->id);
			extp->TopBottom = 3;	/* top boot */
		} else {
			extp->TopBottom = 2;	/* bottom boot */
		}

		pr_debug("%s: AMD CFI PRI V%c.%c has no boot block field;"
			" deduced %s from Device ID\n", map->name, major, minor,
			extp->TopBottom == 2 ? "bottom" : "top");
	}
}
#endif

static void fixup_use_write_buffers(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	if (cfi->cfiq->BufWriteTimeoutTyp) {
		pr_debug("Using buffer write method\n");
		mtd->_write = cfi_amdstd_write_buffers;
	}
}

/* Atmel chips don't use the same PRI format as AMD chips */
static void fixup_convert_atmel_pri(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
	struct cfi_pri_atmel atmel_pri;

	memcpy(&atmel_pri, extp, sizeof(atmel_pri));
	memset((char *)extp + 5, 0, sizeof(*extp) - 5);

	if (atmel_pri.Features & 0x02)
		extp->EraseSuspend = 2;

	/* Some chips got it backwards... */
	if (cfi->id == AT49BV6416) {
		if (atmel_pri.BottomBoot)
			extp->TopBottom = 3;
		else
			extp->TopBottom = 2;
	} else {
		if (atmel_pri.BottomBoot)
			extp->TopBottom = 2;
		else
			extp->TopBottom = 3;
	}

	/* burst write mode not supported */
	cfi->cfiq->BufWriteTimeoutTyp = 0;
	cfi->cfiq->BufWriteTimeoutMax = 0;
}

static void fixup_use_secsi(struct mtd_info *mtd)
{
	/* Setup for chips with a secsi area */
	mtd->_read_user_prot_reg = cfi_amdstd_secsi_read;
	mtd->_read_fact_prot_reg = cfi_amdstd_secsi_read;
}

static void fixup_use_erase_chip(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	if ((cfi->cfiq->NumEraseRegions == 1) &&
	    ((cfi->cfiq->EraseRegionInfo[0] & 0xffff) == 0)) {
		mtd->_erase = cfi_amdstd_erase_chip;
	}

}

/*
 * Some Atmel chips (e.g. the AT49BV6416) power-up with all sectors
 * locked by default.
 */
static void fixup_use_atmel_lock(struct mtd_info *mtd)
{
	mtd->_lock = cfi_atmel_lock;
	mtd->_unlock = cfi_atmel_unlock;
	mtd->flags |= MTD_POWERUP_LOCK;
}

static void fixup_old_sst_eraseregion(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	/*
	 * These flashes report two separate eraseblock regions based on the
	 * sector_erase-size and block_erase-size, although they both operate on the
	 * same memory. This is not allowed according to CFI, so we just pick the
	 * sector_erase-size.
	 */
	cfi->cfiq->NumEraseRegions = 1;
}

static void fixup_sst39vf(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	fixup_old_sst_eraseregion(mtd);

	cfi->addr_unlock1 = 0x5555;
	cfi->addr_unlock2 = 0x2AAA;
}

static void fixup_sst39vf_rev_b(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	fixup_old_sst_eraseregion(mtd);

	cfi->addr_unlock1 = 0x555;
	cfi->addr_unlock2 = 0x2AA;

	cfi->sector_erase_cmd = CMD(0x50);
}

static void fixup_sst38vf640x_sectorsize(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	fixup_sst39vf_rev_b(mtd);

	/*
	 * CFI reports 1024 sectors (0x03ff+1) of 64KBytes (0x0100*256) where
	 * it should report a size of 8KBytes (0x0020*256).
	 */
	cfi->cfiq->EraseRegionInfo[0] = 0x002003ff;
	pr_warn("%s: Bad 38VF640x CFI data; adjusting sector size from 64 to 8KiB\n",
		mtd->name);
}

static void fixup_s29gl064n_sectors(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	if ((cfi->cfiq->EraseRegionInfo[0] & 0xffff) == 0x003f) {
		cfi->cfiq->EraseRegionInfo[0] |= 0x0040;
		pr_warn("%s: Bad S29GL064N CFI data; adjust from 64 to 128 sectors\n",
			mtd->name);
	}
}

static void fixup_s29gl032n_sectors(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	if ((cfi->cfiq->EraseRegionInfo[1] & 0xffff) == 0x007e) {
		cfi->cfiq->EraseRegionInfo[1] &= ~0x0040;
		pr_warn("%s: Bad S29GL032N CFI data; adjust from 127 to 63 sectors\n",
			mtd->name);
	}
}

static void fixup_s29ns512p_sectors(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	/*
	 * S29NS512P flash uses more than 8bits to report number of sectors,
	 * which is not permitted by CFI.
	 */
	cfi->cfiq->EraseRegionInfo[0] = 0x020001ff;
	pr_warn("%s: Bad S29NS512P CFI data; adjust to 512 sectors\n",
		mtd->name);
}

/* Used to fix CFI-Tables of chips without Extended Query Tables */
static struct cfi_fixup cfi_nopri_fixup_table[] = {
	{ CFI_MFR_SST, 0x234a, fixup_sst39vf }, /* SST39VF1602 */
	{ CFI_MFR_SST, 0x234b, fixup_sst39vf }, /* SST39VF1601 */
	{ CFI_MFR_SST, 0x235a, fixup_sst39vf }, /* SST39VF3202 */
	{ CFI_MFR_SST, 0x235b, fixup_sst39vf }, /* SST39VF3201 */
	{ CFI_MFR_SST, 0x235c, fixup_sst39vf_rev_b }, /* SST39VF3202B */
	{ CFI_MFR_SST, 0x235d, fixup_sst39vf_rev_b }, /* SST39VF3201B */
	{ CFI_MFR_SST, 0x236c, fixup_sst39vf_rev_b }, /* SST39VF6402B */
	{ CFI_MFR_SST, 0x236d, fixup_sst39vf_rev_b }, /* SST39VF6401B */
	{ 0, 0, NULL }
};

static struct cfi_fixup cfi_fixup_table[] = {
	{ CFI_MFR_ATMEL, CFI_ID_ANY, fixup_convert_atmel_pri },
#ifdef AMD_BOOTLOC_BUG
	{ CFI_MFR_AMD, CFI_ID_ANY, fixup_amd_bootblock },
	{ CFI_MFR_AMIC, CFI_ID_ANY, fixup_amd_bootblock },
	{ CFI_MFR_MACRONIX, CFI_ID_ANY, fixup_amd_bootblock },
#endif
	{ CFI_MFR_AMD, 0x0050, fixup_use_secsi },
	{ CFI_MFR_AMD, 0x0053, fixup_use_secsi },
	{ CFI_MFR_AMD, 0x0055, fixup_use_secsi },
	{ CFI_MFR_AMD, 0x0056, fixup_use_secsi },
	{ CFI_MFR_AMD, 0x005C, fixup_use_secsi },
	{ CFI_MFR_AMD, 0x005F, fixup_use_secsi },
	{ CFI_MFR_AMD, 0x0c01, fixup_s29gl064n_sectors },
	{ CFI_MFR_AMD, 0x1301, fixup_s29gl064n_sectors },
	{ CFI_MFR_AMD, 0x1a00, fixup_s29gl032n_sectors },
	{ CFI_MFR_AMD, 0x1a01, fixup_s29gl032n_sectors },
	{ CFI_MFR_AMD, 0x3f00, fixup_s29ns512p_sectors },
	{ CFI_MFR_SST, 0x536a, fixup_sst38vf640x_sectorsize }, /* SST38VF6402 */
	{ CFI_MFR_SST, 0x536b, fixup_sst38vf640x_sectorsize }, /* SST38VF6401 */
	{ CFI_MFR_SST, 0x536c, fixup_sst38vf640x_sectorsize }, /* SST38VF6404 */
	{ CFI_MFR_SST, 0x536d, fixup_sst38vf640x_sectorsize }, /* SST38VF6403 */
#if !FORCE_WORD_WRITE
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers },
#endif
	{ 0, 0, NULL }
};
static struct cfi_fixup jedec_fixup_table[] = {
	{ CFI_MFR_SST, SST49LF004B, fixup_use_fwh_lock },
	{ CFI_MFR_SST, SST49LF040B, fixup_use_fwh_lock },
	{ CFI_MFR_SST, SST49LF008A, fixup_use_fwh_lock },
	{ 0, 0, NULL }
};

static struct cfi_fixup fixup_table[] = {
	/* The CFI vendor IDs and the JEDEC vendor IDs appear
	 * to be common.  It looks like the device IDs are as
	 * well.  This table picks up all cases where
	 * we know that is the case.
	 */
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_erase_chip },
	{ CFI_MFR_ATMEL, AT49BV6416, fixup_use_atmel_lock },
	{ 0, 0, NULL }
};


static void cfi_fixup_major_minor(struct cfi_private *cfi,
				  struct cfi_pri_amdstd *extp)
{
	if (cfi->mfr == CFI_MFR_SAMSUNG) {
		if ((extp->MajorVersion == '0' && extp->MinorVersion == '0') ||
		    (extp->MajorVersion == '3' && extp->MinorVersion == '3')) {
			/*
			 * Samsung K8P2815UQB and K8D6x16UxM chips
			 * report major=0 / minor=0.
			 * K8D3x16UxC chips report major=3 / minor=3.
			 */
			printk(KERN_NOTICE " Fixing Samsung's Amd/Fujitsu"
			       " Extended Query version to 1.%c\n",
			       extp->MinorVersion);
			extp->MajorVersion = '1';
		}
	}

	/*
	 * SST 38VF640x chips report major=0xFF / minor=0xFF.
	 */
	if (cfi->mfr == CFI_MFR_SST && (cfi->id >> 4) == 0x0536) {
		extp->MajorVersion = '1';
		extp->MinorVersion = '0';
	}
}

static int is_m29ew(struct cfi_private *cfi)
{
	if (cfi->mfr == CFI_MFR_INTEL &&
	    ((cfi->device_type == CFI_DEVICETYPE_X8 && (cfi->id & 0xff) == 0x7e) ||
	     (cfi->device_type == CFI_DEVICETYPE_X16 && cfi->id == 0x227e)))
		return 1;
	return 0;
}

/*
 * From TN-13-07: Patching the Linux Kernel and U-Boot for M29 Flash, page 20:
 * Some revisions of the M29EW suffer from erase suspend hang ups. In
 * particular, it can occur when the sequence
 * Erase Confirm -> Suspend -> Program -> Resume
 * causes a lockup due to internal timing issues. The consequence is that the
 * erase cannot be resumed without inserting a dummy command after programming
 * and prior to resuming. [...] The work-around is to issue a dummy write cycle
 * that writes an F0 command code before the RESUME command.
 */
static void cfi_fixup_m29ew_erase_suspend(struct map_info *map,
					  unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;
	/* before resume, insert a dummy 0xF0 cycle for Micron M29EW devices */
	if (is_m29ew(cfi))
		map_write(map, CMD(0xF0), adr);
}

/*
 * From TN-13-07: Patching the Linux Kernel and U-Boot for M29 Flash, page 22:
 *
 * Some revisions of the M29EW (for example, A1 and A2 step revisions)
 * are affected by a problem that could cause a hang up when an ERASE SUSPEND
 * command is issued after an ERASE RESUME operation without waiting for a
 * minimum delay. The result is that once the ERASE seems to be completed
 * (no bits are toggling), the contents of the Flash memory block on which
 * the erase was ongoing could be inconsistent with the expected values
 * (typically, the array value is stuck to the 0xC0, 0xC4, 0x80, or 0x84
 * values), causing a consequent failure of the ERASE operation.
 * The occurrence of this issue could be high, especially when file system
 * operations on the Flash are intensive. As a result, it is recommended
 * that a patch be applied. Intensive file system operations can cause many
 * calls to the garbage routine to free Flash space (also by erasing physical
 * Flash blocks) and as a result, many consecutive SUSPEND and RESUME
 * commands can occur. The problem disappears when a delay is inserted after
 * the RESUME command by using the udelay() function available in Linux.
 * The DELAY value must be tuned based on the customer's platform.
 * The maximum value that fixes the problem in all cases is 500us.
 * But, in our experience, a delay of 30 µs to 50 µs is sufficient
 * in most cases.
 * We have chosen 500µs because this latency is acceptable.
 */
static void cfi_fixup_m29ew_delay_after_resume(struct cfi_private *cfi)
{
	/*
	 * Resolving the Delay After Resume Issue see Micron TN-13-07
	 * Worst case delay must be 500µs but 30-50µs should be ok as well
	 */
	if (is_m29ew(cfi))
		cfi_udelay(500);
}

struct mtd_info *cfi_cmdset_0002(struct map_info *map, int primary)
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct device_node __maybe_unused *np = map->device_node;
	struct mtd_info *mtd;
	int i;

	mtd = kzalloc(sizeof(*mtd), GFP_KERNEL);
	if (!mtd)
		return NULL;
	mtd->priv = map;
	mtd->type = MTD_NORFLASH;

	/* Fill in the default mtd operations */
	mtd->_erase = cfi_amdstd_erase_varsize;
	mtd->_write = cfi_amdstd_write_words;
	mtd->_read = cfi_amdstd_read;
	mtd->_sync = cfi_amdstd_sync;
	mtd->_suspend = cfi_amdstd_suspend;
	mtd->_resume = cfi_amdstd_resume;
	mtd->_read_user_prot_reg = cfi_amdstd_read_user_prot_reg;
	mtd->_read_fact_prot_reg = cfi_amdstd_read_fact_prot_reg;
	mtd->_get_fact_prot_info = cfi_amdstd_get_fact_prot_info;
	mtd->_get_user_prot_info = cfi_amdstd_get_user_prot_info;
	mtd->_write_user_prot_reg = cfi_amdstd_write_user_prot_reg;
	mtd->_lock_user_prot_reg = cfi_amdstd_lock_user_prot_reg;
	mtd->flags = MTD_CAP_NORFLASH;
	mtd->name = map->name;
	mtd->writesize = 1;
	mtd->writebufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;

	pr_debug("MTD %s(): write buffer size %d\n", __func__,
		 mtd->writebufsize);

	mtd->_panic_write = cfi_amdstd_panic_write;
	mtd->reboot_notifier.notifier_call = cfi_amdstd_reboot;

	if (cfi->cfi_mode==CFI_MODE_CFI){
		unsigned char bootloc;
		__u16 adr = primary?cfi->cfiq->P_ADR:cfi->cfiq->A_ADR;
		struct cfi_pri_amdstd *extp;

		extp = (struct cfi_pri_amdstd*)cfi_read_pri(map, adr, sizeof(*extp), "Amd/Fujitsu");
		if (extp) {
			/*
			 * It's a real CFI chip, not one for which the probe
			 * routine faked a CFI structure.
			 */
			cfi_fixup_major_minor(cfi, extp);

			/*
			 * Valid primary extension versions are: 1.0, 1.1, 1.2, 1.3, 1.4, 1.5
			 * see: http://cs.ozerki.net/zap/pub/axim-x5/docs/cfi_r20.pdf, page 19
			 *      http://www.spansion.com/Support/AppNotes/cfi_100_20011201.pdf
			 *      http://www.spansion.com/Support/Datasheets/s29ws-p_00_a12_e.pdf
			 *      http://www.spansion.com/Support/Datasheets/S29GL_128S_01GS_00_02_e.pdf
			 */
			if (extp->MajorVersion != '1' ||
			    (extp->MajorVersion == '1' && (extp->MinorVersion < '0' || extp->MinorVersion > '5'))) {
				printk(KERN_ERR " Unknown Amd/Fujitsu Extended Query "
				       "version %c.%c (%#02x/%#02x).\n",
				       extp->MajorVersion, extp->MinorVersion,
				       extp->MajorVersion, extp->MinorVersion);
				kfree(extp);
				kfree(mtd);
				return NULL;
			}

			printk(KERN_INFO " Amd/Fujitsu Extended Query version %c.%c.\n",
			       extp->MajorVersion, extp->MinorVersion);

			/* Install our own private info structure */
			cfi->cmdset_priv = extp;

			/* Apply cfi device specific fixups */
			cfi_fixup(mtd, cfi_fixup_table);

#ifdef DEBUG_CFI_FEATURES
			/* Tell the user about it in lots of lovely detail */
			cfi_tell_features(extp);
#endif

#ifdef CONFIG_OF
			if (np && of_property_read_bool(
				    np, "use-advanced-sector-protection")
			    && extp->BlkProtUnprot == 8) {
				printk(KERN_INFO " Advanced Sector Protection (PPB Locking) supported\n");
				mtd->_lock = cfi_ppb_lock;
				mtd->_unlock = cfi_ppb_unlock;
				mtd->_is_locked = cfi_ppb_is_locked;
			}
#endif

			bootloc = extp->TopBottom;
			if ((bootloc < 2) || (bootloc > 5)) {
				printk(KERN_WARNING "%s: CFI contains unrecognised boot "
				       "bank location (%d). Assuming bottom.\n",
				       map->name, bootloc);
				bootloc = 2;
			}

			if (bootloc == 3 && cfi->cfiq->NumEraseRegions > 1) {
				printk(KERN_WARNING "%s: Swapping erase regions for top-boot CFI table.\n", map->name);

				for (i=0; i<cfi->cfiq->NumEraseRegions / 2; i++) {
					int j = (cfi->cfiq->NumEraseRegions-1)-i;

					swap(cfi->cfiq->EraseRegionInfo[i],
					     cfi->cfiq->EraseRegionInfo[j]);
				}
			}
			/* Set the default CFI lock/unlock addresses */
			cfi->addr_unlock1 = 0x555;
			cfi->addr_unlock2 = 0x2aa;
		}
		cfi_fixup(mtd, cfi_nopri_fixup_table);

		if (!cfi->addr_unlock1 || !cfi->addr_unlock2) {
			kfree(mtd);
			return NULL;
		}

	} /* CFI mode */
	else if (cfi->cfi_mode == CFI_MODE_JEDEC) {
		/* Apply jedec specific fixups */
		cfi_fixup(mtd, jedec_fixup_table);
	}
	/* Apply generic fixups */
	cfi_fixup(mtd, fixup_table);

	for (i=0; i< cfi->numchips; i++) {
		cfi->chips[i].word_write_time = 1<<cfi->cfiq->WordWriteTimeoutTyp;
		cfi->chips[i].buffer_write_time = 1<<cfi->cfiq->BufWriteTimeoutTyp;
		cfi->chips[i].erase_time = 1<<cfi->cfiq->BlockEraseTimeoutTyp;
		/*
		 * First calculate the timeout max according to the timeout
		 * field of struct cfi_ident probed from the chip's CFI area,
		 * if available. Specify a minimum of 2000us, in case the CFI
		 * data is wrong.
		 */
		if (cfi->cfiq->BufWriteTimeoutTyp &&
		    cfi->cfiq->BufWriteTimeoutMax)
			cfi->chips[i].buffer_write_time_max =
				1 << (cfi->cfiq->BufWriteTimeoutTyp +
				      cfi->cfiq->BufWriteTimeoutMax);
		else
			cfi->chips[i].buffer_write_time_max = 0;

		cfi->chips[i].buffer_write_time_max =
			max(cfi->chips[i].buffer_write_time_max, 2000);

		cfi->chips[i].ref_point_counter = 0;
		init_waitqueue_head(&(cfi->chips[i].wq));
	}

	map->fldrv = &cfi_amdstd_chipdrv;

	return cfi_amdstd_setup(mtd);
}
struct mtd_info *cfi_cmdset_0006(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0002")));
struct mtd_info *cfi_cmdset_0701(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0002")));
EXPORT_SYMBOL_GPL(cfi_cmdset_0002);
EXPORT_SYMBOL_GPL(cfi_cmdset_0006);
EXPORT_SYMBOL_GPL(cfi_cmdset_0701);

static struct mtd_info *cfi_amdstd_setup(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;
	unsigned long offset = 0;
	int i,j;

	printk(KERN_NOTICE "number of %s chips: %d\n",
	       (cfi->cfi_mode == CFI_MODE_CFI)?"CFI":"JEDEC",cfi->numchips);
	/* Select the correct geometry setup */
	mtd->size = devsize * cfi->numchips;

	mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
	mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info)
				    * mtd->numeraseregions, GFP_KERNEL);
	if (!mtd->eraseregions)
		goto setup_err;

	for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
		unsigned long ernum, ersize;
		ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
		ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;

		if (mtd->erasesize < ersize) {
			mtd->erasesize = ersize;
		}
		for (j=0; j<cfi->numchips; j++) {
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
		}
		offset += (ersize * ernum);
	}
	if (offset != devsize) {
		/* Argh */
		printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
		goto setup_err;
	}

	__module_get(THIS_MODULE);
	register_reboot_notifier(&mtd->reboot_notifier);
	return mtd;

 setup_err:
	kfree(mtd->eraseregions);
	kfree(mtd);
	kfree(cfi->cmdset_priv);
	kfree(cfi->cfiq);
	return NULL;
}

/*
 * Return true if the chip is ready.
 *
 * Ready is one of: read mode, query mode, erase-suspend-read mode (in any
 * non-suspended sector) and is indicated by no toggle bits toggling.
 *
 * Note that anything more complicated than checking if no bits are toggling
 * (including checking DQ5 for an error status) is tricky to get working
 * correctly and is therefore not done (particularly with interleaved chips
 * as each chip must be checked independently of the others).
 */
static int __xipram chip_ready(struct map_info *map, unsigned long addr)
{
	map_word d, t;

	d = map_read(map, addr);
	t = map_read(map, addr);

	return map_word_equal(map, d, t);
}

/*
 * Return true if the chip is ready and has the correct value.
 *
 * Ready is one of: read mode, query mode, erase-suspend-read mode (in any
 * non-suspended sector) and it is indicated by no bits toggling.
 *
 * Errors are indicated by bits toggling or by bits held at the wrong value.
 *
 * Note that anything more complicated than checking if no bits are toggling
 * (including checking DQ5 for an error status) is tricky to get working
 * correctly and is therefore not done (particularly with interleaved chips
 * as each chip must be checked independently of the others).
 *
 */
static int __xipram chip_good(struct map_info *map, unsigned long addr, map_word expected)
{
	map_word oldd, curd;

	oldd = map_read(map, addr);
	curd = map_read(map, addr);

	return	map_word_equal(map, oldd, curd) &&
		map_word_equal(map, curd, expected);
}

static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
{
	DECLARE_WAITQUEUE(wait, current);
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long timeo;
	struct cfi_pri_amdstd *cfip = (struct cfi_pri_amdstd *)cfi->cmdset_priv;

 resettime:
	timeo = jiffies + HZ;
 retry:
	switch (chip->state) {

	case FL_STATUS:
		for (;;) {
			if (chip_ready(map, adr))
				break;

			if (time_after(jiffies, timeo)) {
				printk(KERN_ERR "Waiting for chip to be ready timed out.\n");
				return -EIO;
			}
			mutex_unlock(&chip->mutex);
			cfi_udelay(1);
			mutex_lock(&chip->mutex);
			/* Someone else might have been playing with it. */
			goto retry;
		}

	case FL_READY:
	case FL_CFI_QUERY:
	case FL_JEDEC_QUERY:
		return 0;

	case FL_ERASING:
		if (!cfip || !(cfip->EraseSuspend & (0x1|0x2)) ||
		    !(mode == FL_READY || mode == FL_POINT ||
		    (mode == FL_WRITING && (cfip->EraseSuspend & 0x2))))
			goto sleep;

		/* We could check to see if we're trying to access the sector
		 * that is currently being erased. However, no user will try
		 * anything like that so we just wait for the timeout. */

		/* Erase suspend */
		/* It's harmless to issue the Erase-Suspend and Erase-Resume
		 * commands when the erase algorithm isn't in progress. */
		map_write(map, CMD(0xB0), chip->in_progress_block_addr);
		chip->oldstate = FL_ERASING;
		chip->state = FL_ERASE_SUSPENDING;
		chip->erase_suspended = 1;
		for (;;) {
			if (chip_ready(map, adr))
				break;

			if (time_after(jiffies, timeo)) {
				/* Should have suspended the erase by now.
				 * Send an Erase-Resume command as either
				 * there was an error (so leave the erase
				 * routine to recover from it) or we are trying
				 * to use the erase-in-progress sector. */
				put_chip(map, chip, adr);
				printk(KERN_ERR "MTD %s(): chip not ready after erase suspend\n", __func__);
				return -EIO;
			}

			mutex_unlock(&chip->mutex);
			cfi_udelay(1);
			mutex_lock(&chip->mutex);
			/* Nobody will touch it while it's in state FL_ERASE_SUSPENDING.
			   So we can just loop here. */
		}
		chip->state = FL_READY;
		return 0;

	case FL_XIP_WHILE_ERASING:
		if (mode != FL_READY && mode != FL_POINT &&
		    (!cfip || !(cfip->EraseSuspend&2)))
			goto sleep;
		chip->oldstate = chip->state;
		chip->state = FL_READY;
		return 0;

	case FL_SHUTDOWN:
		/* The machine is rebooting */
		return -EIO;

	case FL_POINT:
		/* Only if there's no operation suspended... */
		if (mode == FL_READY && chip->oldstate == FL_READY)
			return 0;

	default:
	sleep:
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);
		mutex_unlock(&chip->mutex);
		schedule();
		remove_wait_queue(&chip->wq, &wait);
		mutex_lock(&chip->mutex);
		goto resettime;
	}
}


static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;

	switch(chip->oldstate) {
	case FL_ERASING:
		cfi_fixup_m29ew_erase_suspend(map,
			chip->in_progress_block_addr);
		map_write(map, cfi->sector_erase_cmd, chip->in_progress_block_addr);
		cfi_fixup_m29ew_delay_after_resume(cfi);
		chip->oldstate = FL_READY;
		chip->state = FL_ERASING;
		break;

	case FL_XIP_WHILE_ERASING:
		chip->state = chip->oldstate;
		chip->oldstate = FL_READY;
		break;

	case FL_READY:
	case FL_STATUS:
		break;
	default:
		printk(KERN_ERR "MTD: put_chip() called with oldstate %d!!\n", chip->oldstate);
	}
	wake_up(&chip->wq);
}

#ifdef CONFIG_MTD_XIP

/*
 * No interrupt whatsoever can be serviced while the flash isn't in array
 * mode. This is ensured by the xip_disable() and xip_enable() functions
 * enclosing any code path where the flash is known not to be in array mode.
 * Within an XIP-disabled code path, only functions marked with __xipram
 * may be called and nothing else (it's a good thing to inspect generated
 * assembly to make sure inline functions were actually inlined and that gcc
 * didn't emit calls to its own support functions). Configuring MTD CFI
 * support for a single buswidth and a single interleave is also recommended.
 */

static void xip_disable(struct map_info *map, struct flchip *chip,
			unsigned long adr)
{
	/* TODO: chips with no XIP use should ignore and return */
	(void) map_read(map, adr); /* ensure mmu mapping is up to date */
	local_irq_disable();
}

static void __xipram xip_enable(struct map_info *map, struct flchip *chip,
				unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;

	if (chip->state != FL_POINT && chip->state != FL_READY) {
		map_write(map, CMD(0xf0), adr);
		chip->state = FL_READY;
	}
	(void) map_read(map, adr);
	xip_iprefetch();
	local_irq_enable();
}

/*
 * When a delay is required for the flash operation to complete, the
 * xip_udelay() function polls for both the given timeout and pending
 * (but still masked) hardware interrupts. Whenever there is an interrupt
 * pending then the flash erase operation is suspended, array mode restored
 * and interrupts unmasked. Task scheduling might also happen at that
 * point. The CPU eventually returns from the interrupt or the call to
 * schedule() and the suspended flash operation is resumed for the remainder
 * of the delay period.
 *
 * Warning: this function _will_ fool interrupt latency tracing tools.
 */

static void __xipram xip_udelay(struct map_info *map, struct flchip *chip,
				unsigned long adr, int usec)
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
	map_word status, OK = CMD(0x80);
	unsigned long suspended, start = xip_currtime();
	flstate_t oldstate;

	do {
		cpu_relax();
		if (xip_irqpending() && extp &&
		    ((chip->state == FL_ERASING && (extp->EraseSuspend & 2))) &&
		    (cfi_interleave_is_1(cfi) || chip->oldstate == FL_READY)) {
			/*
			 * Let's suspend the erase operation when supported.
			 * Note that we currently don't try to suspend
			 * interleaved chips if there is already another
			 * operation suspended (imagine what happens
			 * when one chip was already done with the current
			 * operation while another chip suspended it, then
			 * we resume the whole thing at once). Yes, it
			 * can happen!
			 */
			map_write(map, CMD(0xb0), adr);
			usec -= xip_elapsed_since(start);
			suspended = xip_currtime();
			do {
				if (xip_elapsed_since(suspended) > 100000) {
					/*
					 * The chip doesn't want to suspend
					 * after waiting for 100 msecs.
					 * This is a critical error but there
					 * is not much we can do here.
					 */
					return;
				}
				status = map_read(map, adr);
			} while (!map_word_andequal(map, status, OK, OK));

			/* Suspend succeeded */
			oldstate = chip->state;
			if (!map_word_bitsset(map, status, CMD(0x40)))
				break;
			chip->state = FL_XIP_WHILE_ERASING;
			chip->erase_suspended = 1;
			map_write(map, CMD(0xf0), adr);
			(void) map_read(map, adr);
			xip_iprefetch();
			local_irq_enable();
			mutex_unlock(&chip->mutex);
			xip_iprefetch();
			cond_resched();

			/*
			 * We're back. However someone else might have
			 * decided to go write to the chip if we are in
			 * a suspended erase state. If so let's wait
			 * until it's done.
			 */
			mutex_lock(&chip->mutex);
			while (chip->state != FL_XIP_WHILE_ERASING) {
				DECLARE_WAITQUEUE(wait, current);
				set_current_state(TASK_UNINTERRUPTIBLE);
				add_wait_queue(&chip->wq, &wait);
				mutex_unlock(&chip->mutex);
				schedule();
				remove_wait_queue(&chip->wq, &wait);
				mutex_lock(&chip->mutex);
			}
			/* Disallow XIP again */
			local_irq_disable();

			/* Correct Erase Suspend Hangups for M29EW */
			cfi_fixup_m29ew_erase_suspend(map, adr);
			/* Resume the write or erase operation */
			map_write(map, cfi->sector_erase_cmd, adr);
			chip->state = oldstate;
			start = xip_currtime();
		} else if (usec >= 1000000/HZ) {
			/*
			 * Try to save on CPU power when waiting delay
			 * is at least a system timer tick period.
			 * No need to be extremely accurate here.
			 */
			xip_cpu_idle();
		}
		status = map_read(map, adr);
	} while (!map_word_andequal(map, status, OK, OK)
		 && xip_elapsed_since(start) < usec);
}

#define UDELAY(map, chip, adr, usec)  xip_udelay(map, chip, adr, usec)

/*
 * The INVALIDATE_CACHED_RANGE() macro is normally used in parallel while
 * the flash is actively programming or erasing since we have to poll for
 * the operation to complete anyway.  We can't do that in a generic way with
 * a XIP setup so do it before the actual flash operation in this case
 * and stub it out from INVALIDATE_CACHE_UDELAY.
 */
#define XIP_INVAL_CACHED_RANGE(map, from, size)  \
	INVALIDATE_CACHED_RANGE(map, from, size)

#define INVALIDATE_CACHE_UDELAY(map, chip, adr, len, usec)  \
	UDELAY(map, chip, adr, usec)

/*
 * Extra notes:
 *
 * Activating this XIP support changes the way the code works a bit. For
 * example the code to suspend the current process when concurrent access
 * happens is never executed because xip_udelay() will always return with the
 * same chip state as it was entered with.  This is why there is no care for
 * the presence of add_wait_queue() or schedule() calls from within a couple
 * xip_disable()'d  areas of code, like in do_erase_oneblock for example.
 * The queueing and scheduling are always happening within xip_udelay().
 *
 * Similarly, get_chip() and put_chip() just happen to always be executed
 * with chip->state set to FL_READY (or FL_XIP_WHILE_*) where flash state
 * is in array mode, therefore never executing many cases therein and not
 * causing any problem with XIP.
 */

#else

#define xip_disable(map, chip, adr)
#define xip_enable(map, chip, adr)
#define XIP_INVAL_CACHED_RANGE(x...)

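/*
 * Without XIP, a delay simply drops the chip mutex around the busy-wait so
 * that other users of the chip can make progress in the meantime.
 */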
#define UDELAY(map, chip, adr, usec)  \
do {  \
	mutex_unlock(&chip->mutex);  \
	cfi_udelay(usec);  \
	mutex_lock(&chip->mutex);  \
} while (0)

#define INVALIDATE_CACHE_UDELAY(map, chip, adr, len, usec)  \
do {  \
	mutex_unlock(&chip->mutex);  \
	INVALIDATE_CACHED_RANGE(map, adr, len);  \
	cfi_udelay(usec);  \
	mutex_lock(&chip->mutex);  \
} while (0)

#endif

static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
{
	unsigned long cmd_addr;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret;

	adr += chip->start;

	/* Ensure cmd read/writes are aligned. */
	cmd_addr = adr & ~(map_bankwidth(map)-1);

	mutex_lock(&chip->mutex);
	ret = get_chip(map, chip, cmd_addr, FL_READY);
	if (ret) {
		mutex_unlock(&chip->mutex);
		return ret;
	}

	if (chip->state != FL_POINT && chip->state != FL_READY) {
		map_write(map, CMD(0xf0), cmd_addr);
		chip->state = FL_READY;
	}

	map_copy_from(map, buf, adr, len);

	put_chip(map, chip, cmd_addr);

	mutex_unlock(&chip->mutex);
	return 0;
}


static int cfi_amdstd_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long ofs;
	int chipnum;
	int ret = 0;

	/* ofs: offset within the first chip that the first read should start */
	chipnum = (from >> cfi->chipshift);
	ofs = from - (chipnum << cfi->chipshift);

	while (len) {
		unsigned long thislen;

		if (chipnum >= cfi->numchips)
			break;

		if ((len + ofs -1) >> cfi->chipshift)
			thislen = (1<<cfi->chipshift) - ofs;
		else
			thislen = len;

		ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
		if (ret)
			break;

		*retlen += thislen;
		len -= thislen;
		buf += thislen;

		ofs = 0;
		chipnum++;
	}
	return ret;
}

typedef int (*otp_op_t)(struct map_info *map, struct flchip *chip,
			loff_t adr, size_t len, u_char *buf, size_t grouplen);

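/*
 * Enter the Secured Silicon (OTP) region: the usual two unlock cycles
 * followed by the 0x88 entry command.
 */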
static inline void otp_enter(struct map_info *map, struct flchip *chip,
			     loff_t adr, size_t len)
{
	struct cfi_private *cfi = map->fldrv_priv;

	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0x88, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);

	INVALIDATE_CACHED_RANGE(map, chip->start + adr, len);
}

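/*
 * Leave the Secured Silicon region again: unlock cycles followed by the
 * 0x90/0x00 exit sequence, returning the chip to normal array read mode.
 */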
1194static inline void otp_exit(struct map_info *map, struct flchip *chip,
1195 loff_t adr, size_t len)
1196{
1197 struct cfi_private *cfi = map->fldrv_priv;
1198
1199 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
1200 cfi->device_type, NULL);
1201 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
1202 cfi->device_type, NULL);
1203 cfi_send_gen_cmd(0x90, cfi->addr_unlock1, chip->start, map, cfi,
1204 cfi->device_type, NULL);
1205 cfi_send_gen_cmd(0x00, cfi->addr_unlock1, chip->start, map, cfi,
1206 cfi->device_type, NULL);
1207
1208 INVALIDATE_CACHED_RANGE(map, chip->start + adr, len);
1209}
1210
Christian Riesch4f5cb242014-03-06 13:18:30 +01001211static inline int do_read_secsi_onechip(struct map_info *map,
1212 struct flchip *chip, loff_t adr,
1213 size_t len, u_char *buf,
1214 size_t grouplen)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001215{
1216 DECLARE_WAITQUEUE(wait, current);
1217 unsigned long timeo = jiffies + HZ;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001218
1219 retry:
Stefani Seiboldc4e77372010-04-18 22:46:44 +02001220 mutex_lock(&chip->mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001221
1222 if (chip->state != FL_READY){
Linus Torvalds1da177e2005-04-16 15:20:36 -07001223 set_current_state(TASK_UNINTERRUPTIBLE);
1224 add_wait_queue(&chip->wq, &wait);
Thomas Gleixner1f948b42005-11-07 11:15:37 +00001225
Stefani Seiboldc4e77372010-04-18 22:46:44 +02001226 mutex_unlock(&chip->mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001227
1228 schedule();
1229 remove_wait_queue(&chip->wq, &wait);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001230 timeo = jiffies + HZ;
1231
1232 goto retry;
Thomas Gleixner1f948b42005-11-07 11:15:37 +00001233 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001234
1235 adr += chip->start;
1236
1237 chip->state = FL_READY;
1238
Christian Rieschfeb86772013-04-26 21:10:28 +02001239 otp_enter(map, chip, adr, len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001240 map_copy_from(map, buf, adr, len);
Christian Rieschfeb86772013-04-26 21:10:28 +02001241 otp_exit(map, chip, adr, len);
Thomas Gleixner1f948b42005-11-07 11:15:37 +00001242
Linus Torvalds1da177e2005-04-16 15:20:36 -07001243 wake_up(&chip->wq);
Stefani Seiboldc4e77372010-04-18 22:46:44 +02001244 mutex_unlock(&chip->mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001245
1246 return 0;
1247}
1248
1249static int cfi_amdstd_secsi_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
1250{
1251 struct map_info *map = mtd->priv;
1252 struct cfi_private *cfi = map->fldrv_priv;
1253 unsigned long ofs;
1254 int chipnum;
1255 int ret = 0;
1256
Linus Torvalds1da177e2005-04-16 15:20:36 -07001257 /* ofs: offset within the first chip that the first read should start */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001258 /* 8 secsi bytes per chip */
1259	chipnum = from >> 3;
1260	ofs = from & 7;
1261
Linus Torvalds1da177e2005-04-16 15:20:36 -07001262 while (len) {
1263 unsigned long thislen;
1264
1265 if (chipnum >= cfi->numchips)
1266 break;
1267
1268 if ((len + ofs -1) >> 3)
1269 thislen = (1<<3) - ofs;
1270 else
1271 thislen = len;
1272
Christian Riesch4f5cb242014-03-06 13:18:30 +01001273 ret = do_read_secsi_onechip(map, &cfi->chips[chipnum], ofs,
1274 thislen, buf, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001275 if (ret)
1276 break;
1277
1278 *retlen += thislen;
1279 len -= thislen;
1280 buf += thislen;
1281
1282 ofs = 0;
1283 chipnum++;
1284 }
1285 return ret;
1286}
1287
Christian Rieschaf744752014-03-06 13:18:29 +01001288static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
1289 unsigned long adr, map_word datum,
1290 int mode);
1291
1292static int do_otp_write(struct map_info *map, struct flchip *chip, loff_t adr,
Christian Riesch4f5cb242014-03-06 13:18:30 +01001293 size_t len, u_char *buf, size_t grouplen)
Christian Rieschaf744752014-03-06 13:18:29 +01001294{
1295 int ret;
1296 while (len) {
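		/*
		 * Bus-width alignment, e.g. (assuming a 2-byte bus width)
		 * adr = 0x5 gives bus_ofs = 0x4, gap = 1 and n = 1: only one
		 * byte of this word comes from buf, the rest is re-read
		 * from the OTP area below.
		 */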
1297 unsigned long bus_ofs = adr & ~(map_bankwidth(map)-1);
1298 int gap = adr - bus_ofs;
1299 int n = min_t(int, len, map_bankwidth(map) - gap);
Christian Riesch636fdbf2015-03-31 23:29:22 +02001300 map_word datum = map_word_ff(map);
Christian Rieschaf744752014-03-06 13:18:29 +01001301
1302 if (n != map_bankwidth(map)) {
1303 /* partial write of a word, load old contents */
1304 otp_enter(map, chip, bus_ofs, map_bankwidth(map));
1305 datum = map_read(map, bus_ofs);
1306 otp_exit(map, chip, bus_ofs, map_bankwidth(map));
1307 }
1308
1309 datum = map_word_load_partial(map, datum, buf, gap, n);
1310 ret = do_write_oneword(map, chip, bus_ofs, datum, FL_OTP_WRITE);
1311 if (ret)
1312 return ret;
1313
1314 adr += n;
1315 buf += n;
1316 len -= n;
1317 }
1318
1319 return 0;
1320}
1321
Christian Riesch4f5cb242014-03-06 13:18:30 +01001322static int do_otp_lock(struct map_info *map, struct flchip *chip, loff_t adr,
1323 size_t len, u_char *buf, size_t grouplen)
1324{
1325 struct cfi_private *cfi = map->fldrv_priv;
1326 uint8_t lockreg;
1327 unsigned long timeo;
1328 int ret;
1329
1330 /* make sure area matches group boundaries */
1331 if ((adr != 0) || (len != grouplen))
1332 return -EINVAL;
1333
1334 mutex_lock(&chip->mutex);
1335 ret = get_chip(map, chip, chip->start, FL_LOCKING);
1336 if (ret) {
1337 mutex_unlock(&chip->mutex);
1338 return ret;
1339 }
1340 chip->state = FL_LOCKING;
1341
1342 /* Enter lock register command */
1343 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
1344 cfi->device_type, NULL);
1345 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
1346 cfi->device_type, NULL);
1347 cfi_send_gen_cmd(0x40, cfi->addr_unlock1, chip->start, map, cfi,
1348 cfi->device_type, NULL);
1349
1350 /* read lock register */
1351 lockreg = cfi_read_query(map, 0);
1352
1353	/* clear bit 0 to protect the extended memory block */
1354 lockreg &= ~0x01;
1355
1357 /* write lock register */
1358 map_write(map, CMD(0xA0), chip->start);
1359 map_write(map, CMD(lockreg), chip->start);
1360
1361 /* wait for chip to become ready */
1362 timeo = jiffies + msecs_to_jiffies(2);
1363 for (;;) {
1364 if (chip_ready(map, adr))
1365 break;
1366
1367 if (time_after(jiffies, timeo)) {
1368 pr_err("Waiting for chip to be ready timed out.\n");
1369 ret = -EIO;
1370 break;
1371 }
1372 UDELAY(map, chip, 0, 1);
1373 }
1374
1375 /* exit protection commands */
1376 map_write(map, CMD(0x90), chip->start);
1377 map_write(map, CMD(0x00), chip->start);
1378
1379 chip->state = FL_READY;
1380 put_chip(map, chip, chip->start);
1381 mutex_unlock(&chip->mutex);
1382
1383 return ret;
1384}
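/*
 * Summary of the sequence used above (as implemented here, not vendor
 * documentation): enter the lock-register command set with 0xAA/0x55/0x40,
 * read the lock register, clear bit 0, program it back with 0xA0 plus the
 * new value, poll for ready, then leave with 0x90/0x00.
 */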
1385
Christian Rieschdc7e9ec2014-03-06 13:18:27 +01001386static int cfi_amdstd_otp_walk(struct mtd_info *mtd, loff_t from, size_t len,
1387 size_t *retlen, u_char *buf,
1388 otp_op_t action, int user_regs)
1389{
1390 struct map_info *map = mtd->priv;
1391 struct cfi_private *cfi = map->fldrv_priv;
1392 int ofs_factor = cfi->interleave * cfi->device_type;
1393 unsigned long base;
1394 int chipnum;
1395 struct flchip *chip;
1396 uint8_t otp, lockreg;
1397 int ret;
1398
1399 size_t user_size, factory_size, otpsize;
1400 loff_t user_offset, factory_offset, otpoffset;
1401 int user_locked = 0, otplocked;
1402
1403 *retlen = 0;
1404
1405 for (chipnum = 0; chipnum < cfi->numchips; chipnum++) {
1406 chip = &cfi->chips[chipnum];
1407 factory_size = 0;
1408 user_size = 0;
1409
1410 /* Micron M29EW family */
1411 if (is_m29ew(cfi)) {
1412 base = chip->start;
1413
1414 /* check whether secsi area is factory locked
1415 or user lockable */
1416 mutex_lock(&chip->mutex);
1417 ret = get_chip(map, chip, base, FL_CFI_QUERY);
1418 if (ret) {
1419 mutex_unlock(&chip->mutex);
1420 return ret;
1421 }
1422 cfi_qry_mode_on(base, map, cfi);
1423 otp = cfi_read_query(map, base + 0x3 * ofs_factor);
1424 cfi_qry_mode_off(base, map, cfi);
1425 put_chip(map, chip, base);
1426 mutex_unlock(&chip->mutex);
1427
1428 if (otp & 0x80) {
1429 /* factory locked */
1430 factory_offset = 0;
1431 factory_size = 0x100;
1432 } else {
1433 /* customer lockable */
1434 user_offset = 0;
1435 user_size = 0x100;
1436
1437 mutex_lock(&chip->mutex);
1438 ret = get_chip(map, chip, base, FL_LOCKING);
Brian Norris5d20bad2014-08-13 23:35:24 -07001439 if (ret) {
1440 mutex_unlock(&chip->mutex);
1441 return ret;
1442 }
Christian Rieschdc7e9ec2014-03-06 13:18:27 +01001443
1444 /* Enter lock register command */
1445 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1,
1446 chip->start, map, cfi,
1447 cfi->device_type, NULL);
1448 cfi_send_gen_cmd(0x55, cfi->addr_unlock2,
1449 chip->start, map, cfi,
1450 cfi->device_type, NULL);
1451 cfi_send_gen_cmd(0x40, cfi->addr_unlock1,
1452 chip->start, map, cfi,
1453 cfi->device_type, NULL);
1454 /* read lock register */
1455 lockreg = cfi_read_query(map, 0);
1456 /* exit protection commands */
1457 map_write(map, CMD(0x90), chip->start);
1458 map_write(map, CMD(0x00), chip->start);
1459 put_chip(map, chip, chip->start);
1460 mutex_unlock(&chip->mutex);
1461
1462 user_locked = ((lockreg & 0x01) == 0x00);
1463 }
1464 }
1465
1466 otpsize = user_regs ? user_size : factory_size;
1467 if (!otpsize)
1468 continue;
1469 otpoffset = user_regs ? user_offset : factory_offset;
1470 otplocked = user_regs ? user_locked : 1;
1471
1472 if (!action) {
1473 /* return otpinfo */
1474 struct otp_info *otpinfo;
1475 len -= sizeof(*otpinfo);
1476 if (len <= 0)
1477 return -ENOSPC;
1478 otpinfo = (struct otp_info *)buf;
1479 otpinfo->start = from;
1480 otpinfo->length = otpsize;
1481 otpinfo->locked = otplocked;
1482 buf += sizeof(*otpinfo);
1483 *retlen += sizeof(*otpinfo);
1484 from += otpsize;
1485 } else if ((from < otpsize) && (len > 0)) {
1486 size_t size;
1487 size = (len < otpsize - from) ? len : otpsize - from;
Christian Riesch4f5cb242014-03-06 13:18:30 +01001488 ret = action(map, chip, otpoffset + from, size, buf,
1489 otpsize);
Christian Rieschdc7e9ec2014-03-06 13:18:27 +01001490 if (ret < 0)
1491 return ret;
1492
1493 buf += size;
1494 len -= size;
1495 *retlen += size;
1496 from = 0;
1497 } else {
1498 from -= otpsize;
1499 }
1500 }
1501 return 0;
1502}
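/*
 * The wrappers below all funnel into cfi_amdstd_otp_walk(): with
 * action == NULL the walk only reports the OTP regions as otp_info
 * records, otherwise 'action' (read, write or lock) is applied to the
 * requested byte range; user_regs selects the customer-lockable area
 * instead of the factory-locked one.
 */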
1503
1504static int cfi_amdstd_get_fact_prot_info(struct mtd_info *mtd, size_t len,
1505 size_t *retlen, struct otp_info *buf)
1506{
1507 return cfi_amdstd_otp_walk(mtd, 0, len, retlen, (u_char *)buf,
1508 NULL, 0);
1509}
1510
1511static int cfi_amdstd_get_user_prot_info(struct mtd_info *mtd, size_t len,
1512 size_t *retlen, struct otp_info *buf)
1513{
1514 return cfi_amdstd_otp_walk(mtd, 0, len, retlen, (u_char *)buf,
1515 NULL, 1);
1516}
1517
1518static int cfi_amdstd_read_fact_prot_reg(struct mtd_info *mtd, loff_t from,
1519 size_t len, size_t *retlen,
1520 u_char *buf)
1521{
1522 return cfi_amdstd_otp_walk(mtd, from, len, retlen,
1523 buf, do_read_secsi_onechip, 0);
1524}
1525
1526static int cfi_amdstd_read_user_prot_reg(struct mtd_info *mtd, loff_t from,
1527 size_t len, size_t *retlen,
1528 u_char *buf)
1529{
1530 return cfi_amdstd_otp_walk(mtd, from, len, retlen,
1531 buf, do_read_secsi_onechip, 1);
1532}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001533
Christian Rieschaf744752014-03-06 13:18:29 +01001534static int cfi_amdstd_write_user_prot_reg(struct mtd_info *mtd, loff_t from,
1535 size_t len, size_t *retlen,
1536 u_char *buf)
1537{
1538 return cfi_amdstd_otp_walk(mtd, from, len, retlen, buf,
1539 do_otp_write, 1);
1540}
1541
Christian Riesch4f5cb242014-03-06 13:18:30 +01001542static int cfi_amdstd_lock_user_prot_reg(struct mtd_info *mtd, loff_t from,
1543 size_t len)
1544{
1545 size_t retlen;
1546 return cfi_amdstd_otp_walk(mtd, from, len, &retlen, NULL,
1547 do_otp_lock, 1);
1548}
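/*
 * From user space these handlers are normally reached through the mtd
 * character device; a rough sketch (assuming /dev/mtd0 exposes this chip
 * and the standard OTP ioctls from <mtd/mtd-abi.h>):
 *
 *	int mode = MTD_OTP_USER;
 *	ioctl(fd, OTPSELECT, &mode);	(switch the fd into user-OTP mode)
 *	pread(fd, buf, 16, 0);		(ends up in ..._read_user_prot_reg)
 */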
1549
Christian Rieschaf744752014-03-06 13:18:29 +01001550static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
1551 unsigned long adr, map_word datum,
1552 int mode)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001553{
1554 struct cfi_private *cfi = map->fldrv_priv;
1555 unsigned long timeo = jiffies + HZ;
1556 /*
1557 * We use a 1ms + 1 jiffies generic timeout for writes (most devices
1558	 * have a max write time of a few hundred microseconds). However, we
1559	 * should use the maximum timeout value given by the chip at probe
1560	 * time instead. Unfortunately, struct flchip does not have a field
1561	 * for the maximum timeout, only for the typical one, which can be far
1562	 * too short depending on the conditions. The ' + 1' is to avoid having a
1563 * timeout of 0 jiffies if HZ is smaller than 1000.
1564 */
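	/* E.g. HZ == 100 yields 1 jiffy (10ms); HZ == 1000 yields 2 jiffies (2ms). */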
1565 unsigned long uWriteTimeout = ( HZ / 1000 ) + 1;
1566 int ret = 0;
1567 map_word oldd;
1568 int retry_cnt = 0;
1569
1570 adr += chip->start;
1571
Stefani Seiboldc4e77372010-04-18 22:46:44 +02001572 mutex_lock(&chip->mutex);
Christian Rieschaf744752014-03-06 13:18:29 +01001573 ret = get_chip(map, chip, adr, mode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001574 if (ret) {
Stefani Seiboldc4e77372010-04-18 22:46:44 +02001575 mutex_unlock(&chip->mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001576 return ret;
1577 }
1578
Brian Norris289c0522011-07-19 10:06:09 -07001579 pr_debug("MTD %s(): WRITE 0x%.8lx(0x%.8lx)\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07001580 __func__, adr, datum.x[0] );
1581
Christian Rieschaf744752014-03-06 13:18:29 +01001582 if (mode == FL_OTP_WRITE)
1583 otp_enter(map, chip, adr, map_bankwidth(map));
1584
Linus Torvalds1da177e2005-04-16 15:20:36 -07001585 /*
1586 * Check for a NOP for the case when the datum to write is already
1587 * present - it saves time and works around buggy chips that corrupt
1588 * data at other locations when 0xff is written to a location that
1589 * already contains 0xff.
1590 */
1591 oldd = map_read(map, adr);
1592 if (map_word_equal(map, oldd, datum)) {
Brian Norris289c0522011-07-19 10:06:09 -07001593 pr_debug("MTD %s(): NOP\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07001594 __func__);
1595 goto op_done;
1596 }
1597
Todd Poynor02b15e32005-06-07 00:04:39 +01001598 XIP_INVAL_CACHED_RANGE(map, adr, map_bankwidth(map));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001599 ENABLE_VPP(map);
Todd Poynor02b15e32005-06-07 00:04:39 +01001600 xip_disable(map, chip, adr);
Christian Rieschaf744752014-03-06 13:18:29 +01001601
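	/*
	 * Standard AMD/Fujitsu single-word program sequence: two unlock
	 * cycles (0xAA/0x55 to addr_unlock1/addr_unlock2, typically
	 * 0x555/0x2AA), the 0xA0 program command, then the datum written
	 * to the target address.
	 */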
Linus Torvalds1da177e2005-04-16 15:20:36 -07001602 retry:
1603 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
1604 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
1605 cfi_send_gen_cmd(0xA0, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
1606 map_write(map, datum, adr);
Christian Rieschaf744752014-03-06 13:18:29 +01001607 chip->state = mode;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001608
Todd Poynor02b15e32005-06-07 00:04:39 +01001609 INVALIDATE_CACHE_UDELAY(map, chip,
1610 adr, map_bankwidth(map),
1611 chip->word_write_time);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001612
1613 /* See comment above for timeout value. */
Thomas Gleixner1f948b42005-11-07 11:15:37 +00001614 timeo = jiffies + uWriteTimeout;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001615 for (;;) {
Christian Rieschaf744752014-03-06 13:18:29 +01001616 if (chip->state != mode) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001617 /* Someone's suspended the write. Sleep */
1618 DECLARE_WAITQUEUE(wait, current);
1619
1620 set_current_state(TASK_UNINTERRUPTIBLE);
1621 add_wait_queue(&chip->wq, &wait);
Stefani Seiboldc4e77372010-04-18 22:46:44 +02001622 mutex_unlock(&chip->mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001623 schedule();
1624 remove_wait_queue(&chip->wq, &wait);
1625 timeo = jiffies + (HZ / 2); /* FIXME */
Stefani Seiboldc4e77372010-04-18 22:46:44 +02001626 mutex_lock(&chip->mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001627 continue;
1628 }
1629
Konstantin Baidarovb95f9602005-11-07 09:00:05 +00001630 if (time_after(jiffies, timeo) && !chip_ready(map, adr)){
Todd Poynor02b15e32005-06-07 00:04:39 +01001631 xip_enable(map, chip, adr);
Eric W. Biedermannfb4a90b2005-05-20 04:28:26 +01001632 printk(KERN_WARNING "MTD %s(): software timeout\n", __func__);
Todd Poynor02b15e32005-06-07 00:04:39 +01001633 xip_disable(map, chip, adr);
Konstantin Baidarovb95f9602005-11-07 09:00:05 +00001634 break;
Eric W. Biedermannfb4a90b2005-05-20 04:28:26 +01001635 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001636
Konstantin Baidarovb95f9602005-11-07 09:00:05 +00001637 if (chip_ready(map, adr))
1638 break;
1639
Linus Torvalds1da177e2005-04-16 15:20:36 -07001640 /* Latency issues. Drop the lock, wait a while and retry */
Todd Poynor02b15e32005-06-07 00:04:39 +01001641 UDELAY(map, chip, adr, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001642 }
Eric W. Biedermannfb4a90b2005-05-20 04:28:26 +01001643 /* Did we succeed? */
1644 if (!chip_good(map, adr, datum)) {
1645 /* reset on all failures. */
1646 map_write( map, CMD(0xF0), chip->start );
1647 /* FIXME - should have reset delay before continuing */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001648
Thomas Gleixner1f948b42005-11-07 11:15:37 +00001649 if (++retry_cnt <= MAX_WORD_RETRIES)
Eric W. Biedermannfb4a90b2005-05-20 04:28:26 +01001650 goto retry;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001651
Eric W. Biedermannfb4a90b2005-05-20 04:28:26 +01001652 ret = -EIO;
1653 }
Todd Poynor02b15e32005-06-07 00:04:39 +01001654 xip_enable(map, chip, adr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001655 op_done:
Christian Rieschaf744752014-03-06 13:18:29 +01001656 if (mode == FL_OTP_WRITE)
1657 otp_exit(map, chip, adr, map_bankwidth(map));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001658 chip->state = FL_READY;
Paul Parsonse7d93772012-03-07 14:11:16 +00001659 DISABLE_VPP(map);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001660 put_chip(map, chip, adr);
Stefani Seiboldc4e77372010-04-18 22:46:44 +02001661 mutex_unlock(&chip->mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001662
1663 return ret;
1664}
1665
1666
1667static int cfi_amdstd_write_words(struct mtd_info *mtd, loff_t to, size_t len,
1668 size_t *retlen, const u_char *buf)
1669{
1670 struct map_info *map = mtd->priv;
1671 struct cfi_private *cfi = map->fldrv_priv;
1672 int ret = 0;
1673 int chipnum;
1674 unsigned long ofs, chipstart;
1675 DECLARE_WAITQUEUE(wait, current);
1676
Linus Torvalds1da177e2005-04-16 15:20:36 -07001677 chipnum = to >> cfi->chipshift;
1678 ofs = to - (chipnum << cfi->chipshift);
1679 chipstart = cfi->chips[chipnum].start;
1680
1681 /* If it's not bus-aligned, do the first byte write */
1682 if (ofs & (map_bankwidth(map)-1)) {
1683 unsigned long bus_ofs = ofs & ~(map_bankwidth(map)-1);
1684 int i = ofs - bus_ofs;
1685 int n = 0;
1686 map_word tmp_buf;
1687
1688 retry:
Stefani Seiboldc4e77372010-04-18 22:46:44 +02001689 mutex_lock(&cfi->chips[chipnum].mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001690
1691 if (cfi->chips[chipnum].state != FL_READY) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001692 set_current_state(TASK_UNINTERRUPTIBLE);
1693 add_wait_queue(&cfi->chips[chipnum].wq, &wait);
1694
Stefani Seiboldc4e77372010-04-18 22:46:44 +02001695 mutex_unlock(&cfi->chips[chipnum].mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001696
1697 schedule();
1698 remove_wait_queue(&cfi->chips[chipnum].wq, &wait);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001699 goto retry;
1700 }
1701
1702 /* Load 'tmp_buf' with old contents of flash */
1703 tmp_buf = map_read(map, bus_ofs+chipstart);
1704
Stefani Seiboldc4e77372010-04-18 22:46:44 +02001705 mutex_unlock(&cfi->chips[chipnum].mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001706
1707 /* Number of bytes to copy from buffer */
1708 n = min_t(int, len, map_bankwidth(map)-i);
Thomas Gleixner1f948b42005-11-07 11:15:37 +00001709
Linus Torvalds1da177e2005-04-16 15:20:36 -07001710 tmp_buf = map_word_load_partial(map, tmp_buf, buf, i, n);
1711
Thomas Gleixner1f948b42005-11-07 11:15:37 +00001712 ret = do_write_oneword(map, &cfi->chips[chipnum],
Christian Rieschaf744752014-03-06 13:18:29 +01001713 bus_ofs, tmp_buf, FL_WRITING);
Thomas Gleixner1f948b42005-11-07 11:15:37 +00001714 if (ret)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001715 return ret;
Thomas Gleixner1f948b42005-11-07 11:15:37 +00001716
Linus Torvalds1da177e2005-04-16 15:20:36 -07001717 ofs += n;
1718 buf += n;
1719 (*retlen) += n;
1720 len -= n;
1721
1722 if (ofs >> cfi->chipshift) {
Thomas Gleixner1f948b42005-11-07 11:15:37 +00001723 chipnum ++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001724 ofs = 0;
1725 if (chipnum == cfi->numchips)
1726 return 0;
1727 }
1728 }
Thomas Gleixner1f948b42005-11-07 11:15:37 +00001729
Linus Torvalds1da177e2005-04-16 15:20:36 -07001730 /* We are now aligned, write as much as possible */
1731 while(len >= map_bankwidth(map)) {
1732 map_word datum;
1733
1734 datum = map_word_load(map, buf);
1735
1736 ret = do_write_oneword(map, &cfi->chips[chipnum],
Christian Rieschaf744752014-03-06 13:18:29 +01001737 ofs, datum, FL_WRITING);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001738 if (ret)
1739 return ret;
1740
1741 ofs += map_bankwidth(map);
1742 buf += map_bankwidth(map);
1743 (*retlen) += map_bankwidth(map);
1744 len -= map_bankwidth(map);
1745
1746 if (ofs >> cfi->chipshift) {
Thomas Gleixner1f948b42005-11-07 11:15:37 +00001747 chipnum ++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001748 ofs = 0;
1749 if (chipnum == cfi->numchips)
1750 return 0;
1751 chipstart = cfi->chips[chipnum].start;
1752 }
1753 }
1754
1755 /* Write the trailing bytes if any */
1756 if (len & (map_bankwidth(map)-1)) {
1757 map_word tmp_buf;
1758
1759 retry1:
Stefani Seiboldc4e77372010-04-18 22:46:44 +02001760 mutex_lock(&cfi->chips[chipnum].mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001761
1762 if (cfi->chips[chipnum].state != FL_READY) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001763 set_current_state(TASK_UNINTERRUPTIBLE);
1764 add_wait_queue(&cfi->chips[chipnum].wq, &wait);
1765
Stefani Seiboldc4e77372010-04-18 22:46:44 +02001766 mutex_unlock(&cfi->chips[chipnum].mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001767
1768 schedule();
1769 remove_wait_queue(&cfi->chips[chipnum].wq, &wait);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001770 goto retry1;
1771 }
1772
1773 tmp_buf = map_read(map, ofs + chipstart);
1774
Stefani Seiboldc4e77372010-04-18 22:46:44 +02001775 mutex_unlock(&cfi->chips[chipnum].mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001776
1777 tmp_buf = map_word_load_partial(map, tmp_buf, buf, 0, len);
Thomas Gleixner1f948b42005-11-07 11:15:37 +00001778
1779 ret = do_write_oneword(map, &cfi->chips[chipnum],
Christian Rieschaf744752014-03-06 13:18:29 +01001780 ofs, tmp_buf, FL_WRITING);
Thomas Gleixner1f948b42005-11-07 11:15:37 +00001781 if (ret)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001782 return ret;
Thomas Gleixner1f948b42005-11-07 11:15:37 +00001783
Linus Torvalds1da177e2005-04-16 15:20:36 -07001784 (*retlen) += len;
1785 }
1786
1787 return 0;
1788}
1789
1790
1791/*
1792 * FIXME: interleaved mode not tested, and probably not supported!
1793 */
Todd Poynor02b15e32005-06-07 00:04:39 +01001794static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
Thomas Gleixner1f948b42005-11-07 11:15:37 +00001795 unsigned long adr, const u_char *buf,
Todd Poynor02b15e32005-06-07 00:04:39 +01001796 int len)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001797{
1798 struct cfi_private *cfi = map->fldrv_priv;
1799 unsigned long timeo = jiffies + HZ;
Bean Huo6534e682014-07-16 09:05:26 -07001800 /*
1801 * Timeout is calculated according to CFI data, if available.
1802 * See more comments in cfi_cmdset_0002().
1803 */
1804 unsigned long uWriteTimeout =
1805 usecs_to_jiffies(chip->buffer_write_time_max);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001806 int ret = -EIO;
1807 unsigned long cmd_adr;
1808 int z, words;
1809 map_word datum;
1810
1811 adr += chip->start;
1812 cmd_adr = adr;
1813
Stefani Seiboldc4e77372010-04-18 22:46:44 +02001814 mutex_lock(&chip->mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001815 ret = get_chip(map, chip, adr, FL_WRITING);
1816 if (ret) {
Stefani Seiboldc4e77372010-04-18 22:46:44 +02001817 mutex_unlock(&chip->mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001818 return ret;
1819 }
1820
1821 datum = map_word_load(map, buf);
1822
Brian Norris289c0522011-07-19 10:06:09 -07001823 pr_debug("MTD %s(): WRITE 0x%.8lx(0x%.8lx)\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07001824 __func__, adr, datum.x[0] );
1825
Todd Poynor02b15e32005-06-07 00:04:39 +01001826 XIP_INVAL_CACHED_RANGE(map, adr, len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001827 ENABLE_VPP(map);
Todd Poynor02b15e32005-06-07 00:04:39 +01001828 xip_disable(map, chip, cmd_adr);
Thomas Gleixner1f948b42005-11-07 11:15:37 +00001829
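	/*
	 * Buffered program sequence, as issued below: two unlock cycles,
	 * 0x25 (Write to Buffer) at the sector address, the word count
	 * minus one, the data words themselves, and finally 0x29
	 * (Program Buffer to Flash) to start programming.
	 */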
Linus Torvalds1da177e2005-04-16 15:20:36 -07001830 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
1831 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001832
1833 /* Write Buffer Load */
1834 map_write(map, CMD(0x25), cmd_adr);
1835
1836 chip->state = FL_WRITING_TO_BUFFER;
1837
1838 /* Write length of data to come */
1839 words = len / map_bankwidth(map);
1840 map_write(map, CMD(words - 1), cmd_adr);
1841 /* Write data */
1842 z = 0;
1843 while(z < words * map_bankwidth(map)) {
1844 datum = map_word_load(map, buf);
1845 map_write(map, datum, adr + z);
1846
1847 z += map_bankwidth(map);
1848 buf += map_bankwidth(map);
1849 }
1850 z -= map_bankwidth(map);
1851
1852 adr += z;
1853
1854 /* Write Buffer Program Confirm: GO GO GO */
1855 map_write(map, CMD(0x29), cmd_adr);
1856 chip->state = FL_WRITING;
1857
Todd Poynor02b15e32005-06-07 00:04:39 +01001858 INVALIDATE_CACHE_UDELAY(map, chip,
1859 adr, map_bankwidth(map),
1860 chip->word_write_time);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001861
Thomas Gleixner1f948b42005-11-07 11:15:37 +00001862 timeo = jiffies + uWriteTimeout;
1863
Linus Torvalds1da177e2005-04-16 15:20:36 -07001864 for (;;) {
1865 if (chip->state != FL_WRITING) {
1866 /* Someone's suspended the write. Sleep */
1867 DECLARE_WAITQUEUE(wait, current);
1868
1869 set_current_state(TASK_UNINTERRUPTIBLE);
1870 add_wait_queue(&chip->wq, &wait);
Stefani Seiboldc4e77372010-04-18 22:46:44 +02001871 mutex_unlock(&chip->mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001872 schedule();
1873 remove_wait_queue(&chip->wq, &wait);
1874 timeo = jiffies + (HZ / 2); /* FIXME */
Stefani Seiboldc4e77372010-04-18 22:46:44 +02001875 mutex_lock(&chip->mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001876 continue;
1877 }
1878
Konstantin Baidarovb95f9602005-11-07 09:00:05 +00001879 if (time_after(jiffies, timeo) && !chip_ready(map, adr))
1880 break;
1881
Todd Poynor02b15e32005-06-07 00:04:39 +01001882 if (chip_ready(map, adr)) {
1883 xip_enable(map, chip, adr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001884 goto op_done;
Todd Poynor02b15e32005-06-07 00:04:39 +01001885 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001886
1887 /* Latency issues. Drop the lock, wait a while and retry */
Todd Poynor02b15e32005-06-07 00:04:39 +01001888 UDELAY(map, chip, adr, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001889 }
1890
Harald Nordgard-Hansen070c3222012-11-23 23:11:03 +01001891 /*
1892 * Recovery from write-buffer programming failures requires
1893 * the write-to-buffer-reset sequence. Since the last part
1894 * of the sequence also works as a normal reset, we can run
1895 * the same commands regardless of why we are here.
1896 * See e.g.
1897 * http://www.spansion.com/Support/Application%20Notes/MirrorBit_Write_Buffer_Prog_Page_Buffer_Read_AN.pdf
1898 */
1899 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
1900 cfi->device_type, NULL);
1901 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
1902 cfi->device_type, NULL);
1903 cfi_send_gen_cmd(0xF0, cfi->addr_unlock1, chip->start, map, cfi,
1904 cfi->device_type, NULL);
Todd Poynor02b15e32005-06-07 00:04:39 +01001905 xip_enable(map, chip, adr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001906 /* FIXME - should have reset delay before continuing */
1907
Huang Shijie25983b12013-04-01 17:40:18 +08001908 printk(KERN_WARNING "MTD %s(): software timeout, address:0x%.8lx.\n",
1909 __func__, adr);
Todd Poynor02b15e32005-06-07 00:04:39 +01001910
Linus Torvalds1da177e2005-04-16 15:20:36 -07001911 ret = -EIO;
1912 op_done:
1913 chip->state = FL_READY;
Paul Parsonse7d93772012-03-07 14:11:16 +00001914 DISABLE_VPP(map);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001915 put_chip(map, chip, adr);
Stefani Seiboldc4e77372010-04-18 22:46:44 +02001916 mutex_unlock(&chip->mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001917
1918 return ret;
1919}
1920
1921
1922static int cfi_amdstd_write_buffers(struct mtd_info *mtd, loff_t to, size_t len,
1923 size_t *retlen, const u_char *buf)
1924{
1925 struct map_info *map = mtd->priv;
1926 struct cfi_private *cfi = map->fldrv_priv;
1927 int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
1928 int ret = 0;
1929 int chipnum;
1930 unsigned long ofs;
1931
Linus Torvalds1da177e2005-04-16 15:20:36 -07001932 chipnum = to >> cfi->chipshift;
1933 ofs = to - (chipnum << cfi->chipshift);
1934
1935 /* If it's not bus-aligned, do the first word write */
1936 if (ofs & (map_bankwidth(map)-1)) {
1937 size_t local_len = (-ofs)&(map_bankwidth(map)-1);
1938 if (local_len > len)
1939 local_len = len;
1940 ret = cfi_amdstd_write_words(mtd, ofs + (chipnum<<cfi->chipshift),
1941 local_len, retlen, buf);
1942 if (ret)
1943 return ret;
1944 ofs += local_len;
1945 buf += local_len;
1946 len -= local_len;
1947
1948 if (ofs >> cfi->chipshift) {
1949 chipnum ++;
1950 ofs = 0;
1951 if (chipnum == cfi->numchips)
1952 return 0;
1953 }
1954 }
1955
1956 /* Write buffer is worth it only if more than one word to write... */
1957 while (len >= map_bankwidth(map) * 2) {
1958 /* We must not cross write block boundaries */
1959 int size = wbufsize - (ofs & (wbufsize-1));
1960
1961 if (size > len)
1962 size = len;
1963 if (size % map_bankwidth(map))
1964 size -= size % map_bankwidth(map);
1965
Thomas Gleixner1f948b42005-11-07 11:15:37 +00001966 ret = do_write_buffer(map, &cfi->chips[chipnum],
Linus Torvalds1da177e2005-04-16 15:20:36 -07001967 ofs, buf, size);
1968 if (ret)
1969 return ret;
1970
1971 ofs += size;
1972 buf += size;
1973 (*retlen) += size;
1974 len -= size;
1975
1976 if (ofs >> cfi->chipshift) {
Thomas Gleixner1f948b42005-11-07 11:15:37 +00001977 chipnum ++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001978 ofs = 0;
1979 if (chipnum == cfi->numchips)
1980 return 0;
1981 }
1982 }
1983
1984 if (len) {
1985 size_t retlen_dregs = 0;
1986
1987 ret = cfi_amdstd_write_words(mtd, ofs + (chipnum<<cfi->chipshift),
1988 len, &retlen_dregs, buf);
1989
1990 *retlen += retlen_dregs;
1991 return ret;
1992 }
1993
1994 return 0;
1995}
1996
Ira W. Snyder30ec5a22012-01-06 11:29:19 -08001997/*
1998 * Wait for the flash chip to become ready to write data
1999 *
2000 * This is only called during the panic_write() path. When panic_write()
2001 * is called, the kernel is in the process of a panic, and will soon be
2002 * dead. Therefore we don't take any locks, and attempt to get access
2003 * to the chip as soon as possible.
2004 */
2005static int cfi_amdstd_panic_wait(struct map_info *map, struct flchip *chip,
2006 unsigned long adr)
2007{
2008 struct cfi_private *cfi = map->fldrv_priv;
2009 int retries = 10;
2010 int i;
2011
2012 /*
2013 * If the driver thinks the chip is idle, and no toggle bits
2014 * are changing, then the chip is actually idle for sure.
2015 */
2016 if (chip->state == FL_READY && chip_ready(map, adr))
2017 return 0;
2018
2019 /*
2020 * Try several times to reset the chip and then wait for it
2021 * to become idle. The upper limit of a few milliseconds of
2022 * delay isn't a big problem: the kernel is dying anyway. It
2023 * is more important to save the messages.
2024 */
2025 while (retries > 0) {
2026 const unsigned long timeo = (HZ / 1000) + 1;
2027
2028 /* send the reset command */
2029 map_write(map, CMD(0xF0), chip->start);
2030
2031 /* wait for the chip to become ready */
2032 for (i = 0; i < jiffies_to_usecs(timeo); i++) {
2033 if (chip_ready(map, adr))
2034 return 0;
2035
2036 udelay(1);
2037 }
Brian Norris36c6a7ac2014-07-21 19:06:19 -07002038
2039 retries--;
Ira W. Snyder30ec5a22012-01-06 11:29:19 -08002040 }
2041
2042 /* the chip never became ready */
2043 return -EBUSY;
2044}
2045
2046/*
2047 * Write out one word of data to a single flash chip during a kernel panic
2048 *
2049 * This is only called during the panic_write() path. When panic_write()
2050 * is called, the kernel is in the process of a panic, and will soon be
2051 * dead. Therefore we don't take any locks, and attempt to get access
2052 * to the chip as soon as possible.
2053 *
2054 * The implementation of this routine is intentionally similar to
2055 * do_write_oneword(), in order to ease code maintenance.
2056 */
2057static int do_panic_write_oneword(struct map_info *map, struct flchip *chip,
2058 unsigned long adr, map_word datum)
2059{
2060 const unsigned long uWriteTimeout = (HZ / 1000) + 1;
2061 struct cfi_private *cfi = map->fldrv_priv;
2062 int retry_cnt = 0;
2063 map_word oldd;
2064 int ret = 0;
2065 int i;
2066
2067 adr += chip->start;
2068
2069 ret = cfi_amdstd_panic_wait(map, chip, adr);
2070 if (ret)
2071 return ret;
2072
2073 pr_debug("MTD %s(): PANIC WRITE 0x%.8lx(0x%.8lx)\n",
2074 __func__, adr, datum.x[0]);
2075
2076 /*
2077 * Check for a NOP for the case when the datum to write is already
2078 * present - it saves time and works around buggy chips that corrupt
2079 * data at other locations when 0xff is written to a location that
2080 * already contains 0xff.
2081 */
2082 oldd = map_read(map, adr);
2083 if (map_word_equal(map, oldd, datum)) {
2084 pr_debug("MTD %s(): NOP\n", __func__);
2085 goto op_done;
2086 }
2087
2088 ENABLE_VPP(map);
2089
2090retry:
2091 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
2092 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
2093 cfi_send_gen_cmd(0xA0, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
2094 map_write(map, datum, adr);
2095
2096 for (i = 0; i < jiffies_to_usecs(uWriteTimeout); i++) {
2097 if (chip_ready(map, adr))
2098 break;
2099
2100 udelay(1);
2101 }
2102
2103 if (!chip_good(map, adr, datum)) {
2104 /* reset on all failures. */
2105 map_write(map, CMD(0xF0), chip->start);
2106 /* FIXME - should have reset delay before continuing */
2107
2108 if (++retry_cnt <= MAX_WORD_RETRIES)
2109 goto retry;
2110
2111 ret = -EIO;
2112 }
2113
2114op_done:
2115 DISABLE_VPP(map);
2116 return ret;
2117}
2118
2119/*
2120 * Write out some data during a kernel panic
2121 *
2122 * This is used by the mtdoops driver to save the dying messages from a
2123 * kernel which has panic'd.
2124 *
2125 * This routine ignores all of the locking used throughout the rest of the
2126 * driver, in order to ensure that the data gets written out no matter what
2127 * state this driver (and the flash chip itself) was in when the kernel crashed.
2128 *
2129 * The implementation of this routine is intentionally similar to
2130 * cfi_amdstd_write_words(), in order to ease code maintenance.
2131 */
2132static int cfi_amdstd_panic_write(struct mtd_info *mtd, loff_t to, size_t len,
2133 size_t *retlen, const u_char *buf)
2134{
2135 struct map_info *map = mtd->priv;
2136 struct cfi_private *cfi = map->fldrv_priv;
2137 unsigned long ofs, chipstart;
2138 int ret = 0;
2139 int chipnum;
2140
Ira W. Snyder30ec5a22012-01-06 11:29:19 -08002141 chipnum = to >> cfi->chipshift;
2142 ofs = to - (chipnum << cfi->chipshift);
2143 chipstart = cfi->chips[chipnum].start;
2144
2145 /* If it's not bus aligned, do the first byte write */
2146 if (ofs & (map_bankwidth(map) - 1)) {
2147 unsigned long bus_ofs = ofs & ~(map_bankwidth(map) - 1);
2148 int i = ofs - bus_ofs;
2149 int n = 0;
2150 map_word tmp_buf;
2151
2152 ret = cfi_amdstd_panic_wait(map, &cfi->chips[chipnum], bus_ofs);
2153 if (ret)
2154 return ret;
2155
2156 /* Load 'tmp_buf' with old contents of flash */
2157 tmp_buf = map_read(map, bus_ofs + chipstart);
2158
2159 /* Number of bytes to copy from buffer */
2160 n = min_t(int, len, map_bankwidth(map) - i);
2161
2162 tmp_buf = map_word_load_partial(map, tmp_buf, buf, i, n);
2163
2164 ret = do_panic_write_oneword(map, &cfi->chips[chipnum],
2165 bus_ofs, tmp_buf);
2166 if (ret)
2167 return ret;
2168
2169 ofs += n;
2170 buf += n;
2171 (*retlen) += n;
2172 len -= n;
2173
2174 if (ofs >> cfi->chipshift) {
2175 chipnum++;
2176 ofs = 0;
2177 if (chipnum == cfi->numchips)
2178 return 0;
2179 }
2180 }
2181
2182 /* We are now aligned, write as much as possible */
2183 while (len >= map_bankwidth(map)) {
2184 map_word datum;
2185
2186 datum = map_word_load(map, buf);
2187
2188 ret = do_panic_write_oneword(map, &cfi->chips[chipnum],
2189 ofs, datum);
2190 if (ret)
2191 return ret;
2192
2193 ofs += map_bankwidth(map);
2194 buf += map_bankwidth(map);
2195 (*retlen) += map_bankwidth(map);
2196 len -= map_bankwidth(map);
2197
2198 if (ofs >> cfi->chipshift) {
2199 chipnum++;
2200 ofs = 0;
2201 if (chipnum == cfi->numchips)
2202 return 0;
2203
2204 chipstart = cfi->chips[chipnum].start;
2205 }
2206 }
2207
2208 /* Write the trailing bytes if any */
2209 if (len & (map_bankwidth(map) - 1)) {
2210 map_word tmp_buf;
2211
2212 ret = cfi_amdstd_panic_wait(map, &cfi->chips[chipnum], ofs);
2213 if (ret)
2214 return ret;
2215
2216 tmp_buf = map_read(map, ofs + chipstart);
2217
2218 tmp_buf = map_word_load_partial(map, tmp_buf, buf, 0, len);
2219
2220 ret = do_panic_write_oneword(map, &cfi->chips[chipnum],
2221 ofs, tmp_buf);
2222 if (ret)
2223 return ret;
2224
2225 (*retlen) += len;
2226 }
2227
2228 return 0;
2229}
2230
Linus Torvalds1da177e2005-04-16 15:20:36 -07002231
2232/*
2233 * Handle devices with one erase region, that only implement
2234 * the chip erase command.
2235 */
Todd Poynor02b15e32005-06-07 00:04:39 +01002236static int __xipram do_erase_chip(struct map_info *map, struct flchip *chip)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002237{
2238 struct cfi_private *cfi = map->fldrv_priv;
2239 unsigned long timeo = jiffies + HZ;
2240 unsigned long int adr;
2241 DECLARE_WAITQUEUE(wait, current);
2242 int ret = 0;
2243
2244 adr = cfi->addr_unlock1;
2245
Stefani Seiboldc4e77372010-04-18 22:46:44 +02002246 mutex_lock(&chip->mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002247 ret = get_chip(map, chip, adr, FL_WRITING);
2248 if (ret) {
Stefani Seiboldc4e77372010-04-18 22:46:44 +02002249 mutex_unlock(&chip->mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002250 return ret;
2251 }
2252
Brian Norris289c0522011-07-19 10:06:09 -07002253 pr_debug("MTD %s(): ERASE 0x%.8lx\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07002254 __func__, chip->start );
2255
Todd Poynor02b15e32005-06-07 00:04:39 +01002256 XIP_INVAL_CACHED_RANGE(map, adr, map->size);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002257 ENABLE_VPP(map);
Todd Poynor02b15e32005-06-07 00:04:39 +01002258 xip_disable(map, chip, adr);
2259
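	/*
	 * Chip erase is the six-cycle sequence below: two unlock cycles,
	 * 0x80 (erase setup), two more unlock cycles, then 0x10 (chip
	 * erase), all sent to the unlock addresses.
	 */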
Linus Torvalds1da177e2005-04-16 15:20:36 -07002260 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
2261 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
2262 cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
2263 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
2264 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
2265 cfi_send_gen_cmd(0x10, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
2266
2267 chip->state = FL_ERASING;
2268 chip->erase_suspended = 0;
2269 chip->in_progress_block_addr = adr;
2270
Todd Poynor02b15e32005-06-07 00:04:39 +01002271 INVALIDATE_CACHE_UDELAY(map, chip,
2272 adr, map->size,
2273 chip->erase_time*500);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002274
2275 timeo = jiffies + (HZ*20);
2276
2277 for (;;) {
2278 if (chip->state != FL_ERASING) {
2279 /* Someone's suspended the erase. Sleep */
2280 set_current_state(TASK_UNINTERRUPTIBLE);
2281 add_wait_queue(&chip->wq, &wait);
Stefani Seiboldc4e77372010-04-18 22:46:44 +02002282 mutex_unlock(&chip->mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002283 schedule();
2284 remove_wait_queue(&chip->wq, &wait);
Stefani Seiboldc4e77372010-04-18 22:46:44 +02002285 mutex_lock(&chip->mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002286 continue;
2287 }
2288 if (chip->erase_suspended) {
2289 /* This erase was suspended and resumed.
2290 Adjust the timeout */
2291 timeo = jiffies + (HZ*20); /* FIXME */
2292 chip->erase_suspended = 0;
2293 }
2294
2295 if (chip_ready(map, adr))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002296 break;
2297
Eric W. Biedermannfb4a90b2005-05-20 04:28:26 +01002298 if (time_after(jiffies, timeo)) {
2299 printk(KERN_WARNING "MTD %s(): software timeout\n",
2300 __func__ );
2301 break;
2302 }
2303
Linus Torvalds1da177e2005-04-16 15:20:36 -07002304 /* Latency issues. Drop the lock, wait a while and retry */
Todd Poynor02b15e32005-06-07 00:04:39 +01002305 UDELAY(map, chip, adr, 1000000/HZ);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002306 }
Eric W. Biedermannfb4a90b2005-05-20 04:28:26 +01002307 /* Did we succeed? */
2308 if (!chip_good(map, adr, map_word_ff(map))) {
2309 /* reset on all failures. */
2310 map_write( map, CMD(0xF0), chip->start );
2311 /* FIXME - should have reset delay before continuing */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002312
Eric W. Biedermannfb4a90b2005-05-20 04:28:26 +01002313 ret = -EIO;
2314 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002315
Linus Torvalds1da177e2005-04-16 15:20:36 -07002316 chip->state = FL_READY;
Todd Poynor02b15e32005-06-07 00:04:39 +01002317 xip_enable(map, chip, adr);
Paul Parsonse7d93772012-03-07 14:11:16 +00002318 DISABLE_VPP(map);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002319 put_chip(map, chip, adr);
Stefani Seiboldc4e77372010-04-18 22:46:44 +02002320 mutex_unlock(&chip->mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002321
2322 return ret;
2323}
2324
2325
Todd Poynor02b15e32005-06-07 00:04:39 +01002326static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr, int len, void *thunk)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002327{
2328 struct cfi_private *cfi = map->fldrv_priv;
2329 unsigned long timeo = jiffies + HZ;
2330 DECLARE_WAITQUEUE(wait, current);
2331 int ret = 0;
2332
2333 adr += chip->start;
2334
Stefani Seiboldc4e77372010-04-18 22:46:44 +02002335 mutex_lock(&chip->mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002336 ret = get_chip(map, chip, adr, FL_ERASING);
2337 if (ret) {
Stefani Seiboldc4e77372010-04-18 22:46:44 +02002338 mutex_unlock(&chip->mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002339 return ret;
2340 }
2341
Brian Norris289c0522011-07-19 10:06:09 -07002342 pr_debug("MTD %s(): ERASE 0x%.8lx\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07002343 __func__, adr );
2344
Todd Poynor02b15e32005-06-07 00:04:39 +01002345 XIP_INVAL_CACHED_RANGE(map, adr, len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002346 ENABLE_VPP(map);
Todd Poynor02b15e32005-06-07 00:04:39 +01002347 xip_disable(map, chip, adr);
2348
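	/*
	 * Sector erase: two unlock cycles, 0x80 (erase setup), two more
	 * unlock cycles, then the sector-erase command (normally 0x30)
	 * written to the address of the sector itself.
	 */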
Linus Torvalds1da177e2005-04-16 15:20:36 -07002349 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
2350 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
2351 cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
2352 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
2353 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
Guillaume LECERF08968042010-10-26 10:45:23 +01002354 map_write(map, cfi->sector_erase_cmd, adr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002355
2356 chip->state = FL_ERASING;
2357 chip->erase_suspended = 0;
2358 chip->in_progress_block_addr = adr;
Todd Poynor02b15e32005-06-07 00:04:39 +01002359
2360 INVALIDATE_CACHE_UDELAY(map, chip,
2361 adr, len,
2362 chip->erase_time*500);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002363
2364 timeo = jiffies + (HZ*20);
2365
2366 for (;;) {
2367 if (chip->state != FL_ERASING) {
2368 /* Someone's suspended the erase. Sleep */
2369 set_current_state(TASK_UNINTERRUPTIBLE);
2370 add_wait_queue(&chip->wq, &wait);
Stefani Seiboldc4e77372010-04-18 22:46:44 +02002371 mutex_unlock(&chip->mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002372 schedule();
2373 remove_wait_queue(&chip->wq, &wait);
Stefani Seiboldc4e77372010-04-18 22:46:44 +02002374 mutex_lock(&chip->mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002375 continue;
2376 }
2377 if (chip->erase_suspended) {
2378 /* This erase was suspended and resumed.
2379 Adjust the timeout */
2380 timeo = jiffies + (HZ*20); /* FIXME */
2381 chip->erase_suspended = 0;
2382 }
2383
Todd Poynor02b15e32005-06-07 00:04:39 +01002384 if (chip_ready(map, adr)) {
2385 xip_enable(map, chip, adr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002386 break;
Todd Poynor02b15e32005-06-07 00:04:39 +01002387 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002388
Eric W. Biedermannfb4a90b2005-05-20 04:28:26 +01002389 if (time_after(jiffies, timeo)) {
Todd Poynor02b15e32005-06-07 00:04:39 +01002390 xip_enable(map, chip, adr);
Eric W. Biedermannfb4a90b2005-05-20 04:28:26 +01002391 printk(KERN_WARNING "MTD %s(): software timeout\n",
2392 __func__ );
2393 break;
2394 }
2395
Linus Torvalds1da177e2005-04-16 15:20:36 -07002396 /* Latency issues. Drop the lock, wait a while and retry */
Todd Poynor02b15e32005-06-07 00:04:39 +01002397 UDELAY(map, chip, adr, 1000000/HZ);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002398 }
Eric W. Biedermannfb4a90b2005-05-20 04:28:26 +01002399 /* Did we succeed? */
Thomas Gleixner22fd9a82005-05-24 15:33:49 +02002400 if (!chip_good(map, adr, map_word_ff(map))) {
Eric W. Biedermannfb4a90b2005-05-20 04:28:26 +01002401 /* reset on all failures. */
2402 map_write( map, CMD(0xF0), chip->start );
2403 /* FIXME - should have reset delay before continuing */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002404
Eric W. Biedermannfb4a90b2005-05-20 04:28:26 +01002405 ret = -EIO;
2406 }
2407
Linus Torvalds1da177e2005-04-16 15:20:36 -07002408 chip->state = FL_READY;
Paul Parsonse7d93772012-03-07 14:11:16 +00002409 DISABLE_VPP(map);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002410 put_chip(map, chip, adr);
Stefani Seiboldc4e77372010-04-18 22:46:44 +02002411 mutex_unlock(&chip->mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002412 return ret;
2413}
2414
2415
Ben Dooksce0f33a2007-05-28 19:59:00 +01002416static int cfi_amdstd_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002417{
2418 unsigned long ofs, len;
2419 int ret;
2420
2421 ofs = instr->addr;
2422 len = instr->len;
2423
2424 ret = cfi_varsize_frob(mtd, do_erase_oneblock, ofs, len, NULL);
2425 if (ret)
2426 return ret;
2427
2428 instr->state = MTD_ERASE_DONE;
2429 mtd_erase_callback(instr);
Thomas Gleixner1f948b42005-11-07 11:15:37 +00002430
Linus Torvalds1da177e2005-04-16 15:20:36 -07002431 return 0;
2432}
2433
2434
2435static int cfi_amdstd_erase_chip(struct mtd_info *mtd, struct erase_info *instr)
2436{
2437 struct map_info *map = mtd->priv;
2438 struct cfi_private *cfi = map->fldrv_priv;
2439 int ret = 0;
2440
2441 if (instr->addr != 0)
2442 return -EINVAL;
2443
2444 if (instr->len != mtd->size)
2445 return -EINVAL;
2446
2447 ret = do_erase_chip(map, &cfi->chips[0]);
2448 if (ret)
2449 return ret;
2450
2451 instr->state = MTD_ERASE_DONE;
2452 mtd_erase_callback(instr);
Thomas Gleixner1f948b42005-11-07 11:15:37 +00002453
Linus Torvalds1da177e2005-04-16 15:20:36 -07002454 return 0;
2455}
2456
Haavard Skinnemoen01655082006-08-09 11:06:07 +02002457static int do_atmel_lock(struct map_info *map, struct flchip *chip,
2458 unsigned long adr, int len, void *thunk)
2459{
2460 struct cfi_private *cfi = map->fldrv_priv;
2461 int ret;
2462
Stefani Seiboldc4e77372010-04-18 22:46:44 +02002463 mutex_lock(&chip->mutex);
Haavard Skinnemoen01655082006-08-09 11:06:07 +02002464 ret = get_chip(map, chip, adr + chip->start, FL_LOCKING);
2465 if (ret)
2466 goto out_unlock;
2467 chip->state = FL_LOCKING;
2468
Brian Norris0a32a102011-07-19 10:06:10 -07002469 pr_debug("MTD %s(): LOCK 0x%08lx len %d\n", __func__, adr, len);
Haavard Skinnemoen01655082006-08-09 11:06:07 +02002470
2471 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
2472 cfi->device_type, NULL);
2473 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
2474 cfi->device_type, NULL);
2475 cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi,
2476 cfi->device_type, NULL);
2477 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
2478 cfi->device_type, NULL);
2479 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
2480 cfi->device_type, NULL);
2481 map_write(map, CMD(0x40), chip->start + adr);
2482
2483 chip->state = FL_READY;
2484 put_chip(map, chip, adr + chip->start);
2485 ret = 0;
2486
2487out_unlock:
Stefani Seiboldc4e77372010-04-18 22:46:44 +02002488 mutex_unlock(&chip->mutex);
Haavard Skinnemoen01655082006-08-09 11:06:07 +02002489 return ret;
2490}
2491
2492static int do_atmel_unlock(struct map_info *map, struct flchip *chip,
2493 unsigned long adr, int len, void *thunk)
2494{
2495 struct cfi_private *cfi = map->fldrv_priv;
2496 int ret;
2497
Stefani Seiboldc4e77372010-04-18 22:46:44 +02002498 mutex_lock(&chip->mutex);
Haavard Skinnemoen01655082006-08-09 11:06:07 +02002499 ret = get_chip(map, chip, adr + chip->start, FL_UNLOCKING);
2500 if (ret)
2501 goto out_unlock;
2502 chip->state = FL_UNLOCKING;
2503
Brian Norris0a32a102011-07-19 10:06:10 -07002504	pr_debug("MTD %s(): UNLOCK 0x%08lx len %d\n", __func__, adr, len);
Haavard Skinnemoen01655082006-08-09 11:06:07 +02002505
2506 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
2507 cfi->device_type, NULL);
2508 map_write(map, CMD(0x70), adr);
2509
2510 chip->state = FL_READY;
2511 put_chip(map, chip, adr + chip->start);
2512 ret = 0;
2513
2514out_unlock:
Stefani Seiboldc4e77372010-04-18 22:46:44 +02002515 mutex_unlock(&chip->mutex);
Haavard Skinnemoen01655082006-08-09 11:06:07 +02002516 return ret;
2517}
2518
Adrian Hunter69423d92008-12-10 13:37:21 +00002519static int cfi_atmel_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
Haavard Skinnemoen01655082006-08-09 11:06:07 +02002520{
2521 return cfi_varsize_frob(mtd, do_atmel_lock, ofs, len, NULL);
2522}
2523
Adrian Hunter69423d92008-12-10 13:37:21 +00002524static int cfi_atmel_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
Haavard Skinnemoen01655082006-08-09 11:06:07 +02002525{
2526 return cfi_varsize_frob(mtd, do_atmel_unlock, ofs, len, NULL);
2527}
2528
Stefan Roese1648eaa2013-01-18 13:10:05 +01002529/*
2530 * Advanced Sector Protection - PPB (Persistent Protection Bit) locking
2531 */
2532
2533struct ppb_lock {
2534 struct flchip *chip;
2535 loff_t offset;
2536 int locked;
2537};
2538
2539#define MAX_SECTORS 512
2540
2541#define DO_XXLOCK_ONEBLOCK_LOCK ((void *)1)
2542#define DO_XXLOCK_ONEBLOCK_UNLOCK ((void *)2)
2543#define DO_XXLOCK_ONEBLOCK_GETLOCK ((void *)3)
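/*
 * These thunk values are passed through cfi_varsize_frob() down to
 * do_ppb_xxlock() to select locking, (global) unlocking or a status
 * query for one sector.
 */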
2544
2545static int __maybe_unused do_ppb_xxlock(struct map_info *map,
2546 struct flchip *chip,
2547 unsigned long adr, int len, void *thunk)
2548{
2549 struct cfi_private *cfi = map->fldrv_priv;
2550 unsigned long timeo;
2551 int ret;
2552
2553 mutex_lock(&chip->mutex);
2554 ret = get_chip(map, chip, adr + chip->start, FL_LOCKING);
2555 if (ret) {
2556 mutex_unlock(&chip->mutex);
2557 return ret;
2558 }
2559
2560 pr_debug("MTD %s(): XXLOCK 0x%08lx len %d\n", __func__, adr, len);
2561
2562 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
2563 cfi->device_type, NULL);
2564 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
2565 cfi->device_type, NULL);
2566 /* PPB entry command */
2567 cfi_send_gen_cmd(0xC0, cfi->addr_unlock1, chip->start, map, cfi,
2568 cfi->device_type, NULL);
2569
2570 if (thunk == DO_XXLOCK_ONEBLOCK_LOCK) {
2571 chip->state = FL_LOCKING;
2572 map_write(map, CMD(0xA0), chip->start + adr);
2573 map_write(map, CMD(0x00), chip->start + adr);
2574 } else if (thunk == DO_XXLOCK_ONEBLOCK_UNLOCK) {
2575 /*
2576 * Unlocking of one specific sector is not supported, so we
2577 * have to unlock all sectors of this device instead
2578 */
2579 chip->state = FL_UNLOCKING;
2580 map_write(map, CMD(0x80), chip->start);
2581 map_write(map, CMD(0x30), chip->start);
2582 } else if (thunk == DO_XXLOCK_ONEBLOCK_GETLOCK) {
2583 chip->state = FL_JEDEC_QUERY;
2584			/* PPB status: device reads 0->locked, 1->unlocked; invert so we return 1 when locked */
2585 ret = !cfi_read_query(map, adr);
2586 } else
2587 BUG();
2588
2589 /*
2590 * Wait for some time as unlocking of all sectors takes quite long
2591 */
2592 timeo = jiffies + msecs_to_jiffies(2000); /* 2s max (un)locking */
2593 for (;;) {
2594 if (chip_ready(map, adr))
2595 break;
2596
2597 if (time_after(jiffies, timeo)) {
2598 printk(KERN_ERR "Waiting for chip to be ready timed out.\n");
2599 ret = -EIO;
2600 break;
2601 }
2602
2603 UDELAY(map, chip, adr, 1);
2604 }
2605
2606 /* Exit BC commands */
2607 map_write(map, CMD(0x90), chip->start);
2608 map_write(map, CMD(0x00), chip->start);
2609
2610 chip->state = FL_READY;
2611 put_chip(map, chip, adr + chip->start);
2612 mutex_unlock(&chip->mutex);
2613
2614 return ret;
2615}
2616
2617static int __maybe_unused cfi_ppb_lock(struct mtd_info *mtd, loff_t ofs,
2618 uint64_t len)
2619{
2620 return cfi_varsize_frob(mtd, do_ppb_xxlock, ofs, len,
2621 DO_XXLOCK_ONEBLOCK_LOCK);
2622}
2623
2624static int __maybe_unused cfi_ppb_unlock(struct mtd_info *mtd, loff_t ofs,
2625 uint64_t len)
2626{
2627 struct mtd_erase_region_info *regions = mtd->eraseregions;
2628 struct map_info *map = mtd->priv;
2629 struct cfi_private *cfi = map->fldrv_priv;
2630 struct ppb_lock *sect;
2631 unsigned long adr;
2632 loff_t offset;
2633 uint64_t length;
2634 int chipnum;
2635 int i;
2636 int sectors;
2637 int ret;
2638
2639 /*
2640 * PPB unlocking always unlocks all sectors of the flash chip.
2641	 * We need to re-lock all previously locked sectors. So let's
2642 * first check the locking status of all sectors and save
2643 * it for future use.
2644 */
2645 sect = kzalloc(MAX_SECTORS * sizeof(struct ppb_lock), GFP_KERNEL);
2646 if (!sect)
2647 return -ENOMEM;
2648
2649 /*
2650 * This code to walk all sectors is a slightly modified version
2651 * of the cfi_varsize_frob() code.
2652 */
2653 i = 0;
2654 chipnum = 0;
2655 adr = 0;
2656 sectors = 0;
2657 offset = 0;
2658 length = mtd->size;
2659
2660 while (length) {
2661 int size = regions[i].erasesize;
2662
2663 /*
2664 * Only test sectors that shall not be unlocked. The other
2665 * sectors shall be unlocked, so lets keep their locking
2666 * status at "unlocked" (locked=0) for the final re-locking.
2667 */
2668 if ((adr < ofs) || (adr >= (ofs + len))) {
2669 sect[sectors].chip = &cfi->chips[chipnum];
2670 sect[sectors].offset = offset;
2671 sect[sectors].locked = do_ppb_xxlock(
2672 map, &cfi->chips[chipnum], adr, 0,
2673 DO_XXLOCK_ONEBLOCK_GETLOCK);
2674 }
2675
2676 adr += size;
2677 offset += size;
2678 length -= size;
2679
2680 if (offset == regions[i].offset + size * regions[i].numblocks)
2681 i++;
2682
2683 if (adr >> cfi->chipshift) {
2684 adr = 0;
2685 chipnum++;
2686
2687 if (chipnum >= cfi->numchips)
2688 break;
2689 }
2690
2691 sectors++;
2692 if (sectors >= MAX_SECTORS) {
2693 printk(KERN_ERR "Only %d sectors for PPB locking supported!\n",
2694 MAX_SECTORS);
2695 kfree(sect);
2696 return -EINVAL;
2697 }
2698 }
2699
2700 /* Now unlock the whole chip */
2701 ret = cfi_varsize_frob(mtd, do_ppb_xxlock, ofs, len,
2702 DO_XXLOCK_ONEBLOCK_UNLOCK);
2703 if (ret) {
2704 kfree(sect);
2705 return ret;
2706 }
2707
2708 /*
2709 * PPB unlocking always unlocks all sectors of the flash chip.
2710 * We need to re-lock all previously locked sectors.
2711 */
2712 for (i = 0; i < sectors; i++) {
2713 if (sect[i].locked)
2714 do_ppb_xxlock(map, sect[i].chip, sect[i].offset, 0,
2715 DO_XXLOCK_ONEBLOCK_LOCK);
2716 }
2717
2718 kfree(sect);
2719 return ret;
2720}
2721
2722static int __maybe_unused cfi_ppb_is_locked(struct mtd_info *mtd, loff_t ofs,
2723 uint64_t len)
2724{
2725 return cfi_varsize_frob(mtd, do_ppb_xxlock, ofs, len,
2726 DO_XXLOCK_ONEBLOCK_GETLOCK) ? 1 : 0;
2727}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002728
2729static void cfi_amdstd_sync (struct mtd_info *mtd)
2730{
2731 struct map_info *map = mtd->priv;
2732 struct cfi_private *cfi = map->fldrv_priv;
2733 int i;
2734 struct flchip *chip;
2735 int ret = 0;
2736 DECLARE_WAITQUEUE(wait, current);
2737
2738 for (i=0; !ret && i<cfi->numchips; i++) {
2739 chip = &cfi->chips[i];
2740
2741 retry:
Stefani Seiboldc4e77372010-04-18 22:46:44 +02002742 mutex_lock(&chip->mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002743
2744 switch(chip->state) {
2745 case FL_READY:
2746 case FL_STATUS:
2747 case FL_CFI_QUERY:
2748 case FL_JEDEC_QUERY:
2749 chip->oldstate = chip->state;
2750 chip->state = FL_SYNCING;
Thomas Gleixner1f948b42005-11-07 11:15:37 +00002751 /* No need to wake_up() on this state change -
Linus Torvalds1da177e2005-04-16 15:20:36 -07002752 * as the whole point is that nobody can do anything
2753 * with the chip now anyway.
2754 */
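			/* fall through */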
2755 case FL_SYNCING:
Stefani Seiboldc4e77372010-04-18 22:46:44 +02002756 mutex_unlock(&chip->mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002757 break;
2758
2759 default:
2760 /* Not an idle state */
Dmitry Adamushkof8e30e42008-04-08 17:41:59 -07002761 set_current_state(TASK_UNINTERRUPTIBLE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002762 add_wait_queue(&chip->wq, &wait);
Thomas Gleixner1f948b42005-11-07 11:15:37 +00002763
Stefani Seiboldc4e77372010-04-18 22:46:44 +02002764 mutex_unlock(&chip->mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002765
2766 schedule();
2767
2768 remove_wait_queue(&chip->wq, &wait);
Thomas Gleixner1f948b42005-11-07 11:15:37 +00002769
Linus Torvalds1da177e2005-04-16 15:20:36 -07002770 goto retry;
2771 }
2772 }
2773
2774 /* Unlock the chips again */
2775
2776 for (i--; i >=0; i--) {
2777 chip = &cfi->chips[i];
2778
Stefani Seiboldc4e77372010-04-18 22:46:44 +02002779 mutex_lock(&chip->mutex);
Thomas Gleixner1f948b42005-11-07 11:15:37 +00002780
Linus Torvalds1da177e2005-04-16 15:20:36 -07002781 if (chip->state == FL_SYNCING) {
2782 chip->state = chip->oldstate;
2783 wake_up(&chip->wq);
2784 }
Stefani Seiboldc4e77372010-04-18 22:46:44 +02002785 mutex_unlock(&chip->mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002786 }
2787}
2788
2789
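/*
 * Mark every idle chip as FL_PM_SUSPENDED. If any chip is busy, fail
 * with -EAGAIN and return the chips already suspended to their previous
 * state.
 */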
static int cfi_amdstd_suspend(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i;
	struct flchip *chip;
	int ret = 0;

	for (i = 0; !ret && i < cfi->numchips; i++) {
		chip = &cfi->chips[i];

		mutex_lock(&chip->mutex);

		switch (chip->state) {
		case FL_READY:
		case FL_STATUS:
		case FL_CFI_QUERY:
		case FL_JEDEC_QUERY:
			chip->oldstate = chip->state;
			chip->state = FL_PM_SUSPENDED;
			/* No need to wake_up() on this state change -
			 * as the whole point is that nobody can do anything
			 * with the chip now anyway.
			 */
			/* Fall through */
		case FL_PM_SUSPENDED:
			break;

		default:
			ret = -EAGAIN;
			break;
		}
		mutex_unlock(&chip->mutex);
	}

	/* Unlock the chips again */

	if (ret) {
		for (i--; i >= 0; i--) {
			chip = &cfi->chips[i];

			mutex_lock(&chip->mutex);

			if (chip->state == FL_PM_SUSPENDED) {
				chip->state = chip->oldstate;
				wake_up(&chip->wq);
			}
			mutex_unlock(&chip->mutex);
		}
	}

	return ret;
}


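/*
 * Take the chips out of FL_PM_SUSPENDED: the 0xF0 reset command returns
 * each chip to read array mode before any waiters are woken up.
 */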
static void cfi_amdstd_resume(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i;
	struct flchip *chip;

	for (i = 0; i < cfi->numchips; i++) {

		chip = &cfi->chips[i];

		mutex_lock(&chip->mutex);

		if (chip->state == FL_PM_SUSPENDED) {
			chip->state = FL_READY;
			map_write(map, CMD(0xF0), chip->start);
			wake_up(&chip->wq);
		} else
			printk(KERN_ERR "Argh. Chip not in PM_SUSPENDED state upon resume()\n");

		mutex_unlock(&chip->mutex);
	}
}


/*
 * Ensure that the flash device is put back into read array mode before
 * unloading the driver or rebooting. On some systems, rebooting while
 * the flash is in query/program/erase mode will prevent the CPU from
 * fetching the bootloader code, requiring a hard reset or power cycle.
 */
static int cfi_amdstd_reset(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i, ret;
	struct flchip *chip;

	for (i = 0; i < cfi->numchips; i++) {

		chip = &cfi->chips[i];

		mutex_lock(&chip->mutex);

		ret = get_chip(map, chip, chip->start, FL_SHUTDOWN);
		if (!ret) {
			map_write(map, CMD(0xF0), chip->start);
			chip->state = FL_SHUTDOWN;
			put_chip(map, chip, chip->start);
		}

		mutex_unlock(&chip->mutex);
	}

	return 0;
}


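/*
 * Reboot notifier callback: force the chips back into read array mode
 * so that the CPU can still fetch the bootloader after the reboot.
 */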
static int cfi_amdstd_reboot(struct notifier_block *nb, unsigned long val,
			     void *v)
{
	struct mtd_info *mtd;

	mtd = container_of(nb, struct mtd_info, reboot_notifier);
	cfi_amdstd_reset(mtd);
	return NOTIFY_DONE;
}


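/*
 * Driver tear-down: reset the chips to read array mode, unregister the
 * reboot notifier and free the command-set private data.
 */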
static void cfi_amdstd_destroy(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	cfi_amdstd_reset(mtd);
	unregister_reboot_notifier(&mtd->reboot_notifier);
	kfree(cfi->cmdset_priv);
	kfree(cfi->cfiq);
	kfree(cfi);
	kfree(mtd->eraseregions);
}

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Crossnet Co. <info@crossnet.co.jp> et al.");
MODULE_DESCRIPTION("MTD chip driver for AMD/Fujitsu flash chips");
MODULE_ALIAS("cfi_cmdset_0006");
MODULE_ALIAS("cfi_cmdset_0701");