/*
 * Copyright (C) 2006-2008 Nokia Corporation
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; see the file COPYING. If not, write to the Free Software
 * Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Test OOB read and write on MTD device.
 *
 * Author: Adrian Hunter <ext-adrian.hunter@nokia.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <asm/div64.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/err.h>
#include <linux/mtd/mtd.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/random.h>

#include "mtd_test.h"

static int dev = -EINVAL;
static int bitflip_limit;
module_param(dev, int, S_IRUGO);
MODULE_PARM_DESC(dev, "MTD device number to use");
module_param(bitflip_limit, int, S_IRUGO);
MODULE_PARM_DESC(bitflip_limit, "Max. allowed bitflips per page");
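/*
 * Example usage (assuming the test is built as mtd_oobtest.ko; the module
 * name may differ depending on the kernel build):
 *
 *   modprobe mtd_oobtest dev=0 bitflip_limit=1
 *
 * 'dev' selects the MTD device to test and 'bitflip_limit' sets how many
 * bitflips per page are tolerated before a verify failure is reported.
 */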

static struct mtd_info *mtd;
static unsigned char *readbuf;
static unsigned char *writebuf;
static unsigned char *bbt;

static int ebcnt;
static int pgcnt;
static int errcnt;
static int use_offset;
static int use_len;
static int use_len_max;
static int vary_offset;
static struct rnd_state rnd_state;

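/*
 * Step the OOB window used for the varying-offset test: shrink use_len by
 * one on each call, and when it reaches zero advance use_offset (wrapping
 * at use_len_max) and reset use_len to the remaining space. Over many pages
 * this walks through the possible (offset, length) combinations within the
 * available OOB area.
 */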
static void do_vary_offset(void)
{
	use_len -= 1;
	if (use_len < 1) {
		use_offset += 1;
		if (use_offset >= use_len_max)
			use_offset = 0;
		use_len = use_len_max - use_offset;
	}
}

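/*
 * Fill writebuf with a deterministic pseudo-random pattern and write it to
 * the free OOB area of every page in eraseblock 'ebnum', use_len bytes at
 * use_offset (MTD_OPS_AUTO_OOB mode). The verify functions reseed the same
 * PRNG so the expected data can be regenerated.
 */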
static int write_eraseblock(int ebnum)
{
	int i;
	struct mtd_oob_ops ops;
	int err = 0;
	loff_t addr = (loff_t)ebnum * mtd->erasesize;

	prandom_bytes_state(&rnd_state, writebuf, use_len_max * pgcnt);
	for (i = 0; i < pgcnt; ++i, addr += mtd->writesize) {
		ops.mode = MTD_OPS_AUTO_OOB;
		ops.len = 0;
		ops.retlen = 0;
		ops.ooblen = use_len;
		ops.oobretlen = 0;
		ops.ooboffs = use_offset;
		ops.datbuf = NULL;
		ops.oobbuf = writebuf + (use_len_max * i) + use_offset;
		err = mtd_write_oob(mtd, addr, &ops);
		if (err || ops.oobretlen != use_len) {
			pr_err("error: writeoob failed at %#llx\n",
			       (long long)addr);
			pr_err("error: use_len %d, use_offset %d\n",
			       use_len, use_offset);
			errcnt += 1;
			return err ? err : -1;
		}
		if (vary_offset)
			do_vary_offset();
	}

	return err;
}

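/*
 * Write the OOB area of every good eraseblock on the device, skipping the
 * blocks marked bad in bbt[].
 */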
static int write_whole_device(void)
{
	int err;
	unsigned int i;

	pr_info("writing OOBs of whole device\n");
	for (i = 0; i < ebcnt; ++i) {
		if (bbt[i])
			continue;
		err = write_eraseblock(i);
		if (err)
			return err;
		if (i % 256 == 0)
			pr_info("written up to eraseblock %u\n", i);

		err = mtdtest_relax();
		if (err)
			return err;
	}
	pr_info("written %u eraseblocks\n", i);
	return 0;
}

/*
 * Display the address, offset and data bytes at comparison failure.
 * Return number of bitflips encountered.
 */
static size_t memcmpshowoffset(loff_t addr, loff_t offset, const void *cs,
			       const void *ct, size_t count)
{
	const unsigned char *su1, *su2;
	int res;
	size_t i = 0;
	size_t bitflips = 0;

	for (su1 = cs, su2 = ct; 0 < count; ++su1, ++su2, count--, i++) {
		res = *su1 ^ *su2;
		if (res) {
			pr_info("error @addr[0x%lx:0x%lx] 0x%x -> 0x%x diff 0x%x\n",
				(unsigned long)addr, (unsigned long)offset + i,
				*su1, *su2, res);
			bitflips += hweight8(res);
		}
	}

	return bitflips;
}

#define memcmpshow(addr, cs, ct, count) memcmpshowoffset((addr), 0, (cs), (ct),\
							 (count))

/*
 * Compare with 0xff and show the address, offset and data bytes at
 * comparison failure. Return number of bitflips encountered.
 */
static size_t memffshow(loff_t addr, loff_t offset, const void *cs,
			size_t count)
{
	const unsigned char *su1;
	int res;
	size_t i = 0;
	size_t bitflips = 0;

	for (su1 = cs; 0 < count; ++su1, count--, i++) {
		res = *su1 ^ 0xff;
		if (res) {
			pr_info("error @addr[0x%lx:0x%lx] 0x%x -> 0xff diff 0x%x\n",
				(unsigned long)addr, (unsigned long)offset + i,
				*su1, res);
			bitflips += hweight8(res);
		}
	}

	return bitflips;
}

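/*
 * Read back and check the OOB data written by write_eraseblock(), page by
 * page, regenerating the expected bytes from the same PRNG state. Bitflip
 * counts up to bitflip_limit per page are reported but tolerated. When only
 * part of the free OOB area was written (use_offset/use_len), the whole OOB
 * area is read again and the untouched bytes are verified to still be 0xff.
 */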
static int verify_eraseblock(int ebnum)
{
	int i;
	struct mtd_oob_ops ops;
	int err = 0;
	loff_t addr = (loff_t)ebnum * mtd->erasesize;
	size_t bitflips;

	prandom_bytes_state(&rnd_state, writebuf, use_len_max * pgcnt);
	for (i = 0; i < pgcnt; ++i, addr += mtd->writesize) {
		ops.mode = MTD_OPS_AUTO_OOB;
		ops.len = 0;
		ops.retlen = 0;
		ops.ooblen = use_len;
		ops.oobretlen = 0;
		ops.ooboffs = use_offset;
		ops.datbuf = NULL;
		ops.oobbuf = readbuf;
		err = mtd_read_oob(mtd, addr, &ops);
		if (mtd_is_bitflip(err))
			err = 0;

		if (err || ops.oobretlen != use_len) {
			pr_err("error: readoob failed at %#llx\n",
			       (long long)addr);
			errcnt += 1;
			return err ? err : -1;
		}

		bitflips = memcmpshow(addr, readbuf,
				      writebuf + (use_len_max * i) + use_offset,
				      use_len);
		if (bitflips > bitflip_limit) {
			pr_err("error: verify failed at %#llx\n",
			       (long long)addr);
			errcnt += 1;
			if (errcnt > 1000) {
				pr_err("error: too many errors\n");
				return -1;
			}
		} else if (bitflips) {
			pr_info("ignoring error as within bitflip_limit\n");
		}

		if (use_offset != 0 || use_len < mtd->oobavail) {
			int k;

			ops.mode = MTD_OPS_AUTO_OOB;
			ops.len = 0;
			ops.retlen = 0;
			ops.ooblen = mtd->oobavail;
			ops.oobretlen = 0;
			ops.ooboffs = 0;
			ops.datbuf = NULL;
			ops.oobbuf = readbuf;
			err = mtd_read_oob(mtd, addr, &ops);
			if (mtd_is_bitflip(err))
				err = 0;

			if (err || ops.oobretlen != mtd->oobavail) {
				pr_err("error: readoob failed at %#llx\n",
				       (long long)addr);
				errcnt += 1;
				return err ? err : -1;
			}
			bitflips = memcmpshowoffset(addr, use_offset,
						    readbuf + use_offset,
						    writebuf + (use_len_max * i) + use_offset,
						    use_len);

			/* verify pre-offset area for 0xff */
			bitflips += memffshow(addr, 0, readbuf, use_offset);

			/* verify post-(use_offset + use_len) area for 0xff */
			k = use_offset + use_len;
			bitflips += memffshow(addr, k, readbuf + k,
					      mtd->oobavail - k);

			if (bitflips > bitflip_limit) {
				pr_err("error: verify failed at %#llx\n",
				       (long long)addr);
				errcnt += 1;
				if (errcnt > 1000) {
					pr_err("error: too many errors\n");
					return -1;
				}
			} else if (bitflips) {
				pr_info("ignoring errors as within bitflip limit\n");
			}
		}
		if (vary_offset)
			do_vary_offset();
	}
	return err;
}

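/*
 * Read the free OOB area of a whole eraseblock in a single mtd_read_oob()
 * call, then compare it page by page so the bitflip_limit is still applied
 * per page.
 */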
static int verify_eraseblock_in_one_go(int ebnum)
{
	struct mtd_oob_ops ops;
	int err = 0;
	loff_t addr = (loff_t)ebnum * mtd->erasesize;
	size_t len = mtd->oobavail * pgcnt;
	size_t oobavail = mtd->oobavail;
	size_t bitflips;
	int i;

	prandom_bytes_state(&rnd_state, writebuf, len);
	ops.mode = MTD_OPS_AUTO_OOB;
	ops.len = 0;
	ops.retlen = 0;
	ops.ooblen = len;
	ops.oobretlen = 0;
	ops.ooboffs = 0;
	ops.datbuf = NULL;
	ops.oobbuf = readbuf;

	/* read entire block's OOB at one go */
	err = mtd_read_oob(mtd, addr, &ops);
	if (mtd_is_bitflip(err))
		err = 0;

	if (err || ops.oobretlen != len) {
		pr_err("error: readoob failed at %#llx\n",
		       (long long)addr);
		errcnt += 1;
		return err ? err : -1;
	}

	/* verify one page OOB at a time for bitflip per page limit check */
	for (i = 0; i < pgcnt; ++i, addr += mtd->writesize) {
		bitflips = memcmpshow(addr, readbuf + (i * oobavail),
				      writebuf + (i * oobavail), oobavail);
		if (bitflips > bitflip_limit) {
			pr_err("error: verify failed at %#llx\n",
			       (long long)addr);
			errcnt += 1;
			if (errcnt > 1000) {
				pr_err("error: too many errors\n");
				return -1;
			}
		} else if (bitflips) {
			pr_info("ignoring error as within bitflip_limit\n");
		}
	}

	return err;
}

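/*
 * Verify every good eraseblock with verify_eraseblock(), skipping the
 * blocks marked bad in bbt[].
 */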
static int verify_all_eraseblocks(void)
{
	int err;
	unsigned int i;

	pr_info("verifying all eraseblocks\n");
	for (i = 0; i < ebcnt; ++i) {
		if (bbt[i])
			continue;
		err = verify_eraseblock(i);
		if (err)
			return err;
		if (i % 256 == 0)
			pr_info("verified up to eraseblock %u\n", i);

		err = mtdtest_relax();
		if (err)
			return err;
	}
	pr_info("verified %u eraseblocks\n", i);
	return 0;
}

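/*
 * Module init runs the whole test sequence:
 *   1. write all OOB, read it back and verify
 *   2. write all OOB, read each block back in one go and verify
 *   3. write OOB at varying offsets and lengths, read back and verify
 *   4. attempt writes/reads past the end of the OOB area and the device,
 *      which are expected to fail
 *   5. write and read OOB across eraseblock boundaries
 */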
static int __init mtd_oobtest_init(void)
{
	int err = 0;
	unsigned int i;
	uint64_t tmp;
	struct mtd_oob_ops ops;
	loff_t addr = 0, addr0;

	printk(KERN_INFO "\n");
	printk(KERN_INFO "=================================================\n");

	if (dev < 0) {
		pr_info("Please specify a valid mtd-device via module parameter\n");
		pr_crit("CAREFUL: This test wipes all data on the specified MTD device!\n");
		return -EINVAL;
	}

	pr_info("MTD device: %d\n", dev);

	mtd = get_mtd_device(NULL, dev);
	if (IS_ERR(mtd)) {
		err = PTR_ERR(mtd);
		pr_err("error: cannot get MTD device\n");
		return err;
	}

	if (!mtd_type_is_nand(mtd)) {
		pr_info("this test requires NAND flash\n");
		goto out;
	}

	tmp = mtd->size;
	do_div(tmp, mtd->erasesize);
	ebcnt = tmp;
	pgcnt = mtd->erasesize / mtd->writesize;

	pr_info("MTD device size %llu, eraseblock size %u, "
		"page size %u, count of eraseblocks %u, pages per "
		"eraseblock %u, OOB size %u\n",
		(unsigned long long)mtd->size, mtd->erasesize,
		mtd->writesize, ebcnt, pgcnt, mtd->oobsize);

	err = -ENOMEM;
	readbuf = kmalloc(mtd->erasesize, GFP_KERNEL);
	if (!readbuf)
		goto out;
	writebuf = kmalloc(mtd->erasesize, GFP_KERNEL);
	if (!writebuf)
		goto out;
	bbt = kzalloc(ebcnt, GFP_KERNEL);
	if (!bbt)
		goto out;

	err = mtdtest_scan_for_bad_eraseblocks(mtd, bbt, 0, ebcnt);
	if (err)
		goto out;

	use_offset = 0;
	use_len = mtd->oobavail;
	use_len_max = mtd->oobavail;
	vary_offset = 0;

	/* First test: write all OOB, read it back and verify */
	pr_info("test 1 of 5\n");

	err = mtdtest_erase_good_eraseblocks(mtd, bbt, 0, ebcnt);
	if (err)
		goto out;

	prandom_seed_state(&rnd_state, 1);
	err = write_whole_device();
	if (err)
		goto out;

	prandom_seed_state(&rnd_state, 1);
	err = verify_all_eraseblocks();
	if (err)
		goto out;

	/*
	 * Second test: write all OOB, a block at a time, read it back and
	 * verify.
	 */
	pr_info("test 2 of 5\n");

	err = mtdtest_erase_good_eraseblocks(mtd, bbt, 0, ebcnt);
	if (err)
		goto out;

	prandom_seed_state(&rnd_state, 3);
	err = write_whole_device();
	if (err)
		goto out;

	/* Check all eraseblocks */
	prandom_seed_state(&rnd_state, 3);
	pr_info("verifying all eraseblocks\n");
	for (i = 0; i < ebcnt; ++i) {
		if (bbt[i])
			continue;
		err = verify_eraseblock_in_one_go(i);
		if (err)
			goto out;
		if (i % 256 == 0)
			pr_info("verified up to eraseblock %u\n", i);

		err = mtdtest_relax();
		if (err)
			goto out;
	}
	pr_info("verified %u eraseblocks\n", i);

	/*
	 * Third test: write OOB at varying offsets and lengths, read it back
	 * and verify.
	 */
	pr_info("test 3 of 5\n");

	err = mtdtest_erase_good_eraseblocks(mtd, bbt, 0, ebcnt);
	if (err)
		goto out;

	/* Write all eraseblocks */
	use_offset = 0;
	use_len = mtd->oobavail;
	use_len_max = mtd->oobavail;
	vary_offset = 1;
	prandom_seed_state(&rnd_state, 5);

	err = write_whole_device();
	if (err)
		goto out;

	/* Check all eraseblocks */
	use_offset = 0;
	use_len = mtd->oobavail;
	use_len_max = mtd->oobavail;
	vary_offset = 1;
	prandom_seed_state(&rnd_state, 5);
	err = verify_all_eraseblocks();
	if (err)
		goto out;

	use_offset = 0;
	use_len = mtd->oobavail;
	use_len_max = mtd->oobavail;
	vary_offset = 0;

	/* Fourth test: try to write off end of device */
	pr_info("test 4 of 5\n");

	err = mtdtest_erase_good_eraseblocks(mtd, bbt, 0, ebcnt);
	if (err)
		goto out;

	addr0 = 0;
	for (i = 0; i < ebcnt && bbt[i]; ++i)
		addr0 += mtd->erasesize;

	/* Attempt to write off end of OOB */
	ops.mode = MTD_OPS_AUTO_OOB;
	ops.len = 0;
	ops.retlen = 0;
	ops.ooblen = 1;
	ops.oobretlen = 0;
	ops.ooboffs = mtd->oobavail;
	ops.datbuf = NULL;
	ops.oobbuf = writebuf;
	pr_info("attempting to start write past end of OOB\n");
	pr_info("an error is expected...\n");
	err = mtd_write_oob(mtd, addr0, &ops);
	if (err) {
		pr_info("error occurred as expected\n");
		err = 0;
	} else {
		pr_err("error: can write past end of OOB\n");
		errcnt += 1;
	}

	/* Attempt to read off end of OOB */
	ops.mode = MTD_OPS_AUTO_OOB;
	ops.len = 0;
	ops.retlen = 0;
	ops.ooblen = 1;
	ops.oobretlen = 0;
	ops.ooboffs = mtd->oobavail;
	ops.datbuf = NULL;
	ops.oobbuf = readbuf;
	pr_info("attempting to start read past end of OOB\n");
	pr_info("an error is expected...\n");
	err = mtd_read_oob(mtd, addr0, &ops);
	if (mtd_is_bitflip(err))
		err = 0;

	if (err) {
		pr_info("error occurred as expected\n");
		err = 0;
	} else {
		pr_err("error: can read past end of OOB\n");
		errcnt += 1;
	}

	if (bbt[ebcnt - 1])
		pr_info("skipping end of device tests because last "
			"block is bad\n");
	else {
		/* Attempt to write off end of device */
		ops.mode = MTD_OPS_AUTO_OOB;
		ops.len = 0;
		ops.retlen = 0;
		ops.ooblen = mtd->oobavail + 1;
		ops.oobretlen = 0;
		ops.ooboffs = 0;
		ops.datbuf = NULL;
		ops.oobbuf = writebuf;
		pr_info("attempting to write past end of device\n");
		pr_info("an error is expected...\n");
		err = mtd_write_oob(mtd, mtd->size - mtd->writesize, &ops);
		if (err) {
			pr_info("error occurred as expected\n");
			err = 0;
		} else {
			pr_err("error: wrote past end of device\n");
			errcnt += 1;
		}

		/* Attempt to read off end of device */
		ops.mode = MTD_OPS_AUTO_OOB;
		ops.len = 0;
		ops.retlen = 0;
		ops.ooblen = mtd->oobavail + 1;
		ops.oobretlen = 0;
		ops.ooboffs = 0;
		ops.datbuf = NULL;
		ops.oobbuf = readbuf;
		pr_info("attempting to read past end of device\n");
		pr_info("an error is expected...\n");
		err = mtd_read_oob(mtd, mtd->size - mtd->writesize, &ops);
		if (mtd_is_bitflip(err))
			err = 0;

		if (err) {
			pr_info("error occurred as expected\n");
			err = 0;
		} else {
			pr_err("error: read past end of device\n");
			errcnt += 1;
		}

		err = mtdtest_erase_eraseblock(mtd, ebcnt - 1);
		if (err)
			goto out;

		/* Attempt to write off end of device */
		ops.mode = MTD_OPS_AUTO_OOB;
		ops.len = 0;
		ops.retlen = 0;
		ops.ooblen = mtd->oobavail;
		ops.oobretlen = 0;
		ops.ooboffs = 1;
		ops.datbuf = NULL;
		ops.oobbuf = writebuf;
		pr_info("attempting to write past end of device\n");
		pr_info("an error is expected...\n");
		err = mtd_write_oob(mtd, mtd->size - mtd->writesize, &ops);
		if (err) {
			pr_info("error occurred as expected\n");
			err = 0;
		} else {
			pr_err("error: wrote past end of device\n");
			errcnt += 1;
		}

		/* Attempt to read off end of device */
		ops.mode = MTD_OPS_AUTO_OOB;
		ops.len = 0;
		ops.retlen = 0;
		ops.ooblen = mtd->oobavail;
		ops.oobretlen = 0;
		ops.ooboffs = 1;
		ops.datbuf = NULL;
		ops.oobbuf = readbuf;
		pr_info("attempting to read past end of device\n");
		pr_info("an error is expected...\n");
		err = mtd_read_oob(mtd, mtd->size - mtd->writesize, &ops);
		if (mtd_is_bitflip(err))
			err = 0;

		if (err) {
			pr_info("error occurred as expected\n");
			err = 0;
		} else {
			pr_err("error: read past end of device\n");
			errcnt += 1;
		}
	}

	/* Fifth test: write / read across block boundaries */
	pr_info("test 5 of 5\n");

	/* Erase all eraseblocks */
	err = mtdtest_erase_good_eraseblocks(mtd, bbt, 0, ebcnt);
	if (err)
		goto out;

	/* Write all eraseblocks */
	prandom_seed_state(&rnd_state, 11);
	pr_info("writing OOBs of whole device\n");
	for (i = 0; i < ebcnt - 1; ++i) {
		int cnt = 2;
		int pg;
		size_t sz = mtd->oobavail;
		if (bbt[i] || bbt[i + 1])
			continue;
		addr = (loff_t)(i + 1) * mtd->erasesize - mtd->writesize;
		prandom_bytes_state(&rnd_state, writebuf, sz * cnt);
		for (pg = 0; pg < cnt; ++pg) {
			ops.mode = MTD_OPS_AUTO_OOB;
			ops.len = 0;
			ops.retlen = 0;
			ops.ooblen = sz;
			ops.oobretlen = 0;
			ops.ooboffs = 0;
			ops.datbuf = NULL;
			ops.oobbuf = writebuf + pg * sz;
			err = mtd_write_oob(mtd, addr, &ops);
			if (err)
				goto out;
			if (i % 256 == 0)
				pr_info("written up to eraseblock %u\n", i);

			err = mtdtest_relax();
			if (err)
				goto out;

			addr += mtd->writesize;
		}
	}
	pr_info("written %u eraseblocks\n", i);

	/* Check all eraseblocks */
	prandom_seed_state(&rnd_state, 11);
	pr_info("verifying all eraseblocks\n");
	for (i = 0; i < ebcnt - 1; ++i) {
		if (bbt[i] || bbt[i + 1])
			continue;
		prandom_bytes_state(&rnd_state, writebuf, mtd->oobavail * 2);
		addr = (loff_t)(i + 1) * mtd->erasesize - mtd->writesize;
		ops.mode = MTD_OPS_AUTO_OOB;
		ops.len = 0;
		ops.retlen = 0;
		ops.ooblen = mtd->oobavail * 2;
		ops.oobretlen = 0;
		ops.ooboffs = 0;
		ops.datbuf = NULL;
		ops.oobbuf = readbuf;
		err = mtd_read_oob(mtd, addr, &ops);
		if (mtd_is_bitflip(err))
			err = 0;

		if (err)
			goto out;
		if (memcmpshow(addr, readbuf, writebuf,
			       mtd->oobavail * 2)) {
			pr_err("error: verify failed at %#llx\n",
			       (long long)addr);
			errcnt += 1;
			if (errcnt > 1000) {
				pr_err("error: too many errors\n");
				goto out;
			}
		}
		if (i % 256 == 0)
			pr_info("verified up to eraseblock %u\n", i);

		err = mtdtest_relax();
		if (err)
			goto out;
	}
	pr_info("verified %u eraseblocks\n", i);

	pr_info("finished with %d errors\n", errcnt);
out:
	kfree(bbt);
	kfree(writebuf);
	kfree(readbuf);
	put_mtd_device(mtd);
	if (err)
		pr_info("error %d occurred\n", err);
	printk(KERN_INFO "=================================================\n");
	return err;
}
module_init(mtd_oobtest_init);

static void __exit mtd_oobtest_exit(void)
{
	return;
}
module_exit(mtd_oobtest_exit);

MODULE_DESCRIPTION("Out-of-band test module");
MODULE_AUTHOR("Adrian Hunter");
MODULE_LICENSE("GPL");