// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (c) International Business Machines Corp., 2006
 * Copyright (c) Nokia Corporation, 2006, 2007
 *
 * Author: Artem Bityutskiy (Битюцкий Артём)
 */

/*
 * UBI input/output sub-system.
 *
 * This sub-system provides a uniform way to work with all kinds of the
 * underlying MTD devices. It also implements handy functions for reading and
 * writing UBI headers.
 *
 * We try to keep a paranoid mindset and do not trust what we read from the
 * flash media, in order to be more secure and robust. So this sub-system
 * validates every single header it reads from the flash media.
 *
 * Some words about how the eraseblock headers are stored.
 *
 * The erase counter header is always stored at offset zero. By default, the
 * VID header is stored after the EC header at the closest aligned offset
 * (i.e. aligned to the minimum I/O unit size). Data starts next to the VID
 * header at the closest aligned offset. But this default layout may be
 * changed. For example, for different reasons (e.g., optimization) UBI may be
 * asked to put the VID header at a further offset, and even at an unaligned
 * offset. Of course, if the offset of the VID header is unaligned, UBI adds
 * proper padding in front of it. The data offset may also be changed, but it
 * has to be aligned.
 *
 * About minimal I/O units. In general, UBI assumes a flash device model where
 * there is only one minimal I/O unit size. E.g., in case of NOR flash it is 1,
 * in case of NAND flash it is a NAND page, etc. This is reported by MTD in the
 * @ubi->mtd->writesize field. But as an exception, UBI admits the use of
 * another (smaller) minimal I/O unit size for EC and VID headers to make it
 * possible to do different optimizations.
 *
 * This is extremely useful in case of NAND flashes which allow several write
 * operations to one NAND page. In this case UBI can fit the EC and VID
 * headers in one NAND page. Thus, UBI may use a "sub-page" size as the minimal
 * I/O unit for the headers (the @ubi->hdrs_min_io_size field). But it still
 * reports the NAND page size (@ubi->min_io_size) as a minimal I/O unit for
 * the UBI users.
 *
 * Example: some Samsung NANDs with 2KiB pages allow 4x 512-byte writes, so
 * although the minimal I/O unit is 2K, UBI uses 512 bytes for the EC and VID
 * headers.
 *
 * Q: why not just treat the sub-page as the minimal I/O unit of this flash
 * device, e.g., make @ubi->min_io_size = 512 in the example above?
 *
 * A: because when writing a sub-page, MTD still writes a full 2K page but the
 * bytes which are not relevant to the sub-page are 0xFF. So, basically,
 * writing 4x512 sub-pages is 4 times slower than writing one 2KiB NAND page.
 * Thus, we prefer to use sub-pages only for the EC and VID headers.
 *
 * As noted above, the VID header may start at a non-aligned offset. For
 * example, in case of a 2KiB page NAND flash with a 512-byte sub-page, the
 * VID header may reside at offset 1984, which is the last 64 bytes of the
 * last sub-page (the EC header is always at offset zero). This causes some
 * difficulties when reading and writing VID headers.
 *
 * Suppose we have a 64-byte buffer and we read a VID header into it. We change
 * the data and want to write this VID header out. As we can only write in
 * 512-byte chunks, we have to allocate one more buffer and copy our VID header
 * to offset 448 of this buffer.
 *
 * The I/O sub-system does the following trick in order to avoid this extra
 * copy. It always allocates a @ubi->vid_hdr_alsize byte buffer for the VID
 * header and returns a pointer to offset @ubi->vid_hdr_shift of this buffer.
 * When the VID header is being written out, it shifts the VID header pointer
 * back and writes the whole sub-page.
 */
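
/*
 * Illustrative example (an editorial sketch, not part of the driver logic):
 * for the 2KiB-page NAND with 512-byte sub-pages described above, and with
 * the VID header placed at the unaligned offset 1984, the derived values
 * would roughly be:
 *
 *	ubi->min_io_size      = 2048              (full NAND page)
 *	ubi->hdrs_min_io_size =  512              (sub-page used for headers)
 *	ubi->vid_hdr_offset   = 1984              (requested, unaligned)
 *	ubi->vid_hdr_aloffset = 1536              (1984 rounded down to 512)
 *	ubi->vid_hdr_shift    = 1984 - 1536 = 448 (offset inside the buffer)
 *
 * so ubi_io_write_vid_hdr() can write one whole 512-byte sub-page starting at
 * @vid_hdr_aloffset while callers only ever see the pointer shifted by
 * @vid_hdr_shift.
 */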

#include <linux/crc32.h>
#include <linux/err.h>
#include <linux/slab.h>
#include "ubi.h"

static int self_check_not_bad(const struct ubi_device *ubi, int pnum);
static int self_check_peb_ec_hdr(const struct ubi_device *ubi, int pnum);
static int self_check_ec_hdr(const struct ubi_device *ubi, int pnum,
			     const struct ubi_ec_hdr *ec_hdr);
static int self_check_peb_vid_hdr(const struct ubi_device *ubi, int pnum);
static int self_check_vid_hdr(const struct ubi_device *ubi, int pnum,
			      const struct ubi_vid_hdr *vid_hdr);
static int self_check_write(struct ubi_device *ubi, const void *buf, int pnum,
			    int offset, int len);

/**
 * ubi_io_read - read data from a physical eraseblock.
 * @ubi: UBI device description object
 * @buf: buffer where to store the read data
 * @pnum: physical eraseblock number to read from
 * @offset: offset within the physical eraseblock from where to read
 * @len: how many bytes to read
 *
 * This function reads data from offset @offset of physical eraseblock @pnum
 * and stores the read data in the @buf buffer. The following return codes are
 * possible:
 *
 * o %0 if all the requested data were successfully read;
 * o %UBI_IO_BITFLIPS if all the requested data were successfully read, but
 *   correctable bit-flips were detected; this is harmless but may indicate
 *   that this eraseblock may become bad soon (but does not have to);
 * o %-EBADMSG if the MTD subsystem reported data integrity problems, for
 *   example an ECC error in case of NAND; this most probably means that the
 *   data is corrupted;
 * o %-EIO if some I/O error occurred;
 * o other negative error codes in case of other errors.
 */
int ubi_io_read(const struct ubi_device *ubi, void *buf, int pnum, int offset,
		int len)
{
	int err, retries = 0;
	size_t read;
	loff_t addr;

	dbg_io("read %d bytes from PEB %d:%d", len, pnum, offset);

	ubi_assert(pnum >= 0 && pnum < ubi->peb_count);
	ubi_assert(offset >= 0 && offset + len <= ubi->peb_size);
	ubi_assert(len > 0);

	err = self_check_not_bad(ubi, pnum);
	if (err)
		return err;

	/*
	 * Deliberately corrupt the buffer to improve robustness. Indeed, if we
	 * do not do this, the following may happen:
	 * 1. The buffer contains data from a previous operation, e.g., read
	 *    from another PEB previously. The data looks as expected, e.g., if
	 *    we just do not read anything and return - the caller would not
	 *    notice this. E.g., if we are reading a VID header, the buffer may
	 *    contain a valid VID header from another PEB.
	 * 2. The driver is buggy and returns us success or -EBADMSG or
	 *    -EUCLEAN, but it does not actually put any data to the buffer.
	 *
	 * This may confuse UBI or upper layers - they may think the buffer
	 * contains valid data while in fact it is just old data. This is
	 * especially possible because UBI (and UBIFS) relies on CRC, and
	 * treats data as correct even in case of ECC errors if the CRC is
	 * correct.
	 *
	 * Try to prevent this situation by changing the first byte of the
	 * buffer.
	 */
	*((uint8_t *)buf) ^= 0xFF;

	addr = (loff_t)pnum * ubi->peb_size + offset;
retry:
	err = mtd_read(ubi->mtd, addr, len, &read, buf);
	if (err) {
		const char *errstr = mtd_is_eccerr(err) ? " (ECC error)" : "";

		if (mtd_is_bitflip(err)) {
			/*
			 * -EUCLEAN is reported if there was a bit-flip which
			 * was corrected, so this is harmless.
			 *
			 * We do not report it here unless debugging is
			 * enabled. A corresponding message will be printed
			 * later, when it has been scrubbed.
			 */
			ubi_msg(ubi, "fixable bit-flip detected at PEB %d",
				pnum);
			ubi_assert(len == read);
			return UBI_IO_BITFLIPS;
		}

		if (retries++ < UBI_IO_RETRIES) {
			ubi_warn(ubi, "error %d%s while reading %d bytes from PEB %d:%d, read only %zd bytes, retry",
				 err, errstr, len, pnum, offset, read);
			yield();
			goto retry;
		}

		ubi_err(ubi, "error %d%s while reading %d bytes from PEB %d:%d, read %zd bytes",
			err, errstr, len, pnum, offset, read);
		dump_stack();

		/*
		 * The driver should never return -EBADMSG if it failed to read
		 * all the requested data. But some buggy drivers might do
		 * this, so we change it to -EIO.
		 */
		if (read != len && mtd_is_eccerr(err)) {
			ubi_assert(0);
			err = -EIO;
		}
	} else {
		ubi_assert(len == read);

		if (ubi_dbg_is_bitflip(ubi)) {
			dbg_gen("bit-flip (emulated)");
			err = UBI_IO_BITFLIPS;
		}
	}

	return err;
}
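
/*
 * Illustrative usage sketch (not part of the driver): a typical caller treats
 * %UBI_IO_BITFLIPS as a successful read that should additionally trigger
 * scrubbing of the PEB, e.g.:
 *
 *	err = ubi_io_read(ubi, buf, pnum, offset, len);
 *	if (err == UBI_IO_BITFLIPS)
 *		scrub = 1;	(data is good, but re-write the PEB later)
 *	else if (err < 0)
 *		return err;	(-EBADMSG, -EIO, ...)
 */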

/**
 * ubi_io_write - write data to a physical eraseblock.
 * @ubi: UBI device description object
 * @buf: buffer with the data to write
 * @pnum: physical eraseblock number to write to
 * @offset: offset within the physical eraseblock where to write
 * @len: how many bytes to write
 *
 * This function writes @len bytes of data from buffer @buf to offset @offset
 * of physical eraseblock @pnum. If all the data were successfully written,
 * zero is returned. If an error occurred, this function returns a negative
 * error code. If %-EIO is returned, the physical eraseblock most probably went
 * bad.
 *
 * Note, in case of an error, it is possible that something was still written
 * to the flash media, but it may be some garbage.
 */
int ubi_io_write(struct ubi_device *ubi, const void *buf, int pnum, int offset,
		 int len)
{
	int err;
	size_t written;
	loff_t addr;

	dbg_io("write %d bytes to PEB %d:%d", len, pnum, offset);

	ubi_assert(pnum >= 0 && pnum < ubi->peb_count);
	ubi_assert(offset >= 0 && offset + len <= ubi->peb_size);
	ubi_assert(offset % ubi->hdrs_min_io_size == 0);
	ubi_assert(len > 0 && len % ubi->hdrs_min_io_size == 0);

	if (ubi->ro_mode) {
		ubi_err(ubi, "read-only mode");
		return -EROFS;
	}

	err = self_check_not_bad(ubi, pnum);
	if (err)
		return err;

	/* The area we are writing to has to contain all 0xFF bytes */
	err = ubi_self_check_all_ff(ubi, pnum, offset, len);
	if (err)
		return err;

	if (offset >= ubi->leb_start) {
		/*
		 * We write to the data area of the physical eraseblock. Make
		 * sure it has valid EC and VID headers.
		 */
		err = self_check_peb_ec_hdr(ubi, pnum);
		if (err)
			return err;
		err = self_check_peb_vid_hdr(ubi, pnum);
		if (err)
			return err;
	}

	if (ubi_dbg_is_write_failure(ubi)) {
		ubi_err(ubi, "cannot write %d bytes to PEB %d:%d (emulated)",
			len, pnum, offset);
		dump_stack();
		return -EIO;
	}

	addr = (loff_t)pnum * ubi->peb_size + offset;
	err = mtd_write(ubi->mtd, addr, len, &written, buf);
	if (err) {
		ubi_err(ubi, "error %d while writing %d bytes to PEB %d:%d, written %zd bytes",
			err, len, pnum, offset, written);
		dump_stack();
		ubi_dump_flash(ubi, pnum, offset, len);
	} else
		ubi_assert(written == len);

	if (!err) {
		err = self_check_write(ubi, buf, pnum, offset, len);
		if (err)
			return err;

		/*
		 * Since we always write sequentially, the rest of the PEB has
		 * to contain only 0xFF bytes.
		 */
		offset += len;
		len = ubi->peb_size - offset;
		if (len)
			err = ubi_self_check_all_ff(ubi, pnum, offset, len);
	}

	return err;
}
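
/*
 * Illustrative usage sketch (not part of the driver): the asserts above mean
 * that @offset and @len must both be multiples of @ubi->hdrs_min_io_size, so
 * a caller writing into the data area would do something roughly like:
 *
 *	aligned_len = ALIGN(data_len, ubi->min_io_size);
 *	err = ubi_io_write(ubi, buf, pnum, ubi->leb_start, aligned_len);
 */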

/**
 * do_sync_erase - synchronously erase a physical eraseblock.
 * @ubi: UBI device description object
 * @pnum: the physical eraseblock number to erase
 *
 * This function synchronously erases physical eraseblock @pnum and returns
 * zero in case of success and a negative error code in case of failure. If
 * %-EIO is returned, the physical eraseblock most probably went bad.
 */
static int do_sync_erase(struct ubi_device *ubi, int pnum)
{
	int err, retries = 0;
	struct erase_info ei;

	dbg_io("erase PEB %d", pnum);
	ubi_assert(pnum >= 0 && pnum < ubi->peb_count);

	if (ubi->ro_mode) {
		ubi_err(ubi, "read-only mode");
		return -EROFS;
	}

retry:
	memset(&ei, 0, sizeof(struct erase_info));

	ei.addr = (loff_t)pnum * ubi->peb_size;
	ei.len = ubi->peb_size;

	err = mtd_erase(ubi->mtd, &ei);
	if (err) {
		if (retries++ < UBI_IO_RETRIES) {
			ubi_warn(ubi, "error %d while erasing PEB %d, retry",
				 err, pnum);
			yield();
			goto retry;
		}
		ubi_err(ubi, "cannot erase PEB %d, error %d", pnum, err);
		dump_stack();
		return err;
	}

	err = ubi_self_check_all_ff(ubi, pnum, 0, ubi->peb_size);
	if (err)
		return err;

	if (ubi_dbg_is_erase_failure(ubi)) {
		ubi_err(ubi, "cannot erase PEB %d (emulated)", pnum);
		return -EIO;
	}

	return 0;
}

/* Patterns to write to a physical eraseblock when torturing it */
static uint8_t patterns[] = {0xa5, 0x5a, 0x0};

/**
 * torture_peb - test a supposedly bad physical eraseblock.
 * @ubi: UBI device description object
 * @pnum: the physical eraseblock number to test
 *
 * This function returns %-EIO if the physical eraseblock did not pass the
 * test, a positive number of erase operations done if the test was
 * successfully passed, and other negative error codes in case of other errors.
 */
static int torture_peb(struct ubi_device *ubi, int pnum)
{
	int err, i, patt_count;

	ubi_msg(ubi, "run torture test for PEB %d", pnum);
	patt_count = ARRAY_SIZE(patterns);
	ubi_assert(patt_count > 0);

	mutex_lock(&ubi->buf_mutex);
	for (i = 0; i < patt_count; i++) {
		err = do_sync_erase(ubi, pnum);
		if (err)
			goto out;

		/* Make sure the PEB contains only 0xFF bytes */
		err = ubi_io_read(ubi, ubi->peb_buf, pnum, 0, ubi->peb_size);
		if (err)
			goto out;

		err = ubi_check_pattern(ubi->peb_buf, 0xFF, ubi->peb_size);
		if (err == 0) {
			ubi_err(ubi, "erased PEB %d, but a non-0xFF byte found",
				pnum);
			err = -EIO;
			goto out;
		}

		/* Write a pattern and check it */
		memset(ubi->peb_buf, patterns[i], ubi->peb_size);
		err = ubi_io_write(ubi, ubi->peb_buf, pnum, 0, ubi->peb_size);
		if (err)
			goto out;

		memset(ubi->peb_buf, ~patterns[i], ubi->peb_size);
		err = ubi_io_read(ubi, ubi->peb_buf, pnum, 0, ubi->peb_size);
		if (err)
			goto out;

		err = ubi_check_pattern(ubi->peb_buf, patterns[i],
					ubi->peb_size);
		if (err == 0) {
			ubi_err(ubi, "pattern %x checking failed for PEB %d",
				patterns[i], pnum);
			err = -EIO;
			goto out;
		}
	}

	err = patt_count;
	ubi_msg(ubi, "PEB %d passed torture test, do not mark it as bad", pnum);

out:
	mutex_unlock(&ubi->buf_mutex);
	if (err == UBI_IO_BITFLIPS || mtd_is_eccerr(err)) {
		/*
		 * If a bit-flip or data integrity error was detected, the test
		 * has not passed because it happened on a freshly erased
		 * physical eraseblock which means something is wrong with it.
		 */
		ubi_err(ubi, "read problems on freshly erased PEB %d, must be bad",
			pnum);
		err = -EIO;
	}
	return err;
}

/**
 * nor_erase_prepare - prepare a NOR flash PEB for erasure.
 * @ubi: UBI device description object
 * @pnum: physical eraseblock number to prepare
 *
 * NOR flashes, or at least some of them, have a peculiar embedded PEB erasure
 * algorithm: the PEB is first filled with zeroes, then it is erased. And
 * filling with zeroes starts from the end of the PEB. This was observed with
 * Spansion S29GL512N NOR flash.
 *
 * This means that in case of a power cut we may end up with intact data at the
 * beginning of the PEB, and all zeroes at the end of the PEB. In other words,
 * the EC and VID headers are OK, but a large chunk of data at the end of the
 * PEB is zeroed. This makes UBI mistakenly treat this PEB as used and
 * associate it with an LEB, which leads to subsequent failures (e.g., UBIFS
 * fails).
 *
 * This function is called before erasing NOR PEBs and it zeroes out the EC and
 * VID magic numbers in order to invalidate them and prevent the failures.
 * Returns zero in case of success and a negative error code in case of
 * failure.
 */
static int nor_erase_prepare(struct ubi_device *ubi, int pnum)
{
	int err;
	size_t written;
	loff_t addr;
	uint32_t data = 0;
	struct ubi_ec_hdr ec_hdr;
	struct ubi_vid_io_buf vidb;

	/*
	 * Note, we cannot generally define VID header buffers on stack,
	 * because of the way we deal with these buffers (see the header
	 * comment in this file). But we know this is a NOR-specific piece of
	 * code, so we can do this. But yes, this is error-prone and we should
	 * (pre-)allocate a VID header buffer instead.
	 */
	struct ubi_vid_hdr vid_hdr;

	/*
	 * If the VID or EC header is valid, we have to corrupt it before
	 * erasing. It is important to first invalidate the EC header, and
	 * then the VID header. Otherwise a power cut may lead to a valid EC
	 * header and an invalid VID header, in which case UBI will treat this
	 * PEB as corrupted and will try to preserve it, and print scary
	 * warnings.
	 */
	addr = (loff_t)pnum * ubi->peb_size;
	err = ubi_io_read_ec_hdr(ubi, pnum, &ec_hdr, 0);
	if (err != UBI_IO_BAD_HDR_EBADMSG && err != UBI_IO_BAD_HDR &&
	    err != UBI_IO_FF) {
		err = mtd_write(ubi->mtd, addr, 4, &written, (void *)&data);
		if (err)
			goto error;
	}

	ubi_init_vid_buf(ubi, &vidb, &vid_hdr);
	ubi_assert(&vid_hdr == ubi_get_vid_hdr(&vidb));

	err = ubi_io_read_vid_hdr(ubi, pnum, &vidb, 0);
	if (err != UBI_IO_BAD_HDR_EBADMSG && err != UBI_IO_BAD_HDR &&
	    err != UBI_IO_FF) {
		addr += ubi->vid_hdr_aloffset;
		err = mtd_write(ubi->mtd, addr, 4, &written, (void *)&data);
		if (err)
			goto error;
	}
	return 0;

error:
	/*
	 * The PEB contains a valid VID or EC header, but we cannot invalidate
	 * it. Supposedly the flash media or the driver is screwed up, so
	 * return an error.
	 */
	ubi_err(ubi, "cannot invalidate PEB %d, write returned %d", pnum, err);
	ubi_dump_flash(ubi, pnum, 0, ubi->peb_size);
	return -EIO;
}

/**
 * ubi_io_sync_erase - synchronously erase a physical eraseblock.
 * @ubi: UBI device description object
 * @pnum: physical eraseblock number to erase
 * @torture: if this physical eraseblock has to be tortured
 *
 * This function synchronously erases physical eraseblock @pnum. If the
 * @torture flag is not zero, the physical eraseblock is checked by means of
 * writing different patterns to it and reading them back. If the torturing is
 * enabled, the physical eraseblock is erased more than once.
 *
 * This function returns the number of erasures made in case of success, %-EIO
 * if the erasure failed or the torturing test failed, and other negative error
 * codes in case of other errors. Note, %-EIO means that the physical
 * eraseblock is bad.
 */
int ubi_io_sync_erase(struct ubi_device *ubi, int pnum, int torture)
{
	int err, ret = 0;

	ubi_assert(pnum >= 0 && pnum < ubi->peb_count);

	err = self_check_not_bad(ubi, pnum);
	if (err != 0)
		return err;

	if (ubi->ro_mode) {
		ubi_err(ubi, "read-only mode");
		return -EROFS;
	}

	if (ubi->nor_flash) {
		err = nor_erase_prepare(ubi, pnum);
		if (err)
			return err;
	}

	if (torture) {
		ret = torture_peb(ubi, pnum);
		if (ret < 0)
			return ret;
	}

	err = do_sync_erase(ubi, pnum);
	if (err)
		return err;

	return ret + 1;
}

/**
 * ubi_io_is_bad - check if a physical eraseblock is bad.
 * @ubi: UBI device description object
 * @pnum: the physical eraseblock number to check
 *
 * This function returns a positive number if the physical eraseblock is bad,
 * zero if not, and a negative error code if an error occurred.
 */
int ubi_io_is_bad(const struct ubi_device *ubi, int pnum)
{
	struct mtd_info *mtd = ubi->mtd;

	ubi_assert(pnum >= 0 && pnum < ubi->peb_count);

	if (ubi->bad_allowed) {
		int ret;

		ret = mtd_block_isbad(mtd, (loff_t)pnum * ubi->peb_size);
		if (ret < 0)
			ubi_err(ubi, "error %d while checking if PEB %d is bad",
				ret, pnum);
		else if (ret)
			dbg_io("PEB %d is bad", pnum);
		return ret;
	}

	return 0;
}

/**
 * ubi_io_mark_bad - mark a physical eraseblock as bad.
 * @ubi: UBI device description object
 * @pnum: the physical eraseblock number to mark
 *
 * This function returns zero in case of success and a negative error code in
 * case of failure.
 */
int ubi_io_mark_bad(const struct ubi_device *ubi, int pnum)
{
	int err;
	struct mtd_info *mtd = ubi->mtd;

	ubi_assert(pnum >= 0 && pnum < ubi->peb_count);

	if (ubi->ro_mode) {
		ubi_err(ubi, "read-only mode");
		return -EROFS;
	}

	if (!ubi->bad_allowed)
		return 0;

	err = mtd_block_markbad(mtd, (loff_t)pnum * ubi->peb_size);
	if (err)
		ubi_err(ubi, "cannot mark PEB %d bad, error %d", pnum, err);
	return err;
}

/**
 * validate_ec_hdr - validate an erase counter header.
 * @ubi: UBI device description object
 * @ec_hdr: the erase counter header to check
 *
 * This function returns zero if the erase counter header is OK, and %1 if
 * not.
 */
static int validate_ec_hdr(const struct ubi_device *ubi,
			   const struct ubi_ec_hdr *ec_hdr)
{
	long long ec;
	int vid_hdr_offset, leb_start;

	ec = be64_to_cpu(ec_hdr->ec);
	vid_hdr_offset = be32_to_cpu(ec_hdr->vid_hdr_offset);
	leb_start = be32_to_cpu(ec_hdr->data_offset);

	if (ec_hdr->version != UBI_VERSION) {
		ubi_err(ubi, "node with incompatible UBI version found: this UBI version is %d, image version is %d",
			UBI_VERSION, (int)ec_hdr->version);
		goto bad;
	}

	if (vid_hdr_offset != ubi->vid_hdr_offset) {
		ubi_err(ubi, "bad VID header offset %d, expected %d",
			vid_hdr_offset, ubi->vid_hdr_offset);
		goto bad;
	}

	if (leb_start != ubi->leb_start) {
		ubi_err(ubi, "bad data offset %d, expected %d",
			leb_start, ubi->leb_start);
		goto bad;
	}

	if (ec < 0 || ec > UBI_MAX_ERASECOUNTER) {
		ubi_err(ubi, "bad erase counter %lld", ec);
		goto bad;
	}

	return 0;

bad:
	ubi_err(ubi, "bad EC header");
	ubi_dump_ec_hdr(ec_hdr);
	dump_stack();
	return 1;
}

/**
 * ubi_io_read_ec_hdr - read and check an erase counter header.
 * @ubi: UBI device description object
 * @pnum: physical eraseblock to read from
 * @ec_hdr: a &struct ubi_ec_hdr object where to store the read erase counter
 *          header
 * @verbose: be verbose if the header is corrupted or was not found
 *
 * This function reads the erase counter header from physical eraseblock @pnum
 * and stores it in @ec_hdr. This function also checks the CRC checksum of the
 * read erase counter header. The following codes may be returned:
 *
 * o %0 if the CRC checksum is correct and the header was successfully read;
 * o %UBI_IO_BITFLIPS if the CRC is correct, but bit-flips were detected
 *   and corrected by the flash driver; this is harmless but may indicate that
 *   this eraseblock may become bad soon (but does not have to);
 * o %UBI_IO_BAD_HDR if the erase counter header is corrupted (a CRC error);
 * o %UBI_IO_BAD_HDR_EBADMSG is the same as %UBI_IO_BAD_HDR, but there was also
 *   a data integrity error (uncorrectable ECC error in case of NAND);
 * o %UBI_IO_FF if only 0xFF bytes were read (the PEB is supposedly empty);
 * o a negative error code in case of failure.
 */
int ubi_io_read_ec_hdr(struct ubi_device *ubi, int pnum,
		       struct ubi_ec_hdr *ec_hdr, int verbose)
{
	int err, read_err;
	uint32_t crc, magic, hdr_crc;

	dbg_io("read EC header from PEB %d", pnum);
	ubi_assert(pnum >= 0 && pnum < ubi->peb_count);

	read_err = ubi_io_read(ubi, ec_hdr, pnum, 0, UBI_EC_HDR_SIZE);
	if (read_err) {
		if (read_err != UBI_IO_BITFLIPS && !mtd_is_eccerr(read_err))
			return read_err;

		/*
		 * We read all the data, but either a correctable bit-flip
		 * occurred, or MTD reported a data integrity error
		 * (uncorrectable ECC error in case of NAND). The former is
		 * harmless, the latter may mean that the read data is
		 * corrupted. But we have a CRC check-sum and we will detect
		 * this. If the EC header is still OK, we just report it as a
		 * bit-flip, to force scrubbing.
		 */
	}

	magic = be32_to_cpu(ec_hdr->magic);
	if (magic != UBI_EC_HDR_MAGIC) {
		if (mtd_is_eccerr(read_err))
			return UBI_IO_BAD_HDR_EBADMSG;

		/*
		 * The magic field is wrong. Let's check if we have read all
		 * 0xFF. If yes, this physical eraseblock is assumed to be
		 * empty.
		 */
		if (ubi_check_pattern(ec_hdr, 0xFF, UBI_EC_HDR_SIZE)) {
			/* The physical eraseblock is supposedly empty */
			if (verbose)
				ubi_warn(ubi, "no EC header found at PEB %d, only 0xFF bytes",
					 pnum);
			dbg_bld("no EC header found at PEB %d, only 0xFF bytes",
				pnum);
			if (!read_err)
				return UBI_IO_FF;
			else
				return UBI_IO_FF_BITFLIPS;
		}

		/*
		 * This is not a valid erase counter header, and these are not
		 * 0xFF bytes. Report that the header is corrupted.
		 */
		if (verbose) {
			ubi_warn(ubi, "bad magic number at PEB %d: %08x instead of %08x",
				 pnum, magic, UBI_EC_HDR_MAGIC);
			ubi_dump_ec_hdr(ec_hdr);
		}
		dbg_bld("bad magic number at PEB %d: %08x instead of %08x",
			pnum, magic, UBI_EC_HDR_MAGIC);
		return UBI_IO_BAD_HDR;
	}

	crc = crc32(UBI_CRC32_INIT, ec_hdr, UBI_EC_HDR_SIZE_CRC);
	hdr_crc = be32_to_cpu(ec_hdr->hdr_crc);

	if (hdr_crc != crc) {
		if (verbose) {
			ubi_warn(ubi, "bad EC header CRC at PEB %d, calculated %#08x, read %#08x",
				 pnum, crc, hdr_crc);
			ubi_dump_ec_hdr(ec_hdr);
		}
		dbg_bld("bad EC header CRC at PEB %d, calculated %#08x, read %#08x",
			pnum, crc, hdr_crc);

		if (!read_err)
			return UBI_IO_BAD_HDR;
		else
			return UBI_IO_BAD_HDR_EBADMSG;
	}

	/* And of course validate what has just been read from the media */
	err = validate_ec_hdr(ubi, ec_hdr);
	if (err) {
		ubi_err(ubi, "validation failed for PEB %d", pnum);
		return -EINVAL;
	}

	/*
	 * If there was %-EBADMSG, but the header CRC is still OK, report about
	 * a bit-flip to force scrubbing on this PEB.
	 */
	return read_err ? UBI_IO_BITFLIPS : 0;
}

/**
 * ubi_io_write_ec_hdr - write an erase counter header.
 * @ubi: UBI device description object
 * @pnum: physical eraseblock to write to
 * @ec_hdr: the erase counter header to write
 *
 * This function writes the erase counter header described by @ec_hdr to
 * physical eraseblock @pnum. It also fills most fields of @ec_hdr before
 * writing, so the caller does not have to fill them. Callers must only fill
 * the @ec_hdr->ec field.
 *
 * This function returns zero in case of success and a negative error code in
 * case of failure. If %-EIO is returned, the physical eraseblock most probably
 * went bad.
 */
int ubi_io_write_ec_hdr(struct ubi_device *ubi, int pnum,
			struct ubi_ec_hdr *ec_hdr)
{
	int err;
	uint32_t crc;

	dbg_io("write EC header to PEB %d", pnum);
	ubi_assert(pnum >= 0 && pnum < ubi->peb_count);

	ec_hdr->magic = cpu_to_be32(UBI_EC_HDR_MAGIC);
	ec_hdr->version = UBI_VERSION;
	ec_hdr->vid_hdr_offset = cpu_to_be32(ubi->vid_hdr_offset);
	ec_hdr->data_offset = cpu_to_be32(ubi->leb_start);
	ec_hdr->image_seq = cpu_to_be32(ubi->image_seq);
	crc = crc32(UBI_CRC32_INIT, ec_hdr, UBI_EC_HDR_SIZE_CRC);
	ec_hdr->hdr_crc = cpu_to_be32(crc);

	err = self_check_ec_hdr(ubi, pnum, ec_hdr);
	if (err)
		return err;

	if (ubi_dbg_power_cut(ubi, POWER_CUT_EC_WRITE))
		return -EROFS;

	err = ubi_io_write(ubi, ec_hdr, pnum, 0, ubi->ec_hdr_alsize);
	return err;
}
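
/*
 * Illustrative usage sketch (not part of the driver): since this function
 * fills in every field except the erase counter itself, a caller only has to
 * set the counter before the call, e.g.:
 *
 *	ec_hdr->ec = cpu_to_be64(ec);
 *	err = ubi_io_write_ec_hdr(ubi, pnum, ec_hdr);
 */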

/**
 * validate_vid_hdr - validate a volume identifier header.
 * @ubi: UBI device description object
 * @vid_hdr: the volume identifier header to check
 *
 * This function checks the data stored in the volume identifier header
 * @vid_hdr. Returns zero if the VID header is OK and %1 if not.
 */
static int validate_vid_hdr(const struct ubi_device *ubi,
			    const struct ubi_vid_hdr *vid_hdr)
{
	int vol_type = vid_hdr->vol_type;
	int copy_flag = vid_hdr->copy_flag;
	int vol_id = be32_to_cpu(vid_hdr->vol_id);
	int lnum = be32_to_cpu(vid_hdr->lnum);
	int compat = vid_hdr->compat;
	int data_size = be32_to_cpu(vid_hdr->data_size);
	int used_ebs = be32_to_cpu(vid_hdr->used_ebs);
	int data_pad = be32_to_cpu(vid_hdr->data_pad);
	int data_crc = be32_to_cpu(vid_hdr->data_crc);
	int usable_leb_size = ubi->leb_size - data_pad;

	if (copy_flag != 0 && copy_flag != 1) {
		ubi_err(ubi, "bad copy_flag");
		goto bad;
	}

	if (vol_id < 0 || lnum < 0 || data_size < 0 || used_ebs < 0 ||
	    data_pad < 0) {
		ubi_err(ubi, "negative values");
		goto bad;
	}

	if (vol_id >= UBI_MAX_VOLUMES && vol_id < UBI_INTERNAL_VOL_START) {
		ubi_err(ubi, "bad vol_id");
		goto bad;
	}

	if (vol_id < UBI_INTERNAL_VOL_START && compat != 0) {
		ubi_err(ubi, "bad compat");
		goto bad;
	}

	if (vol_id >= UBI_INTERNAL_VOL_START && compat != UBI_COMPAT_DELETE &&
	    compat != UBI_COMPAT_RO && compat != UBI_COMPAT_PRESERVE &&
	    compat != UBI_COMPAT_REJECT) {
		ubi_err(ubi, "bad compat");
		goto bad;
	}

	if (vol_type != UBI_VID_DYNAMIC && vol_type != UBI_VID_STATIC) {
		ubi_err(ubi, "bad vol_type");
		goto bad;
	}

	if (data_pad >= ubi->leb_size / 2) {
		ubi_err(ubi, "bad data_pad");
		goto bad;
	}

	if (data_size > ubi->leb_size) {
		ubi_err(ubi, "bad data_size");
		goto bad;
	}

	if (vol_type == UBI_VID_STATIC) {
		/*
		 * Although from a high-level point of view static volumes may
		 * contain zero bytes of data, no VID header can contain zero
		 * in these fields, because empty volumes do not have mapped
		 * logical eraseblocks.
		 */
		if (used_ebs == 0) {
			ubi_err(ubi, "zero used_ebs");
			goto bad;
		}
		if (data_size == 0) {
			ubi_err(ubi, "zero data_size");
			goto bad;
		}
		if (lnum < used_ebs - 1) {
			if (data_size != usable_leb_size) {
				ubi_err(ubi, "bad data_size");
				goto bad;
			}
		} else if (lnum == used_ebs - 1) {
			if (data_size == 0) {
				ubi_err(ubi, "bad data_size at last LEB");
				goto bad;
			}
		} else {
			ubi_err(ubi, "too high lnum");
			goto bad;
		}
	} else {
		if (copy_flag == 0) {
			if (data_crc != 0) {
				ubi_err(ubi, "non-zero data CRC");
				goto bad;
			}
			if (data_size != 0) {
				ubi_err(ubi, "non-zero data_size");
				goto bad;
			}
		} else {
			if (data_size == 0) {
				ubi_err(ubi, "zero data_size of copy");
				goto bad;
			}
		}
		if (used_ebs != 0) {
			ubi_err(ubi, "bad used_ebs");
			goto bad;
		}
	}

	return 0;

bad:
	ubi_err(ubi, "bad VID header");
	ubi_dump_vid_hdr(vid_hdr);
	dump_stack();
	return 1;
}

/**
 * ubi_io_read_vid_hdr - read and check a volume identifier header.
 * @ubi: UBI device description object
 * @pnum: physical eraseblock number to read from
 * @vidb: the volume identifier buffer to store data in
 * @verbose: be verbose if the header is corrupted or wasn't found
 *
 * This function reads the volume identifier header from physical eraseblock
 * @pnum and stores it in @vidb. It also checks the CRC checksum of the read
 * volume identifier header. The error codes are the same as in
 * 'ubi_io_read_ec_hdr()'.
 *
 * Note, the implementation of this function is also very similar to
 * 'ubi_io_read_ec_hdr()', so refer to the commentaries in
 * 'ubi_io_read_ec_hdr()'.
 */
int ubi_io_read_vid_hdr(struct ubi_device *ubi, int pnum,
			struct ubi_vid_io_buf *vidb, int verbose)
{
	int err, read_err;
	uint32_t crc, magic, hdr_crc;
	struct ubi_vid_hdr *vid_hdr = ubi_get_vid_hdr(vidb);
	void *p = vidb->buffer;

	dbg_io("read VID header from PEB %d", pnum);
	ubi_assert(pnum >= 0 && pnum < ubi->peb_count);

	read_err = ubi_io_read(ubi, p, pnum, ubi->vid_hdr_aloffset,
			       ubi->vid_hdr_shift + UBI_VID_HDR_SIZE);
	if (read_err && read_err != UBI_IO_BITFLIPS && !mtd_is_eccerr(read_err))
		return read_err;

	magic = be32_to_cpu(vid_hdr->magic);
	if (magic != UBI_VID_HDR_MAGIC) {
		if (mtd_is_eccerr(read_err))
			return UBI_IO_BAD_HDR_EBADMSG;

		if (ubi_check_pattern(vid_hdr, 0xFF, UBI_VID_HDR_SIZE)) {
			if (verbose)
				ubi_warn(ubi, "no VID header found at PEB %d, only 0xFF bytes",
					 pnum);
			dbg_bld("no VID header found at PEB %d, only 0xFF bytes",
				pnum);
			if (!read_err)
				return UBI_IO_FF;
			else
				return UBI_IO_FF_BITFLIPS;
		}

		if (verbose) {
			ubi_warn(ubi, "bad magic number at PEB %d: %08x instead of %08x",
				 pnum, magic, UBI_VID_HDR_MAGIC);
			ubi_dump_vid_hdr(vid_hdr);
		}
		dbg_bld("bad magic number at PEB %d: %08x instead of %08x",
			pnum, magic, UBI_VID_HDR_MAGIC);
		return UBI_IO_BAD_HDR;
	}

	crc = crc32(UBI_CRC32_INIT, vid_hdr, UBI_VID_HDR_SIZE_CRC);
	hdr_crc = be32_to_cpu(vid_hdr->hdr_crc);

	if (hdr_crc != crc) {
		if (verbose) {
			ubi_warn(ubi, "bad CRC at PEB %d, calculated %#08x, read %#08x",
				 pnum, crc, hdr_crc);
			ubi_dump_vid_hdr(vid_hdr);
		}
		dbg_bld("bad CRC at PEB %d, calculated %#08x, read %#08x",
			pnum, crc, hdr_crc);
		if (!read_err)
			return UBI_IO_BAD_HDR;
		else
			return UBI_IO_BAD_HDR_EBADMSG;
	}

	err = validate_vid_hdr(ubi, vid_hdr);
	if (err) {
		ubi_err(ubi, "validation failed for PEB %d", pnum);
		return -EINVAL;
	}

	return read_err ? UBI_IO_BITFLIPS : 0;
}

/**
 * ubi_io_write_vid_hdr - write a volume identifier header.
 * @ubi: UBI device description object
 * @pnum: the physical eraseblock number to write to
 * @vidb: the volume identifier buffer to write
 *
 * This function writes the volume identifier header described by @vidb to
 * physical eraseblock @pnum. This function automatically fills the
 * @vidb->hdr->magic and the @vidb->hdr->version fields, as well as calculates
 * the header CRC checksum and stores it in @vidb->hdr->hdr_crc.
 *
 * This function returns zero in case of success and a negative error code in
 * case of failure. If %-EIO is returned, the physical eraseblock probably went
 * bad.
 */
int ubi_io_write_vid_hdr(struct ubi_device *ubi, int pnum,
			 struct ubi_vid_io_buf *vidb)
{
	struct ubi_vid_hdr *vid_hdr = ubi_get_vid_hdr(vidb);
	int err;
	uint32_t crc;
	void *p = vidb->buffer;

	dbg_io("write VID header to PEB %d", pnum);
	ubi_assert(pnum >= 0 && pnum < ubi->peb_count);

	err = self_check_peb_ec_hdr(ubi, pnum);
	if (err)
		return err;

	vid_hdr->magic = cpu_to_be32(UBI_VID_HDR_MAGIC);
	vid_hdr->version = UBI_VERSION;
	crc = crc32(UBI_CRC32_INIT, vid_hdr, UBI_VID_HDR_SIZE_CRC);
	vid_hdr->hdr_crc = cpu_to_be32(crc);

	err = self_check_vid_hdr(ubi, pnum, vid_hdr);
	if (err)
		return err;

	if (ubi_dbg_power_cut(ubi, POWER_CUT_VID_WRITE))
		return -EROFS;

	err = ubi_io_write(ubi, p, pnum, ubi->vid_hdr_aloffset,
			   ubi->vid_hdr_alsize);
	return err;
}
1079
Artem B. Bityutskiy801c1352006-06-27 12:22:22 +04001080/**
Artem Bityutskiy8056eb42012-05-16 18:24:09 +03001081 * self_check_not_bad - ensure that a physical eraseblock is not bad.
Artem B. Bityutskiy801c1352006-06-27 12:22:22 +04001082 * @ubi: UBI device description object
1083 * @pnum: physical eraseblock number to check
1084 *
Artem Bityutskiyadbf05e2010-01-20 10:28:58 +02001085 * This function returns zero if the physical eraseblock is good, %-EINVAL if
1086 * it is bad and a negative error code if an error occurred.
Artem B. Bityutskiy801c1352006-06-27 12:22:22 +04001087 */
Artem Bityutskiy8056eb42012-05-16 18:24:09 +03001088static int self_check_not_bad(const struct ubi_device *ubi, int pnum)
Artem B. Bityutskiy801c1352006-06-27 12:22:22 +04001089{
1090 int err;
1091
Ezequiel Garcia64575572012-11-28 09:18:29 -03001092 if (!ubi_dbg_chk_io(ubi))
Artem Bityutskiy92d124f2011-03-14 18:17:40 +02001093 return 0;
1094
Artem B. Bityutskiy801c1352006-06-27 12:22:22 +04001095 err = ubi_io_is_bad(ubi, pnum);
1096 if (!err)
1097 return err;
1098
Tanya Brokhman326087032014-10-20 19:57:00 +03001099 ubi_err(ubi, "self-check failed for PEB %d", pnum);
Artem Bityutskiy25886a32012-04-24 06:59:49 +03001100 dump_stack();
Artem Bityutskiyadbf05e2010-01-20 10:28:58 +02001101 return err > 0 ? -EINVAL : err;
Artem B. Bityutskiy801c1352006-06-27 12:22:22 +04001102}
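
/*
 * Usage sketch (illustrative only, assuming the common pattern in this file):
 * I/O paths call this check first and bail out before touching a PEB which is
 * marked bad:
 *
 *	err = self_check_not_bad(ubi, pnum);
 *	if (err)
 *		return err;
 */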
1103
1104/**
Artem Bityutskiy8056eb42012-05-16 18:24:09 +03001105 * self_check_ec_hdr - check if an erase counter header is all right.
Artem B. Bityutskiy801c1352006-06-27 12:22:22 +04001106 * @ubi: UBI device description object
1107 * @pnum: physical eraseblock number the erase counter header belongs to
1108 * @ec_hdr: the erase counter header to check
1109 *
1110 * This function returns zero if the erase counter header contains valid
Artem Bityutskiyadbf05e2010-01-20 10:28:58 +02001111 * values, and %-EINVAL if not.
Artem B. Bityutskiy801c1352006-06-27 12:22:22 +04001112 */
Artem Bityutskiy8056eb42012-05-16 18:24:09 +03001113static int self_check_ec_hdr(const struct ubi_device *ubi, int pnum,
1114 const struct ubi_ec_hdr *ec_hdr)
Artem B. Bityutskiy801c1352006-06-27 12:22:22 +04001115{
1116 int err;
1117 uint32_t magic;
1118
Ezequiel Garcia64575572012-11-28 09:18:29 -03001119 if (!ubi_dbg_chk_io(ubi))
Artem Bityutskiy92d124f2011-03-14 18:17:40 +02001120 return 0;
1121
Christoph Hellwig3261ebd2007-05-21 17:41:46 +03001122 magic = be32_to_cpu(ec_hdr->magic);
Artem B. Bityutskiy801c1352006-06-27 12:22:22 +04001123 if (magic != UBI_EC_HDR_MAGIC) {
Tanya Brokhman326087032014-10-20 19:57:00 +03001124 ubi_err(ubi, "bad magic %#08x, must be %#08x",
Artem B. Bityutskiy801c1352006-06-27 12:22:22 +04001125 magic, UBI_EC_HDR_MAGIC);
1126 goto fail;
1127 }
1128
1129 err = validate_ec_hdr(ubi, ec_hdr);
1130 if (err) {
Tanya Brokhman326087032014-10-20 19:57:00 +03001131 ubi_err(ubi, "self-check failed for PEB %d", pnum);
Artem B. Bityutskiy801c1352006-06-27 12:22:22 +04001132 goto fail;
1133 }
1134
1135 return 0;
1136
1137fail:
Artem Bityutskiya904e3f2012-04-25 09:02:44 +03001138 ubi_dump_ec_hdr(ec_hdr);
Artem Bityutskiy25886a32012-04-24 06:59:49 +03001139 dump_stack();
Artem Bityutskiyadbf05e2010-01-20 10:28:58 +02001140 return -EINVAL;
Artem B. Bityutskiy801c1352006-06-27 12:22:22 +04001141}
1142
1143/**
Artem Bityutskiy8056eb42012-05-16 18:24:09 +03001144 * self_check_peb_ec_hdr - check erase counter header.
Artem B. Bityutskiy801c1352006-06-27 12:22:22 +04001145 * @ubi: UBI device description object
1146 * @pnum: the physical eraseblock number to check
1147 *
Artem Bityutskiyadbf05e2010-01-20 10:28:58 +02001148 * This function returns zero if the erase counter header is all right and
1149 * a negative error code if not or if an error occurred.
Artem B. Bityutskiy801c1352006-06-27 12:22:22 +04001150 */
Artem Bityutskiy8056eb42012-05-16 18:24:09 +03001151static int self_check_peb_ec_hdr(const struct ubi_device *ubi, int pnum)
Artem B. Bityutskiy801c1352006-06-27 12:22:22 +04001152{
1153 int err;
1154 uint32_t crc, hdr_crc;
1155 struct ubi_ec_hdr *ec_hdr;
1156
Ezequiel Garcia64575572012-11-28 09:18:29 -03001157 if (!ubi_dbg_chk_io(ubi))
Artem Bityutskiy92d124f2011-03-14 18:17:40 +02001158 return 0;
1159
Artem Bityutskiy33818bb2007-08-28 21:29:32 +03001160 ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_NOFS);
Artem B. Bityutskiy801c1352006-06-27 12:22:22 +04001161 if (!ec_hdr)
1162 return -ENOMEM;
1163
1164 err = ubi_io_read(ubi, ec_hdr, pnum, 0, UBI_EC_HDR_SIZE);
Brian Norrisd57f40542011-09-20 18:34:25 -07001165 if (err && err != UBI_IO_BITFLIPS && !mtd_is_eccerr(err))
Artem B. Bityutskiy801c1352006-06-27 12:22:22 +04001166 goto exit;
1167
1168 crc = crc32(UBI_CRC32_INIT, ec_hdr, UBI_EC_HDR_SIZE_CRC);
Christoph Hellwig3261ebd2007-05-21 17:41:46 +03001169 hdr_crc = be32_to_cpu(ec_hdr->hdr_crc);
Artem B. Bityutskiy801c1352006-06-27 12:22:22 +04001170 if (hdr_crc != crc) {
Tanya Brokhman326087032014-10-20 19:57:00 +03001171 ubi_err(ubi, "bad CRC, calculated %#08x, read %#08x",
1172 crc, hdr_crc);
1173 ubi_err(ubi, "self-check failed for PEB %d", pnum);
Artem Bityutskiya904e3f2012-04-25 09:02:44 +03001174 ubi_dump_ec_hdr(ec_hdr);
Artem Bityutskiy25886a32012-04-24 06:59:49 +03001175 dump_stack();
Artem Bityutskiyadbf05e2010-01-20 10:28:58 +02001176 err = -EINVAL;
Artem B. Bityutskiy801c1352006-06-27 12:22:22 +04001177 goto exit;
1178 }
1179
Artem Bityutskiy8056eb42012-05-16 18:24:09 +03001180 err = self_check_ec_hdr(ubi, pnum, ec_hdr);
Artem B. Bityutskiy801c1352006-06-27 12:22:22 +04001181
1182exit:
1183 kfree(ec_hdr);
1184 return err;
1185}
1186
1187/**
Artem Bityutskiy8056eb42012-05-16 18:24:09 +03001188 * self_check_vid_hdr - check that a volume identifier header is all right.
Artem B. Bityutskiy801c1352006-06-27 12:22:22 +04001189 * @ubi: UBI device description object
1190 * @pnum: physical eraseblock number the volume identifier header belongs to
1191 * @vid_hdr: the volume identifier header to check
1192 *
1193 * This function returns zero if the volume identifier header is all right, and
Artem Bityutskiyadbf05e2010-01-20 10:28:58 +02001194 * %-EINVAL if not.
Artem B. Bityutskiy801c1352006-06-27 12:22:22 +04001195 */
Artem Bityutskiy8056eb42012-05-16 18:24:09 +03001196static int self_check_vid_hdr(const struct ubi_device *ubi, int pnum,
1197 const struct ubi_vid_hdr *vid_hdr)
Artem B. Bityutskiy801c1352006-06-27 12:22:22 +04001198{
1199 int err;
1200 uint32_t magic;
1201
Ezequiel Garcia64575572012-11-28 09:18:29 -03001202 if (!ubi_dbg_chk_io(ubi))
Artem Bityutskiy92d124f2011-03-14 18:17:40 +02001203 return 0;
1204
Christoph Hellwig3261ebd2007-05-21 17:41:46 +03001205 magic = be32_to_cpu(vid_hdr->magic);
Artem B. Bityutskiy801c1352006-06-27 12:22:22 +04001206 if (magic != UBI_VID_HDR_MAGIC) {
Tanya Brokhman326087032014-10-20 19:57:00 +03001207 ubi_err(ubi, "bad VID header magic %#08x at PEB %d, must be %#08x",
Artem B. Bityutskiy801c1352006-06-27 12:22:22 +04001208 magic, pnum, UBI_VID_HDR_MAGIC);
1209 goto fail;
1210 }
1211
1212 err = validate_vid_hdr(ubi, vid_hdr);
1213 if (err) {
Tanya Brokhman326087032014-10-20 19:57:00 +03001214 ubi_err(ubi, "self-check failed for PEB %d", pnum);
Artem B. Bityutskiy801c1352006-06-27 12:22:22 +04001215 goto fail;
1216 }
1217
1218 return err;
1219
1220fail:
Tanya Brokhman326087032014-10-20 19:57:00 +03001221 ubi_err(ubi, "self-check failed for PEB %d", pnum);
Artem Bityutskiya904e3f2012-04-25 09:02:44 +03001222 ubi_dump_vid_hdr(vid_hdr);
Artem Bityutskiy25886a32012-04-24 06:59:49 +03001223 dump_stack();
Artem Bityutskiyadbf05e2010-01-20 10:28:58 +02001224 return -EINVAL;
Artem B. Bityutskiy801c1352006-06-27 12:22:22 +04001225
1226}
1227
1228/**
Artem Bityutskiy8056eb42012-05-16 18:24:09 +03001229 * self_check_peb_vid_hdr - check volume identifier header.
Artem B. Bityutskiy801c1352006-06-27 12:22:22 +04001230 * @ubi: UBI device description object
1231 * @pnum: the physical eraseblock number to check
1232 *
1233 * This function returns zero if the volume identifier header is all right,
Artem Bityutskiyadbf05e2010-01-20 10:28:58 +02001234 * and a negative error code if not or if an error occurred.
Artem B. Bityutskiy801c1352006-06-27 12:22:22 +04001235 */
Artem Bityutskiy8056eb42012-05-16 18:24:09 +03001236static int self_check_peb_vid_hdr(const struct ubi_device *ubi, int pnum)
Artem B. Bityutskiy801c1352006-06-27 12:22:22 +04001237{
1238 int err;
1239 uint32_t crc, hdr_crc;
Boris Brezillon3291b522016-09-16 16:59:26 +02001240 struct ubi_vid_io_buf *vidb;
Artem B. Bityutskiy801c1352006-06-27 12:22:22 +04001241 struct ubi_vid_hdr *vid_hdr;
1242 void *p;
1243
Ezequiel Garcia64575572012-11-28 09:18:29 -03001244 if (!ubi_dbg_chk_io(ubi))
Artem Bityutskiy92d124f2011-03-14 18:17:40 +02001245 return 0;
1246
Boris Brezillon3291b522016-09-16 16:59:26 +02001247 vidb = ubi_alloc_vid_buf(ubi, GFP_NOFS);
1248 if (!vidb)
Artem B. Bityutskiy801c1352006-06-27 12:22:22 +04001249 return -ENOMEM;
1250
Boris Brezillon3291b522016-09-16 16:59:26 +02001251 vid_hdr = ubi_get_vid_hdr(vidb);
1252 p = vidb->buffer;
Artem B. Bityutskiy801c1352006-06-27 12:22:22 +04001253 err = ubi_io_read(ubi, p, pnum, ubi->vid_hdr_aloffset,
1254 ubi->vid_hdr_alsize);
Brian Norrisd57f40542011-09-20 18:34:25 -07001255 if (err && err != UBI_IO_BITFLIPS && !mtd_is_eccerr(err))
Artem B. Bityutskiy801c1352006-06-27 12:22:22 +04001256 goto exit;
1257
Brian Norris2e69d492015-11-20 14:10:54 -08001258 crc = crc32(UBI_CRC32_INIT, vid_hdr, UBI_VID_HDR_SIZE_CRC);
Christoph Hellwig3261ebd2007-05-21 17:41:46 +03001259 hdr_crc = be32_to_cpu(vid_hdr->hdr_crc);
Artem B. Bityutskiy801c1352006-06-27 12:22:22 +04001260 if (hdr_crc != crc) {
Tanya Brokhman326087032014-10-20 19:57:00 +03001261 ubi_err(ubi, "bad VID header CRC at PEB %d, calculated %#08x, read %#08x",
Artem Bityutskiy049333c2012-08-27 14:43:54 +03001262 pnum, crc, hdr_crc);
Tanya Brokhman326087032014-10-20 19:57:00 +03001263 ubi_err(ubi, "self-check failed for PEB %d", pnum);
Artem Bityutskiya904e3f2012-04-25 09:02:44 +03001264 ubi_dump_vid_hdr(vid_hdr);
Artem Bityutskiy25886a32012-04-24 06:59:49 +03001265 dump_stack();
Artem Bityutskiyadbf05e2010-01-20 10:28:58 +02001266 err = -EINVAL;
Artem B. Bityutskiy801c1352006-06-27 12:22:22 +04001267 goto exit;
1268 }
1269
Artem Bityutskiy8056eb42012-05-16 18:24:09 +03001270 err = self_check_vid_hdr(ubi, pnum, vid_hdr);
Artem B. Bityutskiy801c1352006-06-27 12:22:22 +04001271
1272exit:
Boris Brezillon3291b522016-09-16 16:59:26 +02001273 ubi_free_vid_buf(vidb);
Artem B. Bityutskiy801c1352006-06-27 12:22:22 +04001274 return err;
1275}
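
/*
 * Usage sketch (illustrative, assuming the debug-check pattern used by the
 * write path of this file): before user data are written behind the headers,
 * both on-flash headers of the target PEB can be re-validated:
 *
 *	err = self_check_peb_ec_hdr(ubi, pnum);
 *	if (err)
 *		return err;
 *	err = self_check_peb_vid_hdr(ubi, pnum);
 *	if (err)
 *		return err;
 */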
1276
1277/**
Artem Bityutskiy97d61042012-05-16 19:29:04 +03001278 * self_check_write - make sure write succeeded.
Artem Bityutskiy6e9065d2010-01-25 17:09:30 +02001279 * @ubi: UBI device description object
1280 * @buf: buffer with data which were written
1281 * @pnum: physical eraseblock number the data were written to
1282 * @offset: offset within the physical eraseblock the data were written to
1283 * @len: how many bytes were written
1284 *
1285 * This function reads data which were recently written and compares them with
1286 * the original data buffer - the data have to match. Returns zero if the data
1287 * match and a negative error code if not or in case of failure.
1288 */
Artem Bityutskiy97d61042012-05-16 19:29:04 +03001289static int self_check_write(struct ubi_device *ubi, const void *buf, int pnum,
1290 int offset, int len)
Artem Bityutskiy6e9065d2010-01-25 17:09:30 +02001291{
1292 int err, i;
Artem Bityutskiy7950d022010-11-19 17:05:36 +02001293 size_t read;
Artem Bityutskiya7586742011-03-14 17:06:52 +02001294 void *buf1;
Artem Bityutskiy7950d022010-11-19 17:05:36 +02001295 loff_t addr = (loff_t)pnum * ubi->peb_size + offset;
Artem Bityutskiy6e9065d2010-01-25 17:09:30 +02001296
Ezequiel Garcia64575572012-11-28 09:18:29 -03001297 if (!ubi_dbg_chk_io(ubi))
Artem Bityutskiy92d124f2011-03-14 18:17:40 +02001298 return 0;
1299
Artem Bityutskiy3d46b312011-03-24 16:09:56 +02001300 buf1 = __vmalloc(len, GFP_NOFS, PAGE_KERNEL);
Artem Bityutskiya7586742011-03-14 17:06:52 +02001301 if (!buf1) {
Tanya Brokhman326087032014-10-20 19:57:00 +03001302 ubi_err(ubi, "cannot allocate memory to check writes");
Artem Bityutskiya7586742011-03-14 17:06:52 +02001303 return 0;
1304 }
1305
Artem Bityutskiy329ad392011-12-23 17:30:16 +02001306 err = mtd_read(ubi->mtd, addr, len, &read, buf1);
Brian Norrisd57f40542011-09-20 18:34:25 -07001307 if (err && !mtd_is_bitflip(err))
Artem Bityutskiya7586742011-03-14 17:06:52 +02001308 goto out_free;
Artem Bityutskiy6e9065d2010-01-25 17:09:30 +02001309
1310 for (i = 0; i < len; i++) {
1311 uint8_t c = ((uint8_t *)buf)[i];
Artem Bityutskiya7586742011-03-14 17:06:52 +02001312 uint8_t c1 = ((uint8_t *)buf1)[i];
Artem Bityutskiy6e9065d2010-01-25 17:09:30 +02001313 int dump_len;
1314
1315 if (c == c1)
1316 continue;
1317
Tanya Brokhman326087032014-10-20 19:57:00 +03001318 ubi_err(ubi, "self-check failed for PEB %d:%d, len %d",
Artem Bityutskiy6e9065d2010-01-25 17:09:30 +02001319 pnum, offset, len);
Tanya Brokhman326087032014-10-20 19:57:00 +03001320 ubi_msg(ubi, "data differ at position %d", i);
Artem Bityutskiy6e9065d2010-01-25 17:09:30 +02001321 dump_len = max_t(int, 128, len - i);
Tanya Brokhman326087032014-10-20 19:57:00 +03001322 ubi_msg(ubi, "hex dump of the original buffer from %d to %d",
Artem Bityutskiy6e9065d2010-01-25 17:09:30 +02001323 i, i + dump_len);
1324 print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1,
1325 buf + i, dump_len, 1);
Tanya Brokhman326087032014-10-20 19:57:00 +03001326 ubi_msg(ubi, "hex dump of the read buffer from %d to %d",
Artem Bityutskiy6e9065d2010-01-25 17:09:30 +02001327 i, i + dump_len);
1328 print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1,
Artem Bityutskiya7586742011-03-14 17:06:52 +02001329 buf1 + i, dump_len, 1);
Artem Bityutskiy25886a32012-04-24 06:59:49 +03001330 dump_stack();
Artem Bityutskiy6e9065d2010-01-25 17:09:30 +02001331 err = -EINVAL;
Artem Bityutskiya7586742011-03-14 17:06:52 +02001332 goto out_free;
Artem Bityutskiy6e9065d2010-01-25 17:09:30 +02001333 }
Artem Bityutskiy6e9065d2010-01-25 17:09:30 +02001334
Artem Bityutskiya7586742011-03-14 17:06:52 +02001335 vfree(buf1);
Artem Bityutskiy6e9065d2010-01-25 17:09:30 +02001336 return 0;
1337
Artem Bityutskiya7586742011-03-14 17:06:52 +02001338out_free:
1339 vfree(buf1);
Artem Bityutskiy6e9065d2010-01-25 17:09:30 +02001340 return err;
1341}
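
/*
 * Usage sketch (a minimal example, not a verbatim copy of ubi_io_write()):
 * when I/O debugging checks are enabled, a write is followed by a read-back
 * comparison of exactly the same region:
 *
 *	err = mtd_write(ubi->mtd, addr, len, &written, buf);
 *	if (!err)
 *		err = self_check_write(ubi, buf, pnum, offset, len);
 */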
1342
1343/**
Artem Bityutskiy97d61042012-05-16 19:29:04 +03001344 * ubi_self_check_all_ff - check that a region of flash is empty.
Artem B. Bityutskiy801c1352006-06-27 12:22:22 +04001345 * @ubi: UBI device description object
1346 * @pnum: the physical eraseblock number to check
1347 * @offset: the starting offset within the physical eraseblock to check
1348 * @len: the length of the region to check
1349 *
1350 * This function returns zero if only 0xFF bytes are present at offset
Artem Bityutskiyadbf05e2010-01-20 10:28:58 +02001351 * @offset of the physical eraseblock @pnum, and a negative error code if not
1352 * or if an error occurred.
Artem B. Bityutskiy801c1352006-06-27 12:22:22 +04001353 */
Artem Bityutskiy97d61042012-05-16 19:29:04 +03001354int ubi_self_check_all_ff(struct ubi_device *ubi, int pnum, int offset, int len)
Artem B. Bityutskiy801c1352006-06-27 12:22:22 +04001355{
1356 size_t read;
1357 int err;
Artem Bityutskiy332873d2011-03-14 17:09:40 +02001358 void *buf;
Artem B. Bityutskiy801c1352006-06-27 12:22:22 +04001359 loff_t addr = (loff_t)pnum * ubi->peb_size + offset;
1360
Ezequiel Garcia64575572012-11-28 09:18:29 -03001361 if (!ubi_dbg_chk_io(ubi))
Artem Bityutskiy92d124f2011-03-14 18:17:40 +02001362 return 0;
1363
Artem Bityutskiy3d46b312011-03-24 16:09:56 +02001364 buf = __vmalloc(len, GFP_NOFS, PAGE_KERNEL);
Artem Bityutskiy332873d2011-03-14 17:09:40 +02001365 if (!buf) {
Tanya Brokhman326087032014-10-20 19:57:00 +03001366 ubi_err(ubi, "cannot allocate memory to check for 0xFFs");
Artem Bityutskiy332873d2011-03-14 17:09:40 +02001367 return 0;
1368 }
1369
Artem Bityutskiy329ad392011-12-23 17:30:16 +02001370 err = mtd_read(ubi->mtd, addr, len, &read, buf);
Brian Norrisd57f40542011-09-20 18:34:25 -07001371 if (err && !mtd_is_bitflip(err)) {
Tanya Brokhman326087032014-10-20 19:57:00 +03001372 ubi_err(ubi, "err %d while reading %d bytes from PEB %d:%d, read %zd bytes",
Artem Bityutskiy049333c2012-08-27 14:43:54 +03001373 err, len, pnum, offset, read);
Artem B. Bityutskiy801c1352006-06-27 12:22:22 +04001374 goto error;
1375 }
1376
Artem Bityutskiy332873d2011-03-14 17:09:40 +02001377 err = ubi_check_pattern(buf, 0xFF, len);
Artem B. Bityutskiy801c1352006-06-27 12:22:22 +04001378 if (err == 0) {
Tanya Brokhman326087032014-10-20 19:57:00 +03001379 ubi_err(ubi, "flash region at PEB %d:%d, length %d does not contain all 0xFF bytes",
Artem Bityutskiy049333c2012-08-27 14:43:54 +03001380 pnum, offset, len);
Artem B. Bityutskiy801c1352006-06-27 12:22:22 +04001381 goto fail;
1382 }
1383
Artem Bityutskiy332873d2011-03-14 17:09:40 +02001384 vfree(buf);
Artem B. Bityutskiy801c1352006-06-27 12:22:22 +04001385 return 0;
1386
1387fail:
Tanya Brokhman326087032014-10-20 19:57:00 +03001388 ubi_err(ubi, "self-check failed for PEB %d", pnum);
Tanya Brokhman45fc5c82014-11-09 13:06:25 +02001389 ubi_msg(ubi, "hex dump of the %d-%d region", offset, offset + len);
Artem Bityutskiy332873d2011-03-14 17:09:40 +02001390 print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1, buf, len, 1);
Artem Bityutskiyadbf05e2010-01-20 10:28:58 +02001391 err = -EINVAL;
Artem B. Bityutskiy801c1352006-06-27 12:22:22 +04001392error:
Artem Bityutskiy25886a32012-04-24 06:59:49 +03001393 dump_stack();
Artem Bityutskiy332873d2011-03-14 17:09:40 +02001394 vfree(buf);
Artem B. Bityutskiy801c1352006-06-27 12:22:22 +04001395 return err;
1396}
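
/*
 * Usage sketch (illustrative; the erase path is assumed, not quoted): after a
 * synchronous erase the whole PEB can be verified to read back as 0xFF before
 * it is handed out again:
 *
 *	err = ubi_self_check_all_ff(ubi, pnum, 0, ubi->peb_size);
 *	if (err)
 *		return err;
 */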