/*
 * Copyright (c) 2012 Linutronix GmbH
 * Author: Richard Weinberger <richard@nod.at>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
 * the GNU General Public License for more details.
 *
 */

#include <linux/crc32.h>
#include "ubi.h"

/**
 * ubi_calc_fm_size - calculates the fastmap size in bytes for an UBI device.
 * @ubi: UBI device description object
 */
size_t ubi_calc_fm_size(struct ubi_device *ubi)
{
	size_t size;

	size = sizeof(struct ubi_fm_sb) +
	       sizeof(struct ubi_fm_hdr) +
	       sizeof(struct ubi_fm_scan_pool) +
	       sizeof(struct ubi_fm_scan_pool) +
	       (ubi->peb_count * sizeof(struct ubi_fm_ec)) +
	       (sizeof(struct ubi_fm_eba) +
	       (ubi->peb_count * sizeof(__be32))) +
	       sizeof(struct ubi_fm_volhdr) * UBI_MAX_VOLUMES;
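	/* A fastmap is stored in whole LEBs, so round up to the LEB size. */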
	return roundup(size, ubi->leb_size);
}

/**
 * new_fm_vhdr - allocate a new volume header for fastmap usage.
 * @ubi: UBI device description object
 * @vol_id: the VID of the new header
 *
 * Returns a new struct ubi_vid_hdr on success.
 * NULL indicates out of memory.
 */
static struct ubi_vid_hdr *new_fm_vhdr(struct ubi_device *ubi, int vol_id)
{
	struct ubi_vid_hdr *new;

	new = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL);
	if (!new)
		goto out;

	new->vol_type = UBI_VID_DYNAMIC;
	new->vol_id = cpu_to_be32(vol_id);

	/* UBI implementations without fastmap support have to delete the
	 * fastmap.
	 */
	new->compat = UBI_COMPAT_DELETE;

out:
	return new;
}

/**
 * add_aeb - create and add an attach erase block to a given list.
 * @ai: UBI attach info object
 * @list: the target list
 * @pnum: PEB number of the new attach erase block
 * @ec: erase counter of the new PEB
 * @scrub: scrub this PEB after attaching
 *
 * Returns 0 on success, < 0 indicates an internal error.
 */
static int add_aeb(struct ubi_attach_info *ai, struct list_head *list,
		   int pnum, int ec, int scrub)
{
	struct ubi_ainf_peb *aeb;

	aeb = kmem_cache_alloc(ai->aeb_slab_cache, GFP_KERNEL);
	if (!aeb)
		return -ENOMEM;

	aeb->pnum = pnum;
	aeb->ec = ec;
	aeb->lnum = -1;
	aeb->scrub = scrub;
	aeb->copy_flag = aeb->sqnum = 0;

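	/* Keep the attach-wide erase counter statistics up to date. */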
	ai->ec_sum += aeb->ec;
	ai->ec_count++;

	if (ai->max_ec < aeb->ec)
		ai->max_ec = aeb->ec;

	if (ai->min_ec > aeb->ec)
		ai->min_ec = aeb->ec;

	list_add_tail(&aeb->u.list, list);

	return 0;
}

/**
 * add_vol - create and add a new volume to ubi_attach_info.
 * @ai: ubi_attach_info object
 * @vol_id: VID of the new volume
 * @used_ebs: number of used EBs
 * @data_pad: data padding value of the new volume
 * @vol_type: volume type
 * @last_eb_bytes: number of bytes in the last LEB
 *
 * Returns the new struct ubi_ainf_volume on success.
 * NULL indicates an error.
 */
static struct ubi_ainf_volume *add_vol(struct ubi_attach_info *ai, int vol_id,
				       int used_ebs, int data_pad, u8 vol_type,
				       int last_eb_bytes)
{
	struct ubi_ainf_volume *av;
	struct rb_node **p = &ai->volumes.rb_node, *parent = NULL;

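	/* Find the insertion point; the tree is keyed by the volume ID. */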
	while (*p) {
		parent = *p;
		av = rb_entry(parent, struct ubi_ainf_volume, rb);

		if (vol_id > av->vol_id)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	av = kmalloc(sizeof(struct ubi_ainf_volume), GFP_KERNEL);
	if (!av)
		goto out;

	av->highest_lnum = av->leb_count = 0;
	av->vol_id = vol_id;
	av->used_ebs = used_ebs;
	av->data_pad = data_pad;
	av->last_data_size = last_eb_bytes;
	av->compat = 0;
	av->vol_type = vol_type;
	av->root = RB_ROOT;

	dbg_bld("found volume (ID %i)", vol_id);

	rb_link_node(&av->rb, parent, p);
	rb_insert_color(&av->rb, &ai->volumes);

out:
	return av;
}

/**
 * assign_aeb_to_av - assigns a SEB to a given ainf_volume and removes it
 * from its original list.
 * @ai: ubi_attach_info object
 * @aeb: the SEB to be assigned
 * @av: target scan volume
 */
static void assign_aeb_to_av(struct ubi_attach_info *ai,
			     struct ubi_ainf_peb *aeb,
			     struct ubi_ainf_volume *av)
{
	struct ubi_ainf_peb *tmp_aeb;
	struct rb_node **p = &ai->volumes.rb_node, *parent = NULL;

	p = &av->root.rb_node;
	while (*p) {
		parent = *p;

		tmp_aeb = rb_entry(parent, struct ubi_ainf_peb, u.rb);
		if (aeb->lnum != tmp_aeb->lnum) {
			if (aeb->lnum < tmp_aeb->lnum)
				p = &(*p)->rb_left;
			else
				p = &(*p)->rb_right;

			continue;
		} else
			break;
	}

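	/* Move the PEB from its scan list into the volume's RB-tree. */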
	list_del(&aeb->u.list);
	av->leb_count++;

	rb_link_node(&aeb->u.rb, parent, p);
	rb_insert_color(&aeb->u.rb, &av->root);
}

/**
 * update_vol - inserts or updates a LEB which was found in a pool.
 * @ubi: the UBI device object
 * @ai: attach info object
 * @av: the volume this LEB belongs to
 * @new_vh: the volume header derived from new_aeb
 * @new_aeb: the AEB to be examined
 *
 * Returns 0 on success, < 0 indicates an internal error.
 */
static int update_vol(struct ubi_device *ubi, struct ubi_attach_info *ai,
		      struct ubi_ainf_volume *av, struct ubi_vid_hdr *new_vh,
		      struct ubi_ainf_peb *new_aeb)
{
	struct rb_node **p = &av->root.rb_node, *parent = NULL;
	struct ubi_ainf_peb *aeb, *victim;
	int cmp_res;

	while (*p) {
		parent = *p;
		aeb = rb_entry(parent, struct ubi_ainf_peb, u.rb);

		if (be32_to_cpu(new_vh->lnum) != aeb->lnum) {
			if (be32_to_cpu(new_vh->lnum) < aeb->lnum)
				p = &(*p)->rb_left;
			else
				p = &(*p)->rb_right;

			continue;
		}

		/* This case can happen if the fastmap gets written
		 * because of a volume change (creation, deletion, ..).
		 * Then a PEB can be within the persistent EBA and the pool.
		 */
		if (aeb->pnum == new_aeb->pnum) {
			ubi_assert(aeb->lnum == new_aeb->lnum);
			kmem_cache_free(ai->aeb_slab_cache, new_aeb);

			return 0;
		}

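		/* Both PEBs refer to the same LEB, find out which copy holds
		 * the more recent data.
		 */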
		cmp_res = ubi_compare_lebs(ubi, aeb, new_aeb->pnum, new_vh);
		if (cmp_res < 0)
			return cmp_res;

		/* new_aeb is newer */
		if (cmp_res & 1) {
			victim = kmem_cache_alloc(ai->aeb_slab_cache,
						  GFP_KERNEL);
			if (!victim)
				return -ENOMEM;

			victim->ec = aeb->ec;
			victim->pnum = aeb->pnum;
			list_add_tail(&victim->u.list, &ai->erase);

			if (av->highest_lnum == be32_to_cpu(new_vh->lnum))
				av->last_data_size =
					be32_to_cpu(new_vh->data_size);

			dbg_bld("vol %i: AEB %i's PEB %i is newer",
				av->vol_id, aeb->lnum, new_aeb->pnum);

			aeb->ec = new_aeb->ec;
			aeb->pnum = new_aeb->pnum;
			aeb->copy_flag = new_vh->copy_flag;
			aeb->scrub = new_aeb->scrub;
			kmem_cache_free(ai->aeb_slab_cache, new_aeb);

		/* new_aeb is older */
		} else {
			dbg_bld("vol %i: AEB %i's PEB %i is old, dropping it",
				av->vol_id, aeb->lnum, new_aeb->pnum);
			list_add_tail(&new_aeb->u.list, &ai->erase);
		}

		return 0;
	}
	/* This LEB is new, let's add it to the volume */

	if (av->highest_lnum <= be32_to_cpu(new_vh->lnum)) {
		av->highest_lnum = be32_to_cpu(new_vh->lnum);
		av->last_data_size = be32_to_cpu(new_vh->data_size);
	}

	if (av->vol_type == UBI_STATIC_VOLUME)
		av->used_ebs = be32_to_cpu(new_vh->used_ebs);

	av->leb_count++;

	rb_link_node(&new_aeb->u.rb, parent, p);
	rb_insert_color(&new_aeb->u.rb, &av->root);

	return 0;
}

/**
 * process_pool_aeb - processes a non-empty PEB found in a pool.
 * @ubi: UBI device object
 * @ai: attach info object
 * @new_vh: the volume header derived from new_aeb
 * @new_aeb: the AEB to be examined
 *
 * Returns 0 on success, < 0 indicates an internal error.
 */
static int process_pool_aeb(struct ubi_device *ubi, struct ubi_attach_info *ai,
			    struct ubi_vid_hdr *new_vh,
			    struct ubi_ainf_peb *new_aeb)
{
	struct ubi_ainf_volume *av, *tmp_av = NULL;
	struct rb_node **p = &ai->volumes.rb_node, *parent = NULL;
	int found = 0;

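	/* PEBs belonging to the fastmap itself are not part of any volume. */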
	if (be32_to_cpu(new_vh->vol_id) == UBI_FM_SB_VOLUME_ID ||
	    be32_to_cpu(new_vh->vol_id) == UBI_FM_DATA_VOLUME_ID) {
		kmem_cache_free(ai->aeb_slab_cache, new_aeb);

		return 0;
	}

	/* Find the volume this SEB belongs to */
	while (*p) {
		parent = *p;
		tmp_av = rb_entry(parent, struct ubi_ainf_volume, rb);

		if (be32_to_cpu(new_vh->vol_id) > tmp_av->vol_id)
			p = &(*p)->rb_left;
		else if (be32_to_cpu(new_vh->vol_id) < tmp_av->vol_id)
			p = &(*p)->rb_right;
		else {
			found = 1;
			break;
		}
	}

	if (found)
		av = tmp_av;
	else {
		ubi_err(ubi, "orphaned volume in fastmap pool!");
		kmem_cache_free(ai->aeb_slab_cache, new_aeb);
		return UBI_BAD_FASTMAP;
	}

	ubi_assert(be32_to_cpu(new_vh->vol_id) == av->vol_id);

	return update_vol(ubi, ai, av, new_vh, new_aeb);
}

/**
 * unmap_peb - unmap a PEB.
 * If fastmap detects a free PEB in the pool it has to check whether
 * this PEB has been unmapped after writing the fastmap.
 *
 * @ai: UBI attach info object
 * @pnum: The PEB to be unmapped
 */
static void unmap_peb(struct ubi_attach_info *ai, int pnum)
{
	struct ubi_ainf_volume *av;
	struct rb_node *node, *node2;
	struct ubi_ainf_peb *aeb;

	for (node = rb_first(&ai->volumes); node; node = rb_next(node)) {
		av = rb_entry(node, struct ubi_ainf_volume, rb);

		for (node2 = rb_first(&av->root); node2;
		     node2 = rb_next(node2)) {
			aeb = rb_entry(node2, struct ubi_ainf_peb, u.rb);
			if (aeb->pnum == pnum) {
				rb_erase(&aeb->u.rb, &av->root);
				av->leb_count--;
				kmem_cache_free(ai->aeb_slab_cache, aeb);
				return;
			}
		}
	}
}

/**
 * scan_pool - scans a pool for changed (no longer empty) PEBs.
 * @ubi: UBI device object
 * @ai: attach info object
 * @pebs: an array of all PEB numbers in the pool to be scanned
 * @pool_size: size of the pool (number of entries in @pebs)
 * @max_sqnum: pointer to the maximal sequence number
 * @free: list of PEBs which are most likely free (and go into @ai->free)
 *
 * Returns 0 on success, if the pool is unusable UBI_BAD_FASTMAP is returned.
 * < 0 indicates an internal error.
 */
static int scan_pool(struct ubi_device *ubi, struct ubi_attach_info *ai,
		     int *pebs, int pool_size, unsigned long long *max_sqnum,
		     struct list_head *free)
{
	struct ubi_vid_hdr *vh;
	struct ubi_ec_hdr *ech;
	struct ubi_ainf_peb *new_aeb;
	int i, pnum, err, ret = 0;

	ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
	if (!ech)
		return -ENOMEM;

	vh = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL);
	if (!vh) {
		kfree(ech);
		return -ENOMEM;
	}

	dbg_bld("scanning fastmap pool: size = %i", pool_size);

	/*
	 * Now scan all PEBs in the pool to find changes which have been made
	 * after the creation of the fastmap
	 */
	for (i = 0; i < pool_size; i++) {
		int scrub = 0;
		int image_seq;

		pnum = be32_to_cpu(pebs[i]);

		if (ubi_io_is_bad(ubi, pnum)) {
			ubi_err(ubi, "bad PEB in fastmap pool!");
			ret = UBI_BAD_FASTMAP;
			goto out;
		}

		err = ubi_io_read_ec_hdr(ubi, pnum, ech, 0);
		if (err && err != UBI_IO_BITFLIPS) {
			ubi_err(ubi, "unable to read EC header! PEB:%i err:%i",
				pnum, err);
			ret = err > 0 ? UBI_BAD_FASTMAP : err;
			goto out;
		} else if (err == UBI_IO_BITFLIPS)
			scrub = 1;

		/*
		 * Older UBI implementations have image_seq set to zero, so
		 * we shouldn't fail if image_seq == 0.
		 */
		image_seq = be32_to_cpu(ech->image_seq);

		if (image_seq && (image_seq != ubi->image_seq)) {
			ubi_err(ubi, "bad image seq: 0x%x, expected: 0x%x",
				be32_to_cpu(ech->image_seq), ubi->image_seq);
			ret = UBI_BAD_FASTMAP;
			goto out;
		}

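		/* An all-0xFF area where the VID header should be means that
		 * the PEB carries no data and is free.
		 */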
		err = ubi_io_read_vid_hdr(ubi, pnum, vh, 0);
		if (err == UBI_IO_FF || err == UBI_IO_FF_BITFLIPS) {
			unsigned long long ec = be64_to_cpu(ech->ec);
			unmap_peb(ai, pnum);
			dbg_bld("Adding PEB to free: %i", pnum);
			if (err == UBI_IO_FF_BITFLIPS)
				add_aeb(ai, free, pnum, ec, 1);
			else
				add_aeb(ai, free, pnum, ec, 0);
			continue;
		} else if (err == 0 || err == UBI_IO_BITFLIPS) {
			dbg_bld("Found non empty PEB:%i in pool", pnum);

			if (err == UBI_IO_BITFLIPS)
				scrub = 1;

			new_aeb = kmem_cache_alloc(ai->aeb_slab_cache,
						   GFP_KERNEL);
			if (!new_aeb) {
				ret = -ENOMEM;
				goto out;
			}

			new_aeb->ec = be64_to_cpu(ech->ec);
			new_aeb->pnum = pnum;
			new_aeb->lnum = be32_to_cpu(vh->lnum);
			new_aeb->sqnum = be64_to_cpu(vh->sqnum);
			new_aeb->copy_flag = vh->copy_flag;
			new_aeb->scrub = scrub;

			if (*max_sqnum < new_aeb->sqnum)
				*max_sqnum = new_aeb->sqnum;

			err = process_pool_aeb(ubi, ai, vh, new_aeb);
			if (err) {
				ret = err > 0 ? UBI_BAD_FASTMAP : err;
				goto out;
			}
		} else {
			/* We are paranoid and fall back to scanning mode */
			ubi_err(ubi, "fastmap pool contains damaged PEBs!");
			ret = err > 0 ? UBI_BAD_FASTMAP : err;
			goto out;
		}
	}

out:
	ubi_free_vid_hdr(ubi, vh);
	kfree(ech);
	return ret;
}

/**
 * count_fastmap_pebs - Counts the PEBs found by fastmap.
 * @ai: The UBI attach info object
 */
static int count_fastmap_pebs(struct ubi_attach_info *ai)
{
	struct ubi_ainf_peb *aeb;
	struct ubi_ainf_volume *av;
	struct rb_node *rb1, *rb2;
	int n = 0;

	list_for_each_entry(aeb, &ai->erase, u.list)
		n++;

	list_for_each_entry(aeb, &ai->free, u.list)
		n++;

	ubi_rb_for_each_entry(rb1, av, &ai->volumes, rb)
		ubi_rb_for_each_entry(rb2, aeb, &av->root, u.rb)
			n++;

	return n;
}

/**
 * ubi_attach_fastmap - creates ubi_attach_info from a fastmap.
 * @ubi: UBI device object
 * @ai: UBI attach info object
 * @fm: the fastmap to be attached
 *
 * Returns 0 on success, UBI_BAD_FASTMAP if the found fastmap was unusable.
 * < 0 indicates an internal error.
 */
static int ubi_attach_fastmap(struct ubi_device *ubi,
			      struct ubi_attach_info *ai,
			      struct ubi_fastmap_layout *fm)
{
	struct list_head used, free;
	struct ubi_ainf_volume *av;
	struct ubi_ainf_peb *aeb, *tmp_aeb, *_tmp_aeb;
	struct ubi_fm_sb *fmsb;
	struct ubi_fm_hdr *fmhdr;
	struct ubi_fm_scan_pool *fmpl1, *fmpl2;
	struct ubi_fm_ec *fmec;
	struct ubi_fm_volhdr *fmvhdr;
	struct ubi_fm_eba *fm_eba;
	int ret, i, j, pool_size, wl_pool_size;
	size_t fm_pos = 0, fm_size = ubi->fm_size;
	unsigned long long max_sqnum = 0;
	void *fm_raw = ubi->fm_buf;

	INIT_LIST_HEAD(&used);
	INIT_LIST_HEAD(&free);
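	/* Start with the highest possible EC so that the first scanned PEB
	 * can only lower it.
	 */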
	ai->min_ec = UBI_MAX_ERASECOUNTER;

	fmsb = (struct ubi_fm_sb *)(fm_raw);
	ai->max_sqnum = fmsb->sqnum;
	fm_pos += sizeof(struct ubi_fm_sb);
	if (fm_pos >= fm_size)
		goto fail_bad;

	fmhdr = (struct ubi_fm_hdr *)(fm_raw + fm_pos);
	fm_pos += sizeof(*fmhdr);
	if (fm_pos >= fm_size)
		goto fail_bad;

	if (be32_to_cpu(fmhdr->magic) != UBI_FM_HDR_MAGIC) {
		ubi_err(ubi, "bad fastmap header magic: 0x%x, expected: 0x%x",
			be32_to_cpu(fmhdr->magic), UBI_FM_HDR_MAGIC);
		goto fail_bad;
	}

	fmpl1 = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
	fm_pos += sizeof(*fmpl1);
	if (fm_pos >= fm_size)
		goto fail_bad;
	if (be32_to_cpu(fmpl1->magic) != UBI_FM_POOL_MAGIC) {
		ubi_err(ubi, "bad fastmap pool magic: 0x%x, expected: 0x%x",
			be32_to_cpu(fmpl1->magic), UBI_FM_POOL_MAGIC);
		goto fail_bad;
	}

	fmpl2 = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
	fm_pos += sizeof(*fmpl2);
	if (fm_pos >= fm_size)
		goto fail_bad;
	if (be32_to_cpu(fmpl2->magic) != UBI_FM_POOL_MAGIC) {
		ubi_err(ubi, "bad fastmap pool magic: 0x%x, expected: 0x%x",
			be32_to_cpu(fmpl2->magic), UBI_FM_POOL_MAGIC);
		goto fail_bad;
	}

	pool_size = be16_to_cpu(fmpl1->size);
	wl_pool_size = be16_to_cpu(fmpl2->size);
	fm->max_pool_size = be16_to_cpu(fmpl1->max_size);
	fm->max_wl_pool_size = be16_to_cpu(fmpl2->max_size);

	if (pool_size > UBI_FM_MAX_POOL_SIZE || pool_size < 0) {
		ubi_err(ubi, "bad pool size: %i", pool_size);
		goto fail_bad;
	}

	if (wl_pool_size > UBI_FM_MAX_POOL_SIZE || wl_pool_size < 0) {
		ubi_err(ubi, "bad WL pool size: %i", wl_pool_size);
		goto fail_bad;
	}

	if (fm->max_pool_size > UBI_FM_MAX_POOL_SIZE ||
	    fm->max_pool_size < 0) {
		ubi_err(ubi, "bad maximal pool size: %i", fm->max_pool_size);
		goto fail_bad;
	}

	if (fm->max_wl_pool_size > UBI_FM_MAX_POOL_SIZE ||
	    fm->max_wl_pool_size < 0) {
		ubi_err(ubi, "bad maximal WL pool size: %i",
			fm->max_wl_pool_size);
		goto fail_bad;
	}

	/* read EC values from free list */
	for (i = 0; i < be32_to_cpu(fmhdr->free_peb_count); i++) {
		fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
		fm_pos += sizeof(*fmec);
		if (fm_pos >= fm_size)
			goto fail_bad;

		add_aeb(ai, &ai->free, be32_to_cpu(fmec->pnum),
			be32_to_cpu(fmec->ec), 0);
	}

	/* read EC values from used list */
	for (i = 0; i < be32_to_cpu(fmhdr->used_peb_count); i++) {
		fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
		fm_pos += sizeof(*fmec);
		if (fm_pos >= fm_size)
			goto fail_bad;

		add_aeb(ai, &used, be32_to_cpu(fmec->pnum),
			be32_to_cpu(fmec->ec), 0);
	}

	/* read EC values from scrub list */
	for (i = 0; i < be32_to_cpu(fmhdr->scrub_peb_count); i++) {
		fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
		fm_pos += sizeof(*fmec);
		if (fm_pos >= fm_size)
			goto fail_bad;

		add_aeb(ai, &used, be32_to_cpu(fmec->pnum),
			be32_to_cpu(fmec->ec), 1);
	}

	/* read EC values from erase list */
	for (i = 0; i < be32_to_cpu(fmhdr->erase_peb_count); i++) {
		fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
		fm_pos += sizeof(*fmec);
		if (fm_pos >= fm_size)
			goto fail_bad;

		add_aeb(ai, &ai->erase, be32_to_cpu(fmec->pnum),
			be32_to_cpu(fmec->ec), 1);
	}

	ai->mean_ec = div_u64(ai->ec_sum, ai->ec_count);
	ai->bad_peb_count = be32_to_cpu(fmhdr->bad_peb_count);

	/* Iterate over all volumes and read their EBA table */
	for (i = 0; i < be32_to_cpu(fmhdr->vol_count); i++) {
		fmvhdr = (struct ubi_fm_volhdr *)(fm_raw + fm_pos);
		fm_pos += sizeof(*fmvhdr);
		if (fm_pos >= fm_size)
			goto fail_bad;

		if (be32_to_cpu(fmvhdr->magic) != UBI_FM_VHDR_MAGIC) {
			ubi_err(ubi, "bad fastmap vol header magic: 0x%x, expected: 0x%x",
				be32_to_cpu(fmvhdr->magic), UBI_FM_VHDR_MAGIC);
			goto fail_bad;
		}

		av = add_vol(ai, be32_to_cpu(fmvhdr->vol_id),
			     be32_to_cpu(fmvhdr->used_ebs),
			     be32_to_cpu(fmvhdr->data_pad),
			     fmvhdr->vol_type,
			     be32_to_cpu(fmvhdr->last_eb_bytes));

		if (!av)
			goto fail_bad;

		ai->vols_found++;
		if (ai->highest_vol_id < be32_to_cpu(fmvhdr->vol_id))
			ai->highest_vol_id = be32_to_cpu(fmvhdr->vol_id);

		fm_eba = (struct ubi_fm_eba *)(fm_raw + fm_pos);
		fm_pos += sizeof(*fm_eba);
		fm_pos += (sizeof(__be32) * be32_to_cpu(fm_eba->reserved_pebs));
		if (fm_pos >= fm_size)
			goto fail_bad;

		if (be32_to_cpu(fm_eba->magic) != UBI_FM_EBA_MAGIC) {
			ubi_err(ubi, "bad fastmap EBA header magic: 0x%x, expected: 0x%x",
				be32_to_cpu(fm_eba->magic), UBI_FM_EBA_MAGIC);
			goto fail_bad;
		}

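		/* Slot j of the EBA table holds the PEB mapped to LEB j. */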
		for (j = 0; j < be32_to_cpu(fm_eba->reserved_pebs); j++) {
			int pnum = be32_to_cpu(fm_eba->pnum[j]);

			if ((int)be32_to_cpu(fm_eba->pnum[j]) < 0)
				continue;

			aeb = NULL;
			list_for_each_entry(tmp_aeb, &used, u.list) {
				if (tmp_aeb->pnum == pnum) {
					aeb = tmp_aeb;
					break;
				}
			}

			if (!aeb) {
				ubi_err(ubi, "PEB %i is in EBA but not in used list", pnum);
				goto fail_bad;
			}

			aeb->lnum = j;

			if (av->highest_lnum <= aeb->lnum)
				av->highest_lnum = aeb->lnum;

			assign_aeb_to_av(ai, aeb, av);

			dbg_bld("inserting PEB:%i (LEB %i) to vol %i",
				aeb->pnum, aeb->lnum, av->vol_id);
		}
	}

	ret = scan_pool(ubi, ai, fmpl1->pebs, pool_size, &max_sqnum, &free);
	if (ret)
		goto fail;

	ret = scan_pool(ubi, ai, fmpl2->pebs, wl_pool_size, &max_sqnum, &free);
	if (ret)
		goto fail;

	if (max_sqnum > ai->max_sqnum)
		ai->max_sqnum = max_sqnum;

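	/* Hand the remaining PEBs on the local lists over to the attach info:
	 * likely-free ones go to ai->free, everything else is queued for
	 * erasure.
	 */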
	list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &free, u.list)
		list_move_tail(&tmp_aeb->u.list, &ai->free);

	list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &used, u.list)
		list_move_tail(&tmp_aeb->u.list, &ai->erase);

	ubi_assert(list_empty(&free));

	/*
	 * If fastmap is leaking PEBs (must not happen), raise a
	 * fat warning and fall back to scanning mode.
	 * We do this here because in ubi_wl_init() it's too late
	 * and we cannot fall back to scanning.
	 */
	if (WARN_ON(count_fastmap_pebs(ai) != ubi->peb_count -
		    ai->bad_peb_count - fm->used_blocks))
		goto fail_bad;

	return 0;

fail_bad:
	ret = UBI_BAD_FASTMAP;
fail:
	list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &used, u.list) {
		list_del(&tmp_aeb->u.list);
		kmem_cache_free(ai->aeb_slab_cache, tmp_aeb);
	}
	list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &free, u.list) {
		list_del(&tmp_aeb->u.list);
		kmem_cache_free(ai->aeb_slab_cache, tmp_aeb);
	}

	return ret;
}

/**
 * ubi_scan_fastmap - scan the fastmap.
 * @ubi: UBI device object
 * @ai: UBI attach info to be filled
 * @fm_anchor: The fastmap starts at this PEB
 *
 * Returns 0 on success, UBI_NO_FASTMAP if no fastmap was found,
 * UBI_BAD_FASTMAP if one was found but is not usable.
 * < 0 indicates an internal error.
 */
int ubi_scan_fastmap(struct ubi_device *ubi, struct ubi_attach_info *ai,
		     int fm_anchor)
{
	struct ubi_fm_sb *fmsb, *fmsb2;
	struct ubi_vid_hdr *vh;
	struct ubi_ec_hdr *ech;
	struct ubi_fastmap_layout *fm;
	int i, used_blocks, pnum, ret = 0;
	size_t fm_size;
	__be32 crc, tmp_crc;
	unsigned long long sqnum = 0;

	mutex_lock(&ubi->fm_mutex);
	memset(ubi->fm_buf, 0, ubi->fm_size);

	fmsb = kmalloc(sizeof(*fmsb), GFP_KERNEL);
	if (!fmsb) {
		ret = -ENOMEM;
		goto out;
	}

	fm = kzalloc(sizeof(*fm), GFP_KERNEL);
	if (!fm) {
		ret = -ENOMEM;
		kfree(fmsb);
		goto out;
	}

	ret = ubi_io_read(ubi, fmsb, fm_anchor, ubi->leb_start, sizeof(*fmsb));
	if (ret && ret != UBI_IO_BITFLIPS)
		goto free_fm_sb;
	else if (ret == UBI_IO_BITFLIPS)
		fm->to_be_tortured[0] = 1;

	if (be32_to_cpu(fmsb->magic) != UBI_FM_SB_MAGIC) {
		ubi_err(ubi, "bad super block magic: 0x%x, expected: 0x%x",
			be32_to_cpu(fmsb->magic), UBI_FM_SB_MAGIC);
		ret = UBI_BAD_FASTMAP;
		goto free_fm_sb;
	}

	if (fmsb->version != UBI_FM_FMT_VERSION) {
		ubi_err(ubi, "bad fastmap version: %i, expected: %i",
			fmsb->version, UBI_FM_FMT_VERSION);
		ret = UBI_BAD_FASTMAP;
		goto free_fm_sb;
	}

	used_blocks = be32_to_cpu(fmsb->used_blocks);
	if (used_blocks > UBI_FM_MAX_BLOCKS || used_blocks < 1) {
		ubi_err(ubi, "number of fastmap blocks is invalid: %i",
			used_blocks);
		ret = UBI_BAD_FASTMAP;
		goto free_fm_sb;
	}

	fm_size = ubi->leb_size * used_blocks;
	if (fm_size != ubi->fm_size) {
		ubi_err(ubi, "bad fastmap size: %zi, expected: %zi",
			fm_size, ubi->fm_size);
		ret = UBI_BAD_FASTMAP;
		goto free_fm_sb;
	}

	ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
	if (!ech) {
		ret = -ENOMEM;
		goto free_fm_sb;
	}

	vh = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL);
	if (!vh) {
		ret = -ENOMEM;
		goto free_hdr;
	}

	for (i = 0; i < used_blocks; i++) {
		int image_seq;

		pnum = be32_to_cpu(fmsb->block_loc[i]);

		if (ubi_io_is_bad(ubi, pnum)) {
			ret = UBI_BAD_FASTMAP;
			goto free_hdr;
		}

		ret = ubi_io_read_ec_hdr(ubi, pnum, ech, 0);
		if (ret && ret != UBI_IO_BITFLIPS) {
			ubi_err(ubi, "unable to read fastmap block# %i EC (PEB: %i)",
				i, pnum);
			if (ret > 0)
				ret = UBI_BAD_FASTMAP;
			goto free_hdr;
		} else if (ret == UBI_IO_BITFLIPS)
			fm->to_be_tortured[i] = 1;

		image_seq = be32_to_cpu(ech->image_seq);
		if (!ubi->image_seq)
			ubi->image_seq = image_seq;

		/*
		 * Older UBI implementations have image_seq set to zero, so
		 * we shouldn't fail if image_seq == 0.
		 */
		if (image_seq && (image_seq != ubi->image_seq)) {
			ubi_err(ubi, "wrong image seq:%d instead of %d",
				be32_to_cpu(ech->image_seq), ubi->image_seq);
			ret = UBI_BAD_FASTMAP;
			goto free_hdr;
		}

		ret = ubi_io_read_vid_hdr(ubi, pnum, vh, 0);
		if (ret && ret != UBI_IO_BITFLIPS) {
			ubi_err(ubi, "unable to read fastmap block# %i (PEB: %i)",
				i, pnum);
			goto free_hdr;
		}

		if (i == 0) {
			if (be32_to_cpu(vh->vol_id) != UBI_FM_SB_VOLUME_ID) {
				ubi_err(ubi, "bad fastmap anchor vol_id: 0x%x, expected: 0x%x",
					be32_to_cpu(vh->vol_id),
					UBI_FM_SB_VOLUME_ID);
				ret = UBI_BAD_FASTMAP;
				goto free_hdr;
			}
		} else {
			if (be32_to_cpu(vh->vol_id) != UBI_FM_DATA_VOLUME_ID) {
				ubi_err(ubi, "bad fastmap data vol_id: 0x%x, expected: 0x%x",
					be32_to_cpu(vh->vol_id),
					UBI_FM_DATA_VOLUME_ID);
				ret = UBI_BAD_FASTMAP;
				goto free_hdr;
			}
		}

		if (sqnum < be64_to_cpu(vh->sqnum))
			sqnum = be64_to_cpu(vh->sqnum);

		ret = ubi_io_read(ubi, ubi->fm_buf + (ubi->leb_size * i), pnum,
				  ubi->leb_start, ubi->leb_size);
		if (ret && ret != UBI_IO_BITFLIPS) {
			ubi_err(ubi, "unable to read fastmap block# %i (PEB: %i, err: %i)",
				i, pnum, ret);
			goto free_hdr;
		}
	}

	kfree(fmsb);
	fmsb = NULL;

	fmsb2 = (struct ubi_fm_sb *)(ubi->fm_buf);
	tmp_crc = be32_to_cpu(fmsb2->data_crc);
	fmsb2->data_crc = 0;
	crc = crc32(UBI_CRC32_INIT, ubi->fm_buf, fm_size);
	if (crc != tmp_crc) {
		ubi_err(ubi, "fastmap data CRC is invalid");
		ubi_err(ubi, "CRC should be: 0x%x, calc: 0x%x", tmp_crc, crc);
		ret = UBI_BAD_FASTMAP;
		goto free_hdr;
	}

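	/* The sqnum field was left zero at write time; fill in the highest
	 * sequence number seen while reading the fastmap blocks.
	 */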
	fmsb2->sqnum = sqnum;

	fm->used_blocks = used_blocks;

	ret = ubi_attach_fastmap(ubi, ai, fm);
	if (ret) {
		if (ret > 0)
			ret = UBI_BAD_FASTMAP;
		goto free_hdr;
	}

	for (i = 0; i < used_blocks; i++) {
		struct ubi_wl_entry *e;

		e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
		if (!e) {
			while (i--)
				kfree(fm->e[i]);

			ret = -ENOMEM;
			goto free_hdr;
		}

		e->pnum = be32_to_cpu(fmsb2->block_loc[i]);
		e->ec = be32_to_cpu(fmsb2->block_ec[i]);
		fm->e[i] = e;
	}

	ubi->fm = fm;
	ubi->fm_pool.max_size = ubi->fm->max_pool_size;
	ubi->fm_wl_pool.max_size = ubi->fm->max_wl_pool_size;
	ubi_msg(ubi, "attached by fastmap");
	ubi_msg(ubi, "fastmap pool size: %d", ubi->fm_pool.max_size);
	ubi_msg(ubi, "fastmap WL pool size: %d", ubi->fm_wl_pool.max_size);
	ubi->fm_disabled = 0;

	ubi_free_vid_hdr(ubi, vh);
	kfree(ech);
out:
	mutex_unlock(&ubi->fm_mutex);
	if (ret == UBI_BAD_FASTMAP)
		ubi_err(ubi, "Attach by fastmap failed, doing a full scan!");
	return ret;

free_hdr:
	ubi_free_vid_hdr(ubi, vh);
	kfree(ech);
free_fm_sb:
	kfree(fmsb);
	kfree(fm);
	goto out;
}

/**
 * ubi_write_fastmap - writes a fastmap.
 * @ubi: UBI device object
 * @new_fm: the fastmap to be written
 *
 * Returns 0 on success, < 0 indicates an internal error.
 */
static int ubi_write_fastmap(struct ubi_device *ubi,
			     struct ubi_fastmap_layout *new_fm)
{
	size_t fm_pos = 0;
	void *fm_raw;
	struct ubi_fm_sb *fmsb;
	struct ubi_fm_hdr *fmh;
	struct ubi_fm_scan_pool *fmpl1, *fmpl2;
	struct ubi_fm_ec *fec;
	struct ubi_fm_volhdr *fvh;
	struct ubi_fm_eba *feba;
	struct rb_node *node;
	struct ubi_wl_entry *wl_e;
	struct ubi_volume *vol;
	struct ubi_vid_hdr *avhdr, *dvhdr;
	struct ubi_work *ubi_wrk;
	int ret, i, j, free_peb_count, used_peb_count, vol_count;
	int scrub_peb_count, erase_peb_count;

	fm_raw = ubi->fm_buf;
	memset(ubi->fm_buf, 0, ubi->fm_size);

	avhdr = new_fm_vhdr(ubi, UBI_FM_SB_VOLUME_ID);
	if (!avhdr) {
		ret = -ENOMEM;
		goto out;
	}

	dvhdr = new_fm_vhdr(ubi, UBI_FM_DATA_VOLUME_ID);
	if (!dvhdr) {
		ret = -ENOMEM;
		goto out_kfree;
	}

	spin_lock(&ubi->volumes_lock);
	spin_lock(&ubi->wl_lock);

	fmsb = (struct ubi_fm_sb *)fm_raw;
	fm_pos += sizeof(*fmsb);
	ubi_assert(fm_pos <= ubi->fm_size);

	fmh = (struct ubi_fm_hdr *)(fm_raw + fm_pos);
	fm_pos += sizeof(*fmh);
	ubi_assert(fm_pos <= ubi->fm_size);

	fmsb->magic = cpu_to_be32(UBI_FM_SB_MAGIC);
	fmsb->version = UBI_FM_FMT_VERSION;
	fmsb->used_blocks = cpu_to_be32(new_fm->used_blocks);
	/* the max sqnum will be filled in while *reading* the fastmap */
	fmsb->sqnum = 0;

	fmh->magic = cpu_to_be32(UBI_FM_HDR_MAGIC);
	free_peb_count = 0;
	used_peb_count = 0;
	scrub_peb_count = 0;
	erase_peb_count = 0;
	vol_count = 0;

	fmpl1 = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
	fm_pos += sizeof(*fmpl1);
	fmpl1->magic = cpu_to_be32(UBI_FM_POOL_MAGIC);
	fmpl1->size = cpu_to_be16(ubi->fm_pool.size);
	fmpl1->max_size = cpu_to_be16(ubi->fm_pool.max_size);

	for (i = 0; i < ubi->fm_pool.size; i++)
		fmpl1->pebs[i] = cpu_to_be32(ubi->fm_pool.pebs[i]);

	fmpl2 = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
	fm_pos += sizeof(*fmpl2);
	fmpl2->magic = cpu_to_be32(UBI_FM_POOL_MAGIC);
	fmpl2->size = cpu_to_be16(ubi->fm_wl_pool.size);
	fmpl2->max_size = cpu_to_be16(ubi->fm_wl_pool.max_size);

	for (i = 0; i < ubi->fm_wl_pool.size; i++)
		fmpl2->pebs[i] = cpu_to_be32(ubi->fm_wl_pool.pebs[i]);

	for (node = rb_first(&ubi->free); node; node = rb_next(node)) {
		wl_e = rb_entry(node, struct ubi_wl_entry, u.rb);
		fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);

		fec->pnum = cpu_to_be32(wl_e->pnum);
		fec->ec = cpu_to_be32(wl_e->ec);

		free_peb_count++;
		fm_pos += sizeof(*fec);
		ubi_assert(fm_pos <= ubi->fm_size);
	}
	fmh->free_peb_count = cpu_to_be32(free_peb_count);

	for (node = rb_first(&ubi->used); node; node = rb_next(node)) {
		wl_e = rb_entry(node, struct ubi_wl_entry, u.rb);
		fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);

		fec->pnum = cpu_to_be32(wl_e->pnum);
		fec->ec = cpu_to_be32(wl_e->ec);

		used_peb_count++;
		fm_pos += sizeof(*fec);
		ubi_assert(fm_pos <= ubi->fm_size);
	}

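	/* PEBs in the protection queue still contain valid data and must be
	 * accounted as used as well.
	 */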
	for (i = 0; i < UBI_PROT_QUEUE_LEN; i++) {
		list_for_each_entry(wl_e, &ubi->pq[i], u.list) {
			fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);

			fec->pnum = cpu_to_be32(wl_e->pnum);
			fec->ec = cpu_to_be32(wl_e->ec);

			used_peb_count++;
			fm_pos += sizeof(*fec);
			ubi_assert(fm_pos <= ubi->fm_size);
		}
	}
	fmh->used_peb_count = cpu_to_be32(used_peb_count);

	for (node = rb_first(&ubi->scrub); node; node = rb_next(node)) {
		wl_e = rb_entry(node, struct ubi_wl_entry, u.rb);
		fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);

		fec->pnum = cpu_to_be32(wl_e->pnum);
		fec->ec = cpu_to_be32(wl_e->ec);

		scrub_peb_count++;
		fm_pos += sizeof(*fec);
		ubi_assert(fm_pos <= ubi->fm_size);
	}
	fmh->scrub_peb_count = cpu_to_be32(scrub_peb_count);

	list_for_each_entry(ubi_wrk, &ubi->works, list) {
		if (ubi_is_erase_work(ubi_wrk)) {
			wl_e = ubi_wrk->e;
			ubi_assert(wl_e);

			fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);

			fec->pnum = cpu_to_be32(wl_e->pnum);
			fec->ec = cpu_to_be32(wl_e->ec);

			erase_peb_count++;
			fm_pos += sizeof(*fec);
			ubi_assert(fm_pos <= ubi->fm_size);
		}
	}
	fmh->erase_peb_count = cpu_to_be32(erase_peb_count);

	for (i = 0; i < UBI_MAX_VOLUMES + UBI_INT_VOL_COUNT; i++) {
		vol = ubi->volumes[i];

		if (!vol)
			continue;

		vol_count++;

		fvh = (struct ubi_fm_volhdr *)(fm_raw + fm_pos);
		fm_pos += sizeof(*fvh);
		ubi_assert(fm_pos <= ubi->fm_size);

		fvh->magic = cpu_to_be32(UBI_FM_VHDR_MAGIC);
		fvh->vol_id = cpu_to_be32(vol->vol_id);
		fvh->vol_type = vol->vol_type;
		fvh->used_ebs = cpu_to_be32(vol->used_ebs);
		fvh->data_pad = cpu_to_be32(vol->data_pad);
		fvh->last_eb_bytes = cpu_to_be32(vol->last_eb_bytes);

		ubi_assert(vol->vol_type == UBI_DYNAMIC_VOLUME ||
			   vol->vol_type == UBI_STATIC_VOLUME);

		feba = (struct ubi_fm_eba *)(fm_raw + fm_pos);
		fm_pos += sizeof(*feba) + (sizeof(__be32) * vol->reserved_pebs);
		ubi_assert(fm_pos <= ubi->fm_size);

		for (j = 0; j < vol->reserved_pebs; j++)
			feba->pnum[j] = cpu_to_be32(vol->eba_tbl[j]);

		feba->reserved_pebs = cpu_to_be32(j);
		feba->magic = cpu_to_be32(UBI_FM_EBA_MAGIC);
	}
	fmh->vol_count = cpu_to_be32(vol_count);
	fmh->bad_peb_count = cpu_to_be32(ubi->bad_peb_count);

	avhdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
	avhdr->lnum = 0;

	spin_unlock(&ubi->wl_lock);
	spin_unlock(&ubi->volumes_lock);

	dbg_bld("writing fastmap SB to PEB %i", new_fm->e[0]->pnum);
	ret = ubi_io_write_vid_hdr(ubi, new_fm->e[0]->pnum, avhdr);
	if (ret) {
		ubi_err(ubi, "unable to write vid_hdr to fastmap SB!");
		goto out_kfree;
	}

	for (i = 0; i < new_fm->used_blocks; i++) {
		fmsb->block_loc[i] = cpu_to_be32(new_fm->e[i]->pnum);
		fmsb->block_ec[i] = cpu_to_be32(new_fm->e[i]->ec);
	}

	fmsb->data_crc = 0;
	fmsb->data_crc = cpu_to_be32(crc32(UBI_CRC32_INIT, fm_raw,
					   ubi->fm_size));

	for (i = 1; i < new_fm->used_blocks; i++) {
		dvhdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
		dvhdr->lnum = cpu_to_be32(i);
		dbg_bld("writing fastmap data to PEB %i sqnum %llu",
			new_fm->e[i]->pnum, be64_to_cpu(dvhdr->sqnum));
		ret = ubi_io_write_vid_hdr(ubi, new_fm->e[i]->pnum, dvhdr);
		if (ret) {
			ubi_err(ubi, "unable to write vid_hdr to PEB %i!",
				new_fm->e[i]->pnum);
			goto out_kfree;
		}
	}

	for (i = 0; i < new_fm->used_blocks; i++) {
		ret = ubi_io_write(ubi, fm_raw + (i * ubi->leb_size),
			new_fm->e[i]->pnum, ubi->leb_start, ubi->leb_size);
		if (ret) {
			ubi_err(ubi, "unable to write fastmap to PEB %i!",
				new_fm->e[i]->pnum);
			goto out_kfree;
		}
	}

	ubi_assert(new_fm);
	ubi->fm = new_fm;

	dbg_bld("fastmap written!");

out_kfree:
	ubi_free_vid_hdr(ubi, avhdr);
	ubi_free_vid_hdr(ubi, dvhdr);
out:
	return ret;
}

/**
 * erase_block - Manually erase a PEB.
 * @ubi: UBI device object
 * @pnum: PEB to be erased
 *
 * Returns the new EC value on success, < 0 indicates an internal error.
 */
static int erase_block(struct ubi_device *ubi, int pnum)
{
	int ret;
	struct ubi_ec_hdr *ec_hdr;
	long long ec;

	ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
	if (!ec_hdr)
		return -ENOMEM;

	ret = ubi_io_read_ec_hdr(ubi, pnum, ec_hdr, 0);
	if (ret < 0)
		goto out;
	else if (ret && ret != UBI_IO_BITFLIPS) {
		ret = -EINVAL;
		goto out;
	}

	ret = ubi_io_sync_erase(ubi, pnum, 0);
	if (ret < 0)
		goto out;

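	/* ubi_io_sync_erase() returns the number of erasures performed,
	 * add it to the PEB's erase counter.
	 */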
	ec = be64_to_cpu(ec_hdr->ec);
	ec += ret;
	if (ec > UBI_MAX_ERASECOUNTER) {
		ret = -EINVAL;
		goto out;
	}

	ec_hdr->ec = cpu_to_be64(ec);
	ret = ubi_io_write_ec_hdr(ubi, pnum, ec_hdr);
	if (ret < 0)
		goto out;

	ret = ec;
out:
	kfree(ec_hdr);
	return ret;
}

/**
 * invalidate_fastmap - destroys a fastmap.
 * @ubi: UBI device object
 * @fm: the fastmap to be destroyed
 *
 * Returns 0 on success, < 0 indicates an internal error.
 */
static int invalidate_fastmap(struct ubi_device *ubi,
			      struct ubi_fastmap_layout *fm)
{
	int ret;
	struct ubi_vid_hdr *vh;

	ret = erase_block(ubi, fm->e[0]->pnum);
	if (ret < 0)
		return ret;

	vh = new_fm_vhdr(ubi, UBI_FM_SB_VOLUME_ID);
	if (!vh)
		return -ENOMEM;

	/* deleting the current fastmap SB is not enough, an old SB may exist,
	 * so create a (corrupted) SB such that fastmap will find it and fall
	 * back to scanning mode in any case */
	vh->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
	ret = ubi_io_write_vid_hdr(ubi, fm->e[0]->pnum, vh);
	ubi_free_vid_hdr(ubi, vh);

	return ret;
}

/**
 * ubi_update_fastmap - will be called by UBI if a volume changes or
 * a fastmap pool becomes full.
 * @ubi: UBI device object
 *
 * Returns 0 on success, < 0 indicates an internal error.
 */
int ubi_update_fastmap(struct ubi_device *ubi)
{
	int ret, i;
	struct ubi_fastmap_layout *new_fm, *old_fm;
	struct ubi_wl_entry *tmp_e;

	mutex_lock(&ubi->fm_mutex);

	ubi_refill_pools(ubi);

	if (ubi->ro_mode || ubi->fm_disabled) {
		mutex_unlock(&ubi->fm_mutex);
		return 0;
	}

	ret = ubi_ensure_anchor_pebs(ubi);
	if (ret) {
		mutex_unlock(&ubi->fm_mutex);
		return ret;
	}

	new_fm = kzalloc(sizeof(*new_fm), GFP_KERNEL);
	if (!new_fm) {
		mutex_unlock(&ubi->fm_mutex);
		return -ENOMEM;
	}

	new_fm->used_blocks = ubi->fm_size / ubi->leb_size;
	old_fm = ubi->fm;
	ubi->fm = NULL;

	if (new_fm->used_blocks > UBI_FM_MAX_BLOCKS) {
		ubi_err(ubi, "fastmap too large");
		ret = -ENOSPC;
		goto err;
	}

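	/* Collect PEBs for the fastmap data blocks; if the WL sub-system has
	 * none to spare, fall back to reusing the old fastmap's PEBs.
	 */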
	for (i = 1; i < new_fm->used_blocks; i++) {
		spin_lock(&ubi->wl_lock);
		tmp_e = ubi_wl_get_fm_peb(ubi, 0);
		spin_unlock(&ubi->wl_lock);

		if (!tmp_e && !old_fm) {
			int j;
			ubi_err(ubi, "could not get any free erase block");

			for (j = 1; j < i; j++)
				ubi_wl_put_fm_peb(ubi, new_fm->e[j], j, 0);

			ret = -ENOSPC;
			goto err;
		} else if (!tmp_e && old_fm) {
			ret = erase_block(ubi, old_fm->e[i]->pnum);
			if (ret < 0) {
				int j;

				for (j = 1; j < i; j++)
					ubi_wl_put_fm_peb(ubi, new_fm->e[j],
							  j, 0);

				ubi_err(ubi, "could not erase old fastmap PEB");
				goto err;
			}
			new_fm->e[i] = old_fm->e[i];
		} else {
			new_fm->e[i] = tmp_e;

			if (old_fm)
				ubi_wl_put_fm_peb(ubi, old_fm->e[i], i,
						  old_fm->to_be_tortured[i]);
		}
	}

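	/* Now get a fresh anchor PEB for the fastmap super block. */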
	spin_lock(&ubi->wl_lock);
	tmp_e = ubi_wl_get_fm_peb(ubi, 1);
	spin_unlock(&ubi->wl_lock);

	if (old_fm) {
		/* no fresh anchor PEB was found, reuse the old one */
		if (!tmp_e) {
			ret = erase_block(ubi, old_fm->e[0]->pnum);
			if (ret < 0) {
				int i;
				ubi_err(ubi, "could not erase old anchor PEB");

				for (i = 1; i < new_fm->used_blocks; i++)
					ubi_wl_put_fm_peb(ubi, new_fm->e[i],
							  i, 0);
				goto err;
			}
			new_fm->e[0] = old_fm->e[0];
			new_fm->e[0]->ec = ret;
		} else {
			/* we've got a new anchor PEB, return the old one */
			ubi_wl_put_fm_peb(ubi, old_fm->e[0], 0,
					  old_fm->to_be_tortured[0]);
			new_fm->e[0] = tmp_e;
		}
	} else {
		if (!tmp_e) {
			int i;
			ubi_err(ubi, "could not find any anchor PEB");

			for (i = 1; i < new_fm->used_blocks; i++)
				ubi_wl_put_fm_peb(ubi, new_fm->e[i], i, 0);

			ret = -ENOSPC;
			goto err;
		}
		new_fm->e[0] = tmp_e;
	}

	down_write(&ubi->work_sem);
	down_write(&ubi->fm_sem);
	ret = ubi_write_fastmap(ubi, new_fm);
	up_write(&ubi->fm_sem);
	up_write(&ubi->work_sem);

	if (ret)
		goto err;

out_unlock:
	mutex_unlock(&ubi->fm_mutex);
	kfree(old_fm);
	return ret;

err:
	kfree(new_fm);

	ubi_warn(ubi, "Unable to write new fastmap, err=%i", ret);

	ret = 0;
	if (old_fm) {
		ret = invalidate_fastmap(ubi, old_fm);
		if (ret < 0) {
			ubi_err(ubi, "Unable to invalidate current fastmap!");
			ubi_ro_mode(ubi);
		} else if (ret)
			ret = 0;
	}
	goto out_unlock;
}