// SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause
/*
 * Copyright (c) Thomas Gleixner <tglx@linutronix.de>
 *
 * The parts taken from the kernel implementation are:
 *
 * Copyright (c) International Business Machines Corp., 2006
 */

#include <common.h>
#include <errno.h>
#include <linux/bug.h>
#include <u-boot/crc.h>
#include <ubispl.h>

#include <linux/bitops.h>
#include <linux/crc32.h>

#include "ubispl.h"

/**
 * ubi_calc_fm_size - calculates the fastmap size in bytes for an UBI device.
 * @ubi: UBI device description object
 *
 * The size is the worst-case sum of the on-flash fastmap structures
 * (superblock, header, two scan pools, per-PEB EC entries, the EBA table
 * and one volume header per possible volume), rounded up to whole LEBs
 * because the fastmap is stored in full LEBs.
 */
static size_t ubi_calc_fm_size(struct ubi_scan_info *ubi)
{
	size_t size;

	size = sizeof(struct ubi_fm_sb) +
		sizeof(struct ubi_fm_hdr) +
		sizeof(struct ubi_fm_scan_pool) +
		sizeof(struct ubi_fm_scan_pool) +
		(ubi->peb_count * sizeof(struct ubi_fm_ec)) +
		(sizeof(struct ubi_fm_eba) +
		(ubi->peb_count * sizeof(__be32))) +
		sizeof(struct ubi_fm_volhdr) * UBI_MAX_VOLUMES;
	return roundup(size, ubi->leb_size);
}

/*
 * Read @len bytes at offset @from of PEB @pnum via the board-supplied
 * flash read callback. @pnum is relative to the UBI region start, hence
 * the peb_offset translation.
 */
static int ubi_io_read(struct ubi_scan_info *ubi, void *buf, int pnum,
		       unsigned long from, unsigned long len)
{
	return ubi->read(pnum + ubi->peb_offset, from, len, buf);
}

/*
 * Range check only: the SPL has no bad block table, so "bad" here means
 * "outside the UBI region".
 */
static int ubi_io_is_bad(struct ubi_scan_info *ubi, int peb)
{
	return peb >= ubi->peb_count || peb < 0;
}

#ifdef CONFIG_SPL_UBI_LOAD_BY_VOLNAME

/**
 * ubi_dump_vtbl_record - dump a &struct ubi_vtbl_record object.
55 * @r: the object to dump 56 * @idx: volume table index 57 */ 58void ubi_dump_vtbl_record(const struct ubi_vtbl_record *r, int idx) 59{ 60 int name_len = be16_to_cpu(r->name_len); 61 62 ubi_dbg("Volume table record %d dump: size: %d", 63 idx, sizeof(struct ubi_vtbl_record)); 64 ubi_dbg("\treserved_pebs %d", be32_to_cpu(r->reserved_pebs)); 65 ubi_dbg("\talignment %d", be32_to_cpu(r->alignment)); 66 ubi_dbg("\tdata_pad %d", be32_to_cpu(r->data_pad)); 67 ubi_dbg("\tvol_type %d", (int)r->vol_type); 68 ubi_dbg("\tupd_marker %d", (int)r->upd_marker); 69 ubi_dbg("\tname_len %d", name_len); 70 71 if (r->name[0] == '\0') { 72 ubi_dbg("\tname NULL"); 73 return; 74 } 75 76 if (name_len <= UBI_VOL_NAME_MAX && 77 strnlen(&r->name[0], name_len + 1) == name_len) { 78 ubi_dbg("\tname %s", &r->name[0]); 79 } else { 80 ubi_dbg("\t1st 5 characters of name: %c%c%c%c%c", 81 r->name[0], r->name[1], r->name[2], r->name[3], 82 r->name[4]); 83 } 84 ubi_dbg("\tcrc %#08x", be32_to_cpu(r->crc)); 85} 86 87/* Empty volume table record */ 88static struct ubi_vtbl_record empty_vtbl_record; 89 90/** 91 * vtbl_check - check if volume table is not corrupted and sensible. 92 * @ubi: UBI device description object 93 * @vtbl: volume table 94 * 95 * This function returns zero if @vtbl is all right, %1 if CRC is incorrect, 96 * and %-EINVAL if it contains inconsistent data. 
 */
static int vtbl_check(struct ubi_scan_info *ubi,
		      struct ubi_vtbl_record *vtbl)
{
	int i, n, reserved_pebs, alignment, data_pad, vol_type, name_len;
	int upd_marker, err;
	uint32_t crc;
	const char *name;

	for (i = 0; i < UBI_SPL_VOL_IDS; i++) {
		reserved_pebs = be32_to_cpu(vtbl[i].reserved_pebs);
		alignment = be32_to_cpu(vtbl[i].alignment);
		data_pad = be32_to_cpu(vtbl[i].data_pad);
		upd_marker = vtbl[i].upd_marker;
		vol_type = vtbl[i].vol_type;
		name_len = be16_to_cpu(vtbl[i].name_len);
		name = &vtbl[i].name[0];

		/* CRC covers the record minus the trailing CRC field itself */
		crc = crc32(UBI_CRC32_INIT, &vtbl[i], UBI_VTBL_RECORD_SIZE_CRC);
		if (be32_to_cpu(vtbl[i].crc) != crc) {
			ubi_err("bad CRC at record %u: %#08x, not %#08x",
				i, crc, be32_to_cpu(vtbl[i].crc));
			ubi_dump_vtbl_record(&vtbl[i], i);
			return 1;
		}

		/*
		 * reserved_pebs == 0 marks an unused slot; it must then be
		 * entirely identical to the canonical empty record.
		 */
		if (reserved_pebs == 0) {
			if (memcmp(&vtbl[i], &empty_vtbl_record,
				   UBI_VTBL_RECORD_SIZE)) {
				err = 2;
				goto bad;
			}
			continue;
		}

		if (reserved_pebs < 0 || alignment < 0 || data_pad < 0 ||
		    name_len < 0) {
			err = 3;
			goto bad;
		}

		if (alignment > ubi->leb_size || alignment == 0) {
			err = 4;
			goto bad;
		}

		/*
		 * Alignment must be 1 or a multiple of the VID offset
		 * (assumed to be a power of two by the mask below).
		 */
		n = alignment & (CONFIG_SPL_UBI_VID_OFFSET - 1);
		if (alignment != 1 && n) {
			err = 5;
			goto bad;
		}

		/* data_pad is the LEB remainder left over by the alignment */
		n = ubi->leb_size % alignment;
		if (data_pad != n) {
			ubi_err("bad data_pad, has to be %d", n);
			err = 6;
			goto bad;
		}

		if (vol_type != UBI_VID_DYNAMIC && vol_type != UBI_VID_STATIC) {
			err = 7;
			goto bad;
		}

		if (upd_marker != 0 && upd_marker != 1) {
			err = 8;
			goto bad;
		}

		if (name_len > UBI_VOL_NAME_MAX) {
			err = 10;
			goto bad;
		}

		if (name[0] == '\0') {
			err = 11;
			goto bad;
		}

		/* Name must be NUL-terminated exactly at name_len */
		if (name_len != strnlen(name, name_len + 1)) {
			err = 12;
			goto bad;
		}

		ubi_dump_vtbl_record(&vtbl[i], i);
	}

	/* Checks that all names are unique */
	for (i = 0; i < UBI_SPL_VOL_IDS - 1; i++) {
		for (n = i + 1; n < UBI_SPL_VOL_IDS; n++) {
			int len1 = be16_to_cpu(vtbl[i].name_len);
			int len2 = be16_to_cpu(vtbl[n].name_len);

			if (len1 > 0 && len1 == len2 &&
			    !strncmp(vtbl[i].name, vtbl[n].name, len1)) {
				ubi_err("volumes %d and %d have the same name \"%s\"",
					i, n, vtbl[i].name);
				ubi_dump_vtbl_record(&vtbl[i], i);
				ubi_dump_vtbl_record(&vtbl[n], n);
				return -EINVAL;
			}
		}
	}

	return 0;

bad:
	ubi_err("volume table check failed: record %d, error %d", i, err);
	ubi_dump_vtbl_record(&vtbl[i], i);
	return -EINVAL;
}

/*
 * Read the layout volume (volume table) from PEB @pnum into ubi->vtbl and
 * validate it. Sets ubi->vtbl_valid on success. Bitflips on read are
 * tolerated. Returns 0 on success, negative error otherwise.
 */
static int ubi_read_volume_table(struct ubi_scan_info *ubi, u32 pnum)
{
	int err = -EINVAL;

	/* Pre-computed CRC of an all-zero volume table record */
	empty_vtbl_record.crc = cpu_to_be32(0xf116c36b);

	err = ubi_io_read(ubi, &ubi->vtbl, pnum, ubi->leb_start,
			  sizeof(struct ubi_vtbl_record) * UBI_SPL_VOL_IDS);
	if (err && err != UBI_IO_BITFLIPS) {
		ubi_err("unable to read volume table");
		goto out;
	}

	if (!vtbl_check(ubi, ubi->vtbl)) {
		ubi->vtbl_valid = 1;
		err = 0;
	}
out:
	return err;
}

#endif /* CONFIG_SPL_UBI_LOAD_BY_VOLNAME */

/*
 * Read and validate the VID header of PEB @pnum into @vh (which points
 * into ubi->blockinfo). Results are cached: a PEB already marked in
 * ubi->scanned is not read again, and a PEB marked in ubi->corrupt is
 * rejected right away. Returns 0, UBI_IO_FF for an empty block,
 * UBI_IO_BAD_HDR for a corrupted one, or the read error.
 */
static int ubi_io_read_vid_hdr(struct ubi_scan_info *ubi, int pnum,
			       struct ubi_vid_hdr *vh, int unused)
{
	u32 magic;
	int res;

	/* No point in rescanning a corrupt block */
	if (test_bit(pnum, ubi->corrupt))
		return UBI_IO_BAD_HDR;
	/*
	 * If the block has been scanned already, no need to rescan
	 */
	if (test_and_set_bit(pnum, ubi->scanned))
		return 0;

	res = ubi_io_read(ubi, vh, pnum, ubi->vid_offset, sizeof(*vh));

	/*
	 * Bad block, unrecoverable ECC error, skip the block
	 */
	if (res) {
		ubi_dbg("Skipping bad or unreadable block %d", pnum);
		vh->magic = 0;
		generic_set_bit(pnum, ubi->corrupt);
		return res;
	}

	/* Magic number available ? */
	magic = be32_to_cpu(vh->magic);
	if (magic != UBI_VID_HDR_MAGIC) {
		generic_set_bit(pnum, ubi->corrupt);
		/* All 0xff means the block is erased/empty, not corrupt */
		if (magic == 0xffffffff)
			return UBI_IO_FF;
		ubi_msg("Bad magic in block 0%d %08x", pnum, magic);
		return UBI_IO_BAD_HDR;
	}

	/* Header CRC correct ? */
	if (crc32(UBI_CRC32_INIT, vh, UBI_VID_HDR_SIZE_CRC) !=
	    be32_to_cpu(vh->hdr_crc)) {
		ubi_msg("Bad CRC in block 0%d", pnum);
		generic_set_bit(pnum, ubi->corrupt);
		return UBI_IO_BAD_HDR;
	}

	ubi_dbg("RV: pnum: %i sqnum %llu", pnum, be64_to_cpu(vh->sqnum));

	return 0;
}

/*
 * Re-scan a PEB that the fastmap claimed for (@fm_vol_id, @fm_lnum) and
 * verify that its on-flash VID header really is a static-volume header
 * matching that claim. Returns 0 when it matches, the read error or
 * -EINVAL otherwise.
 */
static int ubi_rescan_fm_vid_hdr(struct ubi_scan_info *ubi,
				 struct ubi_vid_hdr *vh,
				 u32 fm_pnum, u32 fm_vol_id, u32 fm_lnum)
{
	int res;

	if (ubi_io_is_bad(ubi, fm_pnum))
		return -EINVAL;

	res = ubi_io_read_vid_hdr(ubi, fm_pnum, vh, 0);
	if (!res) {
		/* Check volume id, volume type and lnum */
		if (be32_to_cpu(vh->vol_id) == fm_vol_id &&
		    vh->vol_type == UBI_VID_STATIC &&
		    be32_to_cpu(vh->lnum) == fm_lnum)
			return 0;
		ubi_dbg("RS: PEB %u vol: %u : %u typ %u lnum %u %u",
			fm_pnum, fm_vol_id, vh->vol_type,
			be32_to_cpu(vh->vol_id),
			fm_lnum, be32_to_cpu(vh->lnum));
	}
	return res;
}

/* Insert the logic block into the volume info */
static int ubi_add_peb_to_vol(struct ubi_scan_info *ubi,
			      struct ubi_vid_hdr *vh, u32 vol_id,
			      u32 pnum, u32 lnum)
{
	struct ubi_vol_info *vi = ubi->volinfo + vol_id;
	u32 *ltp;

	/*
	 * If the volume is larger than expected, yell and give up :(
	 */
	if (lnum >= UBI_MAX_VOL_LEBS) {
		ubi_warn("Vol: %u LEB %d > %d", vol_id, lnum, UBI_MAX_VOL_LEBS);
		return -EINVAL;
	}

	ubi_dbg("SC: Add PEB %u to Vol %u as LEB %u fnd %d sc %d",
		pnum, vol_id, lnum, !!test_bit(lnum, vi->found),
		!!test_bit(pnum, ubi->scanned));

	/* Points to the translation entry */
	ltp = vi->lebs_to_pebs + lnum;

	/* If the block is already assigned, check sqnum */
	if (__test_and_set_bit(lnum, vi->found)) {
		u32 cur_pnum = *ltp;
		struct ubi_vid_hdr *cur = ubi->blockinfo + cur_pnum;

		/*
		 * If the current block hase not yet been scanned, we
		 * need to do that. The other block might be stale or
		 * the current block corrupted and the FM not yet
		 * updated.
		 */
		if (!test_bit(cur_pnum, ubi->scanned)) {
			/*
			 * If the scan fails, we use the valid block
			 */
			if (ubi_rescan_fm_vid_hdr(ubi, cur, cur_pnum, vol_id,
						  lnum)) {
				*ltp = pnum;
				return 0;
			}
		}

		/*
		 * Should not happen ....
		 */
		if (test_bit(cur_pnum, ubi->corrupt)) {
			*ltp = pnum;
			return 0;
		}

		ubi_dbg("Vol %u LEB %u PEB %u->sqnum %llu NPEB %u->sqnum %llu",
			vol_id, lnum, cur_pnum, be64_to_cpu(cur->sqnum), pnum,
			be64_to_cpu(vh->sqnum));

		/*
		 * Compare sqnum and take the newer one
		 */
		if (be64_to_cpu(cur->sqnum) < be64_to_cpu(vh->sqnum))
			*ltp = pnum;
	} else {
		*ltp = pnum;
		if (lnum > vi->last_block)
			vi->last_block = lnum;
	}

	return 0;
}

/*
 * Scan the VID header of PEB @pnum and route it: fastmap anchor PEBs are
 * reported via UBI_FASTMAP_ANCHOR, the layout volume triggers a volume
 * table read (when enabled), and interesting static-volume PEBs are added
 * to the per-volume LEB->PEB map.
 */
static int ubi_scan_vid_hdr(struct ubi_scan_info *ubi, struct ubi_vid_hdr *vh,
			    u32 pnum)
{
	u32 vol_id, lnum;
	int res;

	if (ubi_io_is_bad(ubi, pnum))
		return -EINVAL;

	res = ubi_io_read_vid_hdr(ubi, pnum, vh, 0);
	if (res)
		return res;

	/* Get volume id */
	vol_id = be32_to_cpu(vh->vol_id);

	/* If this is the fastmap anchor, return right away */
	if (vol_id == UBI_FM_SB_VOLUME_ID)
		return ubi->fm_enabled ? UBI_FASTMAP_ANCHOR : 0;

#ifdef CONFIG_SPL_UBI_LOAD_BY_VOLNAME
	/* If this is a UBI volume table, read it and return */
	if (vol_id == UBI_LAYOUT_VOLUME_ID && !ubi->vtbl_valid) {
		res = ubi_read_volume_table(ubi, pnum);
		return res;
	}
#endif

	/* We only care about static volumes with an id < UBI_SPL_VOL_IDS */
	if (vol_id >= UBI_SPL_VOL_IDS || vh->vol_type != UBI_VID_STATIC)
		return 0;

#ifndef CONFIG_SPL_UBI_LOAD_BY_VOLNAME
	/* We are only interested in the volumes to load */
	if (!test_bit(vol_id, ubi->toload))
		return 0;
#endif
	lnum = be32_to_cpu(vh->lnum);
	return ubi_add_peb_to_vol(ubi, vh, vol_id, pnum, lnum);
}

/*
 * Fastmap callback: account a PEB listed in a fastmap EBA table and, for
 * static volumes of interest, scan its VID header so it lands in the
 * LEB->PEB map. @lnum/@vol_type/@used come from the fastmap and are only
 * used for the cheap filters here; the header scan re-derives them.
 */
static int assign_aeb_to_av(struct ubi_scan_info *ubi, u32 pnum, u32 lnum,
			    u32 vol_id, u32 vol_type, u32 used)
{
	struct ubi_vid_hdr *vh;

	if (ubi_io_is_bad(ubi, pnum))
		return -EINVAL;

	ubi->fastmap_pebs++;

#ifndef CONFIG_SPL_UBI_LOAD_BY_VOLNAME
	if (vol_id >= UBI_SPL_VOL_IDS || vol_type != UBI_STATIC_VOLUME)
		return 0;

	/* We are only interested in the volumes to load */
	if (!test_bit(vol_id, ubi->toload))
		return 0;
#endif
	vh = ubi->blockinfo + pnum;

	return ubi_scan_vid_hdr(ubi, vh, pnum);
}

/*
 * Scan the PEBs listed in a fastmap pool. Individual scan failures are
 * tolerated (the loader finds replacements later); only an out-of-range
 * PEB number invalidates the fastmap.
 */
static int scan_pool(struct ubi_scan_info *ubi, __be32 *pebs, int pool_size)
{
	struct ubi_vid_hdr *vh;
	u32 pnum;
	int i;

	ubi_dbg("Scanning pool size: %d", pool_size);

	for (i = 0; i < pool_size; i++) {
		pnum = be32_to_cpu(pebs[i]);

		if (ubi_io_is_bad(ubi, pnum)) {
			ubi_err("FM: Bad PEB in fastmap pool! %u", pnum);
			return UBI_BAD_FASTMAP;
		}

		vh = ubi->blockinfo + pnum;
		/*
		 * We allow the scan to fail here. The loader will notice
		 * and look for a replacement.
		 */
		ubi_scan_vid_hdr(ubi, vh, pnum);
	}
	return 0;
}

/*
 * Fastmap code is stolen from Linux kernel and this stub structure is used
 * to make it happy.
470 */ 471struct ubi_attach_info { 472 int i; 473}; 474 475static int ubi_attach_fastmap(struct ubi_scan_info *ubi, 476 struct ubi_attach_info *ai, 477 struct ubi_fastmap_layout *fm) 478{ 479 struct ubi_fm_hdr *fmhdr; 480 struct ubi_fm_scan_pool *fmpl1, *fmpl2; 481 struct ubi_fm_ec *fmec; 482 struct ubi_fm_volhdr *fmvhdr; 483 struct ubi_fm_eba *fm_eba; 484 int ret, i, j, pool_size, wl_pool_size; 485 size_t fm_pos = 0, fm_size = ubi->fm_size; 486 void *fm_raw = ubi->fm_buf; 487 488 memset(ubi->fm_used, 0, sizeof(ubi->fm_used)); 489 490 fm_pos += sizeof(struct ubi_fm_sb); 491 if (fm_pos >= fm_size) 492 goto fail_bad; 493 494 fmhdr = (struct ubi_fm_hdr *)(fm_raw + fm_pos); 495 fm_pos += sizeof(*fmhdr); 496 if (fm_pos >= fm_size) 497 goto fail_bad; 498 499 if (be32_to_cpu(fmhdr->magic) != UBI_FM_HDR_MAGIC) { 500 ubi_err("bad fastmap header magic: 0x%x, expected: 0x%x", 501 be32_to_cpu(fmhdr->magic), UBI_FM_HDR_MAGIC); 502 goto fail_bad; 503 } 504 505 fmpl1 = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos); 506 fm_pos += sizeof(*fmpl1); 507 if (fm_pos >= fm_size) 508 goto fail_bad; 509 if (be32_to_cpu(fmpl1->magic) != UBI_FM_POOL_MAGIC) { 510 ubi_err("bad fastmap pool magic: 0x%x, expected: 0x%x", 511 be32_to_cpu(fmpl1->magic), UBI_FM_POOL_MAGIC); 512 goto fail_bad; 513 } 514 515 fmpl2 = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos); 516 fm_pos += sizeof(*fmpl2); 517 if (fm_pos >= fm_size) 518 goto fail_bad; 519 if (be32_to_cpu(fmpl2->magic) != UBI_FM_POOL_MAGIC) { 520 ubi_err("bad fastmap pool magic: 0x%x, expected: 0x%x", 521 be32_to_cpu(fmpl2->magic), UBI_FM_POOL_MAGIC); 522 goto fail_bad; 523 } 524 525 pool_size = be16_to_cpu(fmpl1->size); 526 wl_pool_size = be16_to_cpu(fmpl2->size); 527 fm->max_pool_size = be16_to_cpu(fmpl1->max_size); 528 fm->max_wl_pool_size = be16_to_cpu(fmpl2->max_size); 529 530 if (pool_size > UBI_FM_MAX_POOL_SIZE || pool_size < 0) { 531 ubi_err("bad pool size: %i", pool_size); 532 goto fail_bad; 533 } 534 535 if (wl_pool_size > 
UBI_FM_MAX_POOL_SIZE || wl_pool_size < 0) { 536 ubi_err("bad WL pool size: %i", wl_pool_size); 537 goto fail_bad; 538 } 539 540 if (fm->max_pool_size > UBI_FM_MAX_POOL_SIZE || 541 fm->max_pool_size < 0) { 542 ubi_err("bad maximal pool size: %i", fm->max_pool_size); 543 goto fail_bad; 544 } 545 546 if (fm->max_wl_pool_size > UBI_FM_MAX_POOL_SIZE || 547 fm->max_wl_pool_size < 0) { 548 ubi_err("bad maximal WL pool size: %i", fm->max_wl_pool_size); 549 goto fail_bad; 550 } 551 552 /* read EC values from free list */ 553 for (i = 0; i < be32_to_cpu(fmhdr->free_peb_count); i++) { 554 fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos); 555 fm_pos += sizeof(*fmec); 556 if (fm_pos >= fm_size) 557 goto fail_bad; 558 } 559 560 /* read EC values from used list */ 561 for (i = 0; i < be32_to_cpu(fmhdr->used_peb_count); i++) { 562 fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos); 563 fm_pos += sizeof(*fmec); 564 if (fm_pos >= fm_size) 565 goto fail_bad; 566 567 generic_set_bit(be32_to_cpu(fmec->pnum), ubi->fm_used); 568 } 569 570 /* read EC values from scrub list */ 571 for (i = 0; i < be32_to_cpu(fmhdr->scrub_peb_count); i++) { 572 fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos); 573 fm_pos += sizeof(*fmec); 574 if (fm_pos >= fm_size) 575 goto fail_bad; 576 } 577 578 /* read EC values from erase list */ 579 for (i = 0; i < be32_to_cpu(fmhdr->erase_peb_count); i++) { 580 fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos); 581 fm_pos += sizeof(*fmec); 582 if (fm_pos >= fm_size) 583 goto fail_bad; 584 } 585 586 /* Iterate over all volumes and read their EBA table */ 587 for (i = 0; i < be32_to_cpu(fmhdr->vol_count); i++) { 588 u32 vol_id, vol_type, used, reserved; 589 590 fmvhdr = (struct ubi_fm_volhdr *)(fm_raw + fm_pos); 591 fm_pos += sizeof(*fmvhdr); 592 if (fm_pos >= fm_size) 593 goto fail_bad; 594 595 if (be32_to_cpu(fmvhdr->magic) != UBI_FM_VHDR_MAGIC) { 596 ubi_err("bad fastmap vol header magic: 0x%x, " \ 597 "expected: 0x%x", 598 be32_to_cpu(fmvhdr->magic), UBI_FM_VHDR_MAGIC); 599 goto 
fail_bad; 600 } 601 602 vol_id = be32_to_cpu(fmvhdr->vol_id); 603 vol_type = fmvhdr->vol_type; 604 used = be32_to_cpu(fmvhdr->used_ebs); 605 606 fm_eba = (struct ubi_fm_eba *)(fm_raw + fm_pos); 607 fm_pos += sizeof(*fm_eba); 608 fm_pos += (sizeof(__be32) * be32_to_cpu(fm_eba->reserved_pebs)); 609 if (fm_pos >= fm_size) 610 goto fail_bad; 611 612 if (be32_to_cpu(fm_eba->magic) != UBI_FM_EBA_MAGIC) { 613 ubi_err("bad fastmap EBA header magic: 0x%x, " \ 614 "expected: 0x%x", 615 be32_to_cpu(fm_eba->magic), UBI_FM_EBA_MAGIC); 616 goto fail_bad; 617 } 618 619 reserved = be32_to_cpu(fm_eba->reserved_pebs); 620 ubi_dbg("FA: vol %u used %u res: %u", vol_id, used, reserved); 621 for (j = 0; j < reserved; j++) { 622 int pnum = be32_to_cpu(fm_eba->pnum[j]); 623 624 if ((int)be32_to_cpu(fm_eba->pnum[j]) < 0) 625 continue; 626 627 if (!__test_and_clear_bit(pnum, ubi->fm_used)) 628 continue; 629 630 /* 631 * We only handle static volumes so used_ebs 632 * needs to be handed in. And we do not assign 633 * the reserved blocks 634 */ 635 if (j >= used) 636 continue; 637 638 ret = assign_aeb_to_av(ubi, pnum, j, vol_id, 639 vol_type, used); 640 if (!ret) 641 continue; 642 643 /* 644 * Nasty: The fastmap claims that the volume 645 * has one block more than it, but that block 646 * is always empty and the other blocks have 647 * the correct number of total LEBs in the 648 * headers. Deal with it. 649 */ 650 if (ret != UBI_IO_FF && j != used - 1) 651 goto fail_bad; 652 ubi_dbg("FA: Vol: %u Ignoring empty LEB %d of %d", 653 vol_id, j, used); 654 } 655 } 656 657 ret = scan_pool(ubi, fmpl1->pebs, pool_size); 658 if (ret) 659 goto fail; 660 661 ret = scan_pool(ubi, fmpl2->pebs, wl_pool_size); 662 if (ret) 663 goto fail; 664 665#ifdef CHECKME 666 /* 667 * If fastmap is leaking PEBs (must not happen), raise a 668 * fat warning and fall back to scanning mode. 669 * We do this here because in ubi_wl_init() it's too late 670 * and we cannot fall back to scanning. 
671 */ 672 if (WARN_ON(count_fastmap_pebs(ai) != ubi->peb_count - 673 ai->bad_peb_count - fm->used_blocks)) 674 goto fail_bad; 675#endif 676 677 return 0; 678 679fail_bad: 680 ret = UBI_BAD_FASTMAP; 681fail: 682 return ret; 683} 684 685static int ubi_scan_fastmap(struct ubi_scan_info *ubi, 686 struct ubi_attach_info *ai, 687 int fm_anchor) 688{ 689 struct ubi_fm_sb *fmsb, *fmsb2; 690 struct ubi_vid_hdr *vh; 691 struct ubi_fastmap_layout *fm; 692 int i, used_blocks, pnum, ret = 0; 693 size_t fm_size; 694 __be32 crc, tmp_crc; 695 unsigned long long sqnum = 0; 696 697 fmsb = &ubi->fm_sb; 698 fm = &ubi->fm_layout; 699 700 ret = ubi_io_read(ubi, fmsb, fm_anchor, ubi->leb_start, sizeof(*fmsb)); 701 if (ret && ret != UBI_IO_BITFLIPS) 702 goto free_fm_sb; 703 else if (ret == UBI_IO_BITFLIPS) 704 fm->to_be_tortured[0] = 1; 705 706 if (be32_to_cpu(fmsb->magic) != UBI_FM_SB_MAGIC) { 707 ubi_err("bad super block magic: 0x%x, expected: 0x%x", 708 be32_to_cpu(fmsb->magic), UBI_FM_SB_MAGIC); 709 ret = UBI_BAD_FASTMAP; 710 goto free_fm_sb; 711 } 712 713 if (fmsb->version != UBI_FM_FMT_VERSION) { 714 ubi_err("bad fastmap version: %i, expected: %i", 715 fmsb->version, UBI_FM_FMT_VERSION); 716 ret = UBI_BAD_FASTMAP; 717 goto free_fm_sb; 718 } 719 720 used_blocks = be32_to_cpu(fmsb->used_blocks); 721 if (used_blocks > UBI_FM_MAX_BLOCKS || used_blocks < 1) { 722 ubi_err("number of fastmap blocks is invalid: %i", used_blocks); 723 ret = UBI_BAD_FASTMAP; 724 goto free_fm_sb; 725 } 726 727 fm_size = ubi->leb_size * used_blocks; 728 if (fm_size != ubi->fm_size) { 729 ubi_err("bad fastmap size: %zi, expected: %zi", fm_size, 730 ubi->fm_size); 731 ret = UBI_BAD_FASTMAP; 732 goto free_fm_sb; 733 } 734 735 vh = &ubi->fm_vh; 736 737 for (i = 0; i < used_blocks; i++) { 738 pnum = be32_to_cpu(fmsb->block_loc[i]); 739 740 if (ubi_io_is_bad(ubi, pnum)) { 741 ret = UBI_BAD_FASTMAP; 742 goto free_hdr; 743 } 744 745#ifdef LATER 746 int image_seq; 747 ret = ubi_io_read_ec_hdr(ubi, pnum, ech, 0); 748 if 
(ret && ret != UBI_IO_BITFLIPS) { 749 ubi_err("unable to read fastmap block# %i EC (PEB: %i)", 750 i, pnum); 751 if (ret > 0) 752 ret = UBI_BAD_FASTMAP; 753 goto free_hdr; 754 } else if (ret == UBI_IO_BITFLIPS) 755 fm->to_be_tortured[i] = 1; 756 757 image_seq = be32_to_cpu(ech->image_seq); 758 if (!ubi->image_seq) 759 ubi->image_seq = image_seq; 760 /* 761 * Older UBI implementations have image_seq set to zero, so 762 * we shouldn't fail if image_seq == 0. 763 */ 764 if (image_seq && (image_seq != ubi->image_seq)) { 765 ubi_err("wrong image seq:%d instead of %d", 766 be32_to_cpu(ech->image_seq), ubi->image_seq); 767 ret = UBI_BAD_FASTMAP; 768 goto free_hdr; 769 } 770#endif 771 ret = ubi_io_read_vid_hdr(ubi, pnum, vh, 0); 772 if (ret && ret != UBI_IO_BITFLIPS) { 773 ubi_err("unable to read fastmap block# %i (PEB: %i)", 774 i, pnum); 775 goto free_hdr; 776 } 777 778 /* 779 * Mainline code rescans the anchor header. We've done 780 * that already so we merily copy it over. 781 */ 782 if (pnum == fm_anchor) 783 memcpy(vh, ubi->blockinfo + pnum, sizeof(*fm)); 784 785 if (i == 0) { 786 if (be32_to_cpu(vh->vol_id) != UBI_FM_SB_VOLUME_ID) { 787 ubi_err("bad fastmap anchor vol_id: 0x%x," \ 788 " expected: 0x%x", 789 be32_to_cpu(vh->vol_id), 790 UBI_FM_SB_VOLUME_ID); 791 ret = UBI_BAD_FASTMAP; 792 goto free_hdr; 793 } 794 } else { 795 if (be32_to_cpu(vh->vol_id) != UBI_FM_DATA_VOLUME_ID) { 796 ubi_err("bad fastmap data vol_id: 0x%x," \ 797 " expected: 0x%x", 798 be32_to_cpu(vh->vol_id), 799 UBI_FM_DATA_VOLUME_ID); 800 ret = UBI_BAD_FASTMAP; 801 goto free_hdr; 802 } 803 } 804 805 if (sqnum < be64_to_cpu(vh->sqnum)) 806 sqnum = be64_to_cpu(vh->sqnum); 807 808 ret = ubi_io_read(ubi, ubi->fm_buf + (ubi->leb_size * i), pnum, 809 ubi->leb_start, ubi->leb_size); 810 if (ret && ret != UBI_IO_BITFLIPS) { 811 ubi_err("unable to read fastmap block# %i (PEB: %i, " \ 812 "err: %i)", i, pnum, ret); 813 goto free_hdr; 814 } 815 } 816 817 fmsb2 = (struct ubi_fm_sb *)(ubi->fm_buf); 818 
tmp_crc = be32_to_cpu(fmsb2->data_crc); 819 fmsb2->data_crc = 0; 820 crc = crc32(UBI_CRC32_INIT, ubi->fm_buf, fm_size); 821 if (crc != tmp_crc) { 822 ubi_err("fastmap data CRC is invalid"); 823 ubi_err("CRC should be: 0x%x, calc: 0x%x", tmp_crc, crc); 824 ret = UBI_BAD_FASTMAP; 825 goto free_hdr; 826 } 827 828 fmsb2->sqnum = sqnum; 829 830 fm->used_blocks = used_blocks; 831 832 ret = ubi_attach_fastmap(ubi, ai, fm); 833 if (ret) { 834 if (ret > 0) 835 ret = UBI_BAD_FASTMAP; 836 goto free_hdr; 837 } 838 839 ubi->fm = fm; 840 ubi->fm_pool.max_size = ubi->fm->max_pool_size; 841 ubi->fm_wl_pool.max_size = ubi->fm->max_wl_pool_size; 842 ubi_msg("attached by fastmap %uMB %u blocks", 843 ubi->fsize_mb, ubi->peb_count); 844 ubi_dbg("fastmap pool size: %d", ubi->fm_pool.max_size); 845 ubi_dbg("fastmap WL pool size: %d", ubi->fm_wl_pool.max_size); 846 847out: 848 if (ret) 849 ubi_err("Attach by fastmap failed, doing a full scan!"); 850 return ret; 851 852free_hdr: 853free_fm_sb: 854 goto out; 855} 856 857/* 858 * Scan the flash and attempt to attach via fastmap 859 */ 860static void ipl_scan(struct ubi_scan_info *ubi) 861{ 862 unsigned int pnum; 863 int res; 864 865 /* 866 * Scan first for the fastmap super block 867 */ 868 for (pnum = 0; pnum < UBI_FM_MAX_START; pnum++) { 869 res = ubi_scan_vid_hdr(ubi, ubi->blockinfo + pnum, pnum); 870 /* 871 * We ignore errors here as we are meriliy scanning 872 * the headers. 873 */ 874 if (res != UBI_FASTMAP_ANCHOR) 875 continue; 876 877 /* 878 * If fastmap is disabled, continue scanning. This 879 * might happen because the previous attempt failed or 880 * the caller disabled it right away. 881 */ 882 if (!ubi->fm_enabled) 883 continue; 884 885 /* 886 * Try to attach the fastmap, if that fails continue 887 * scanning. 888 */ 889 if (!ubi_scan_fastmap(ubi, NULL, pnum)) 890 return; 891 /* 892 * Fastmap failed. Clear everything we have and start 893 * over. We are paranoid and do not trust anything. 
894 */ 895 memset(ubi->volinfo, 0, sizeof(ubi->volinfo)); 896 pnum = 0; 897 break; 898 } 899 900 /* 901 * Continue scanning, ignore errors, we might find what we are 902 * looking for, 903 */ 904 for (; pnum < ubi->peb_count; pnum++) 905 ubi_scan_vid_hdr(ubi, ubi->blockinfo + pnum, pnum); 906} 907 908/* 909 * Load a logical block of a volume into memory 910 */ 911static int ubi_load_block(struct ubi_scan_info *ubi, uint8_t *laddr, 912 struct ubi_vol_info *vi, u32 vol_id, u32 lnum, 913 u32 last) 914{ 915 struct ubi_vid_hdr *vh, *vrepl; 916 u32 pnum, crc, dlen; 917 918retry: 919 /* 920 * If this is a fastmap run, we try to rescan full, otherwise 921 * we simply give up. 922 */ 923 if (!test_bit(lnum, vi->found)) { 924 ubi_warn("LEB %d of %d is missing", lnum, last); 925 return -EINVAL; 926 } 927 928 pnum = vi->lebs_to_pebs[lnum]; 929 930 ubi_dbg("Load vol %u LEB %u PEB %u", vol_id, lnum, pnum); 931 932 if (ubi_io_is_bad(ubi, pnum)) { 933 ubi_warn("Corrupted mapping block %d PB %d\n", lnum, pnum); 934 return -EINVAL; 935 } 936 937 if (test_bit(pnum, ubi->corrupt)) 938 goto find_other; 939 940 /* 941 * Lets try to read that block 942 */ 943 vh = ubi->blockinfo + pnum; 944 945 if (!test_bit(pnum, ubi->scanned)) { 946 ubi_warn("Vol: %u LEB %u PEB %u not yet scanned", vol_id, 947 lnum, pnum); 948 if (ubi_rescan_fm_vid_hdr(ubi, vh, pnum, vol_id, lnum)) 949 goto find_other; 950 } 951 952 /* 953 * Check, if the total number of blocks is correct 954 */ 955 if (be32_to_cpu(vh->used_ebs) != last) { 956 ubi_dbg("Block count mismatch."); 957 ubi_dbg("vh->used_ebs: %d nrblocks: %d", 958 be32_to_cpu(vh->used_ebs), last); 959 generic_set_bit(pnum, ubi->corrupt); 960 goto find_other; 961 } 962 963 /* 964 * Get the data length of this block. 965 */ 966 dlen = be32_to_cpu(vh->data_size); 967 968 /* 969 * Read the data into RAM. We ignore the return value 970 * here as the only thing which might go wrong are 971 * bitflips. Try nevertheless. 
972 */ 973 ubi_io_read(ubi, laddr, pnum, ubi->leb_start, dlen); 974 975 /* Calculate CRC over the data */ 976 crc = crc32(UBI_CRC32_INIT, laddr, dlen); 977 978 if (crc != be32_to_cpu(vh->data_crc)) { 979 ubi_warn("Vol: %u LEB %u PEB %u data CRC failure", vol_id, 980 lnum, pnum); 981 generic_set_bit(pnum, ubi->corrupt); 982 goto find_other; 983 } 984 985 /* We are good. Return the data length we read */ 986 return dlen; 987 988find_other: 989 ubi_dbg("Find replacement for LEB %u PEB %u", lnum, pnum); 990 generic_clear_bit(lnum, vi->found); 991 vrepl = NULL; 992 993 for (pnum = 0; pnum < ubi->peb_count; pnum++) { 994 struct ubi_vid_hdr *tmp = ubi->blockinfo + pnum; 995 u32 t_vol_id = be32_to_cpu(tmp->vol_id); 996 u32 t_lnum = be32_to_cpu(tmp->lnum); 997 998 if (test_bit(pnum, ubi->corrupt)) 999 continue; 1000 1001 if (t_vol_id != vol_id || t_lnum != lnum) 1002 continue; 1003 1004 if (!test_bit(pnum, ubi->scanned)) { 1005 ubi_warn("Vol: %u LEB %u PEB %u not yet scanned", 1006 vol_id, lnum, pnum); 1007 if (ubi_rescan_fm_vid_hdr(ubi, tmp, pnum, vol_id, lnum)) 1008 continue; 1009 } 1010 1011 /* 1012 * We found one. 
If its the first, assign it otherwise 1013 * compare the sqnum 1014 */ 1015 generic_set_bit(lnum, vi->found); 1016 1017 if (!vrepl) { 1018 vrepl = tmp; 1019 continue; 1020 } 1021 1022 if (be64_to_cpu(vrepl->sqnum) < be64_to_cpu(tmp->sqnum)) 1023 vrepl = tmp; 1024 } 1025 1026 if (vrepl) { 1027 /* Update the vi table */ 1028 pnum = vrepl - ubi->blockinfo; 1029 vi->lebs_to_pebs[lnum] = pnum; 1030 ubi_dbg("Trying PEB %u for LEB %u", pnum, lnum); 1031 vh = vrepl; 1032 } 1033 goto retry; 1034} 1035 1036/* 1037 * Load a volume into RAM 1038 */ 1039static int ipl_load(struct ubi_scan_info *ubi, const u32 vol_id, uint8_t *laddr) 1040{ 1041 struct ubi_vol_info *vi; 1042 u32 lnum, last, len; 1043 1044 if (vol_id >= UBI_SPL_VOL_IDS) 1045 return -EINVAL; 1046 1047 len = 0; 1048 vi = ubi->volinfo + vol_id; 1049 last = vi->last_block + 1; 1050 1051 /* Read the blocks to RAM, check CRC */ 1052 for (lnum = 0 ; lnum < last; lnum++) { 1053 int res = ubi_load_block(ubi, laddr, vi, vol_id, lnum, last); 1054 1055 if (res < 0) { 1056 ubi_warn("Failed to load volume %u", vol_id); 1057 return res; 1058 } 1059 /* res is the data length of the read block */ 1060 laddr += res; 1061 len += res; 1062 } 1063 return len; 1064} 1065 1066int ubispl_load_volumes(struct ubispl_info *info, struct ubispl_load *lvols, 1067 int nrvols) 1068{ 1069 struct ubi_scan_info *ubi = info->ubi; 1070 int res, i, fastmap = info->fastmap; 1071 u32 fsize; 1072 1073retry: 1074 /* 1075 * We do a partial initializiation of @ubi. Cleaning fm_buf is 1076 * not necessary. 
1077 */ 1078 memset(ubi, 0, offsetof(struct ubi_scan_info, fm_buf)); 1079 1080 ubi->read = info->read; 1081 1082 /* Precalculate the offsets */ 1083 ubi->vid_offset = info->vid_offset; 1084 ubi->leb_start = info->leb_start; 1085 ubi->leb_size = info->peb_size - ubi->leb_start; 1086 ubi->peb_count = info->peb_count; 1087 ubi->peb_offset = info->peb_offset; 1088 1089#ifdef CONFIG_SPL_UBI_LOAD_BY_VOLNAME 1090 ubi->vtbl_valid = 0; 1091#endif 1092 1093 fsize = info->peb_size * info->peb_count; 1094 ubi->fsize_mb = fsize >> 20; 1095 1096 /* Fastmap init */ 1097 ubi->fm_size = ubi_calc_fm_size(ubi); 1098 ubi->fm_enabled = fastmap; 1099 1100 for (i = 0; i < nrvols; i++) { 1101 struct ubispl_load *lv = lvols + i; 1102 1103 generic_set_bit(lv->vol_id, ubi->toload); 1104 } 1105 1106 ipl_scan(ubi); 1107 1108 for (i = 0; i < nrvols; i++) { 1109 struct ubispl_load *lv = lvols + i; 1110 1111#ifdef CONFIG_SPL_UBI_LOAD_BY_VOLNAME 1112 if (lv->vol_id == -1) { 1113 for (int j = 0; j < UBI_SPL_VOL_IDS; j++) { 1114 int len = be16_to_cpu(ubi->vtbl[j].name_len); 1115 1116 if (strncmp(lv->name, 1117 ubi->vtbl[j].name, 1118 len) == 0) { 1119 lv->vol_id = j; 1120 break; 1121 } 1122 } 1123 } 1124 ubi_msg("Loading VolName %s (VolId #%d)", lv->name, lv->vol_id); 1125#else 1126 ubi_msg("Loading VolId #%d", lv->vol_id); 1127#endif 1128 res = ipl_load(ubi, lv->vol_id, lv->load_addr); 1129 if (res < 0) { 1130 if (fastmap) { 1131 fastmap = 0; 1132 goto retry; 1133 } 1134 ubi_warn("Failed"); 1135 return res; 1136 } 1137 } 1138 return 0; 1139} 1140