zio.c revision 205133
1/* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21/* 22 * Copyright 2008 Sun Microsystems, Inc. All rights reserved. 23 * Use is subject to license terms. 24 */ 25 26#include <sys/zfs_context.h> 27#include <sys/fm/fs/zfs.h> 28#include <sys/spa.h> 29#include <sys/txg.h> 30#include <sys/spa_impl.h> 31#include <sys/vdev_impl.h> 32#include <sys/zio_impl.h> 33#include <sys/zio_compress.h> 34#include <sys/zio_checksum.h> 35 36/* 37 * ========================================================================== 38 * I/O priority table 39 * ========================================================================== 40 */ 41uint8_t zio_priority_table[ZIO_PRIORITY_TABLE_SIZE] = { 42 0, /* ZIO_PRIORITY_NOW */ 43 0, /* ZIO_PRIORITY_SYNC_READ */ 44 0, /* ZIO_PRIORITY_SYNC_WRITE */ 45 6, /* ZIO_PRIORITY_ASYNC_READ */ 46 4, /* ZIO_PRIORITY_ASYNC_WRITE */ 47 4, /* ZIO_PRIORITY_FREE */ 48 0, /* ZIO_PRIORITY_CACHE_FILL */ 49 0, /* ZIO_PRIORITY_LOG_WRITE */ 50 10, /* ZIO_PRIORITY_RESILVER */ 51 20, /* ZIO_PRIORITY_SCRUB */ 52}; 53 54/* 55 * ========================================================================== 56 * I/O type descriptions 57 * ========================================================================== 58 */ 59char *zio_type_name[ZIO_TYPES] = { 60 "null", "read", "write", "free", "claim", "ioctl" }; 61 62#define SYNC_PASS_DEFERRED_FREE 1 /* defer frees after this pass */ 63#define SYNC_PASS_DONT_COMPRESS 4 /* don't compress after this pass */ 64#define SYNC_PASS_REWRITE 1 /* rewrite new bps after this pass */ 65 66/* 67 * ========================================================================== 68 * I/O kmem caches 69 * ========================================================================== 70 */ 71kmem_cache_t *zio_cache; 72#ifdef ZIO_USE_UMA 73kmem_cache_t *zio_buf_cache[SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT]; 74kmem_cache_t *zio_data_buf_cache[SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT]; 75#endif 76 77#ifdef _KERNEL 78extern vmem_t *zio_alloc_arena; 79#endif 80 81/* 82 * An allocating zio is one that either currently has the DVA allocate 83 * stage set or will have it later in its lifetime. 84 */ 85#define IO_IS_ALLOCATING(zio) \ 86 ((zio)->io_orig_pipeline & (1U << ZIO_STAGE_DVA_ALLOCATE)) 87 88void 89zio_init(void) 90{ 91#ifdef ZIO_USE_UMA 92 size_t c; 93#endif 94 zio_cache = kmem_cache_create("zio_cache", sizeof (zio_t), 0, 95 NULL, NULL, NULL, NULL, NULL, 0); 96 97#ifdef ZIO_USE_UMA 98 /* 99 * For small buffers, we want a cache for each multiple of 100 * SPA_MINBLOCKSIZE. For medium-size buffers, we want a cache 101 * for each quarter-power of 2. For large buffers, we want 102 * a cache for each multiple of PAGESIZE. 103 */ 104 for (c = 0; c < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT; c++) { 105 size_t size = (c + 1) << SPA_MINBLOCKSHIFT; 106 size_t p2 = size; 107 size_t align = 0; 108 109 while (p2 & (p2 - 1)) 110 p2 &= p2 - 1; 111 112 if (size <= 4 * SPA_MINBLOCKSIZE) { 113 align = SPA_MINBLOCKSIZE; 114 } else if (P2PHASE(size, PAGESIZE) == 0) { 115 align = PAGESIZE; 116 } else if (P2PHASE(size, p2 >> 2) == 0) { 117 align = p2 >> 2; 118 } 119 120 if (align != 0) { 121 char name[36]; 122 (void) sprintf(name, "zio_buf_%lu", (ulong_t)size); 123 zio_buf_cache[c] = kmem_cache_create(name, size, 124 align, NULL, NULL, NULL, NULL, NULL, KMC_NODEBUG); 125 126 (void) sprintf(name, "zio_data_buf_%lu", (ulong_t)size); 127 zio_data_buf_cache[c] = kmem_cache_create(name, size, 128 align, NULL, NULL, NULL, NULL, NULL, KMC_NODEBUG); 129 } 130 } 131 132 while (--c != 0) { 133 ASSERT(zio_buf_cache[c] != NULL); 134 if (zio_buf_cache[c - 1] == NULL) 135 zio_buf_cache[c - 1] = zio_buf_cache[c]; 136 137 ASSERT(zio_data_buf_cache[c] != NULL); 138 if (zio_data_buf_cache[c - 1] == NULL) 139 zio_data_buf_cache[c - 1] = zio_data_buf_cache[c]; 140 } 141#endif 142 143 zio_inject_init(); 144} 145 146void 147zio_fini(void) 148{ 149#ifdef ZIO_USE_UMA 150 size_t c; 151 kmem_cache_t *last_cache = NULL; 152 kmem_cache_t *last_data_cache = NULL; 153 154 for (c = 0; c < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT; c++) { 155 if (zio_buf_cache[c] != last_cache) { 156 last_cache = zio_buf_cache[c]; 157 kmem_cache_destroy(zio_buf_cache[c]); 158 } 159 zio_buf_cache[c] = NULL; 160 161 if (zio_data_buf_cache[c] != last_data_cache) { 162 last_data_cache = zio_data_buf_cache[c]; 163 kmem_cache_destroy(zio_data_buf_cache[c]); 164 } 165 zio_data_buf_cache[c] = NULL; 166 } 167#endif 168 169 kmem_cache_destroy(zio_cache); 170 171 zio_inject_fini(); 172} 173 174/* 175 * ========================================================================== 176 * Allocate and free I/O buffers 177 * ========================================================================== 178 */ 179 180/* 181 * Use zio_buf_alloc to allocate ZFS metadata. This data will appear in a 182 * crashdump if the kernel panics, so use it judiciously. Obviously, it's 183 * useful to inspect ZFS metadata, but if possible, we should avoid keeping 184 * excess / transient data in-core during a crashdump. 185 */ 186void * 187zio_buf_alloc(size_t size) 188{ 189#ifdef ZIO_USE_UMA 190 size_t c = (size - 1) >> SPA_MINBLOCKSHIFT; 191 192 ASSERT(c < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT); 193 194 return (kmem_cache_alloc(zio_buf_cache[c], KM_PUSHPAGE)); 195#else 196 return (kmem_alloc(size, KM_SLEEP)); 197#endif 198} 199 200/* 201 * Use zio_data_buf_alloc to allocate data. The data will not appear in a 202 * crashdump if the kernel panics. This exists so that we will limit the amount 203 * of ZFS data that shows up in a kernel crashdump. (Thus reducing the amount 204 * of kernel heap dumped to disk when the kernel panics) 205 */ 206void * 207zio_data_buf_alloc(size_t size) 208{ 209#ifdef ZIO_USE_UMA 210 size_t c = (size - 1) >> SPA_MINBLOCKSHIFT; 211 212 ASSERT(c < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT); 213 214 return (kmem_cache_alloc(zio_data_buf_cache[c], KM_PUSHPAGE)); 215#else 216 return (kmem_alloc(size, KM_SLEEP)); 217#endif 218} 219 220void 221zio_buf_free(void *buf, size_t size) 222{ 223#ifdef ZIO_USE_UMA 224 size_t c = (size - 1) >> SPA_MINBLOCKSHIFT; 225 226 ASSERT(c < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT); 227 228 kmem_cache_free(zio_buf_cache[c], buf); 229#else 230 kmem_free(buf, size); 231#endif 232} 233 234void 235zio_data_buf_free(void *buf, size_t size) 236{ 237#ifdef ZIO_USE_UMA 238 size_t c = (size - 1) >> SPA_MINBLOCKSHIFT; 239 240 ASSERT(c < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT); 241 242 kmem_cache_free(zio_data_buf_cache[c], buf); 243#else 244 kmem_free(buf, size); 245#endif 246} 247 248/* 249 * ========================================================================== 250 * Push and pop I/O transform buffers 251 * ========================================================================== 252 */ 253static void 254zio_push_transform(zio_t *zio, void *data, uint64_t size, uint64_t bufsize, 255 zio_transform_func_t *transform) 256{ 257 zio_transform_t *zt = kmem_alloc(sizeof (zio_transform_t), KM_SLEEP); 258 259 zt->zt_orig_data = zio->io_data; 260 zt->zt_orig_size = zio->io_size; 261 zt->zt_bufsize = bufsize; 262 zt->zt_transform = transform; 263 264 zt->zt_next = zio->io_transform_stack; 265 zio->io_transform_stack = zt; 266 267 zio->io_data = data; 268 zio->io_size = size; 269} 270 271static void 272zio_pop_transforms(zio_t *zio) 273{ 274 zio_transform_t *zt; 275 276 while ((zt = zio->io_transform_stack) != NULL) { 277 if (zt->zt_transform != NULL) 278 zt->zt_transform(zio, 279 zt->zt_orig_data, zt->zt_orig_size); 280 281 zio_buf_free(zio->io_data, zt->zt_bufsize); 282 283 zio->io_data = zt->zt_orig_data; 284 zio->io_size = zt->zt_orig_size; 285 zio->io_transform_stack = zt->zt_next; 286 287 kmem_free(zt, sizeof (zio_transform_t)); 288 } 289} 290 291/* 292 * ========================================================================== 293 * I/O transform callbacks for subblocks and decompression 294 * ========================================================================== 295 */ 296static void 297zio_subblock(zio_t *zio, void *data, uint64_t size) 298{ 299 ASSERT(zio->io_size > size); 300 301 if (zio->io_type == ZIO_TYPE_READ) 302 bcopy(zio->io_data, data, size); 303} 304 305static void 306zio_decompress(zio_t *zio, void *data, uint64_t size) 307{ 308 if (zio->io_error == 0 && 309 zio_decompress_data(BP_GET_COMPRESS(zio->io_bp), 310 zio->io_data, zio->io_size, data, size) != 0) 311 zio->io_error = EIO; 312} 313 314/* 315 * ========================================================================== 316 * I/O parent/child relationships and pipeline interlocks 317 * ========================================================================== 318 */ 319 320static void 321zio_add_child(zio_t *pio, zio_t *zio) 322{ 323 mutex_enter(&pio->io_lock); 324 if (zio->io_stage < ZIO_STAGE_READY) 325 pio->io_children[zio->io_child_type][ZIO_WAIT_READY]++; 326 if (zio->io_stage < ZIO_STAGE_DONE) 327 pio->io_children[zio->io_child_type][ZIO_WAIT_DONE]++; 328 zio->io_sibling_prev = NULL; 329 zio->io_sibling_next = pio->io_child; 330 if (pio->io_child != NULL) 331 pio->io_child->io_sibling_prev = zio; 332 pio->io_child = zio; 333 zio->io_parent = pio; 334 mutex_exit(&pio->io_lock); 335} 336 337static void 338zio_remove_child(zio_t *pio, zio_t *zio) 339{ 340 zio_t *next, *prev; 341 342 ASSERT(zio->io_parent == pio); 343 344 mutex_enter(&pio->io_lock); 345 next = zio->io_sibling_next; 346 prev = zio->io_sibling_prev; 347 if (next != NULL) 348 next->io_sibling_prev = prev; 349 if (prev != NULL) 350 prev->io_sibling_next = next; 351 if (pio->io_child == zio) 352 pio->io_child = next; 353 mutex_exit(&pio->io_lock); 354} 355 356static boolean_t 357zio_wait_for_children(zio_t *zio, enum zio_child child, enum zio_wait_type wait) 358{ 359 uint64_t *countp = &zio->io_children[child][wait]; 360 boolean_t waiting = B_FALSE; 361 362 mutex_enter(&zio->io_lock); 363 ASSERT(zio->io_stall == NULL); 364 if (*countp != 0) { 365 zio->io_stage--; 366 zio->io_stall = countp; 367 waiting = B_TRUE; 368 } 369 mutex_exit(&zio->io_lock); 370 371 return (waiting); 372} 373 374static void 375zio_notify_parent(zio_t *pio, zio_t *zio, enum zio_wait_type wait) 376{ 377 uint64_t *countp = &pio->io_children[zio->io_child_type][wait]; 378 int *errorp = &pio->io_child_error[zio->io_child_type]; 379 380 mutex_enter(&pio->io_lock); 381 if (zio->io_error && !(zio->io_flags & ZIO_FLAG_DONT_PROPAGATE)) 382 *errorp = zio_worst_error(*errorp, zio->io_error); 383 pio->io_reexecute |= zio->io_reexecute; 384 ASSERT3U(*countp, >, 0); 385 if (--*countp == 0 && pio->io_stall == countp) { 386 pio->io_stall = NULL; 387 mutex_exit(&pio->io_lock); 388 zio_execute(pio); 389 } else { 390 mutex_exit(&pio->io_lock); 391 } 392} 393 394static void 395zio_inherit_child_errors(zio_t *zio, enum zio_child c) 396{ 397 if (zio->io_child_error[c] != 0 && zio->io_error == 0) 398 zio->io_error = zio->io_child_error[c]; 399} 400 401/* 402 * ========================================================================== 403 * Create the various types of I/O (read, write, free, etc) 404 * ========================================================================== 405 */ 406static zio_t * 407zio_create(zio_t *pio, spa_t *spa, uint64_t txg, blkptr_t *bp, 408 void *data, uint64_t size, zio_done_func_t *done, void *private, 409 zio_type_t type, int priority, int flags, vdev_t *vd, uint64_t offset, 410 const zbookmark_t *zb, uint8_t stage, uint32_t pipeline) 411{ 412 zio_t *zio; 413 414 ASSERT3U(size, <=, SPA_MAXBLOCKSIZE); 415 ASSERT(P2PHASE(size, SPA_MINBLOCKSIZE) == 0); 416 ASSERT(P2PHASE(offset, SPA_MINBLOCKSIZE) == 0); 417 418 ASSERT(!vd || spa_config_held(spa, SCL_STATE_ALL, RW_READER)); 419 ASSERT(!bp || !(flags & ZIO_FLAG_CONFIG_WRITER)); 420 ASSERT(vd || stage == ZIO_STAGE_OPEN); 421 422 zio = kmem_cache_alloc(zio_cache, KM_SLEEP); 423 bzero(zio, sizeof (zio_t)); 424 425 mutex_init(&zio->io_lock, NULL, MUTEX_DEFAULT, NULL); 426 cv_init(&zio->io_cv, NULL, CV_DEFAULT, NULL); 427 428 if (vd != NULL) 429 zio->io_child_type = ZIO_CHILD_VDEV; 430 else if (flags & ZIO_FLAG_GANG_CHILD) 431 zio->io_child_type = ZIO_CHILD_GANG; 432 else 433 zio->io_child_type = ZIO_CHILD_LOGICAL; 434 435 if (bp != NULL) { 436 zio->io_bp = bp; 437 zio->io_bp_copy = *bp; 438 zio->io_bp_orig = *bp; 439 if (type != ZIO_TYPE_WRITE) 440 zio->io_bp = &zio->io_bp_copy; /* so caller can free */ 441 if (zio->io_child_type == ZIO_CHILD_LOGICAL) { 442 if (BP_IS_GANG(bp)) 443 pipeline |= ZIO_GANG_STAGES; 444 zio->io_logical = zio; 445 } 446 } 447 448 zio->io_spa = spa; 449 zio->io_txg = txg; 450 zio->io_data = data; 451 zio->io_size = size; 452 zio->io_done = done; 453 zio->io_private = private; 454 zio->io_type = type; 455 zio->io_priority = priority; 456 zio->io_vd = vd; 457 zio->io_offset = offset; 458 zio->io_orig_flags = zio->io_flags = flags; 459 zio->io_orig_stage = zio->io_stage = stage; 460 zio->io_orig_pipeline = zio->io_pipeline = pipeline; 461 462 if (zb != NULL) 463 zio->io_bookmark = *zb; 464 465 if (pio != NULL) { 466 /* 467 * Logical I/Os can have logical, gang, or vdev children. 468 * Gang I/Os can have gang or vdev children. 469 * Vdev I/Os can only have vdev children. 470 * The following ASSERT captures all of these constraints. 471 */ 472 ASSERT(zio->io_child_type <= pio->io_child_type); 473 if (zio->io_logical == NULL) 474 zio->io_logical = pio->io_logical; 475 zio_add_child(pio, zio); 476 } 477 478 return (zio); 479} 480 481static void 482zio_destroy(zio_t *zio) 483{ 484 spa_t *spa = zio->io_spa; 485 uint8_t async_root = zio->io_async_root; 486 487 mutex_destroy(&zio->io_lock); 488 cv_destroy(&zio->io_cv); 489 kmem_cache_free(zio_cache, zio); 490 491 if (async_root) { 492 mutex_enter(&spa->spa_async_root_lock); 493 if (--spa->spa_async_root_count == 0) 494 cv_broadcast(&spa->spa_async_root_cv); 495 mutex_exit(&spa->spa_async_root_lock); 496 } 497} 498 499zio_t * 500zio_null(zio_t *pio, spa_t *spa, zio_done_func_t *done, void *private, 501 int flags) 502{ 503 zio_t *zio; 504 505 zio = zio_create(pio, spa, 0, NULL, NULL, 0, done, private, 506 ZIO_TYPE_NULL, ZIO_PRIORITY_NOW, flags, NULL, 0, NULL, 507 ZIO_STAGE_OPEN, ZIO_INTERLOCK_PIPELINE); 508 509 return (zio); 510} 511 512zio_t * 513zio_root(spa_t *spa, zio_done_func_t *done, void *private, int flags) 514{ 515 return (zio_null(NULL, spa, done, private, flags)); 516} 517 518zio_t * 519zio_read(zio_t *pio, spa_t *spa, const blkptr_t *bp, 520 void *data, uint64_t size, zio_done_func_t *done, void *private, 521 int priority, int flags, const zbookmark_t *zb) 522{ 523 zio_t *zio; 524 525 zio = zio_create(pio, spa, bp->blk_birth, (blkptr_t *)bp, 526 data, size, done, private, 527 ZIO_TYPE_READ, priority, flags, NULL, 0, zb, 528 ZIO_STAGE_OPEN, ZIO_READ_PIPELINE); 529 530 return (zio); 531} 532 533zio_t * 534zio_write(zio_t *pio, spa_t *spa, uint64_t txg, blkptr_t *bp, 535 void *data, uint64_t size, zio_prop_t *zp, 536 zio_done_func_t *ready, zio_done_func_t *done, void *private, 537 int priority, int flags, const zbookmark_t *zb) 538{ 539 zio_t *zio; 540 541 ASSERT(zp->zp_checksum >= ZIO_CHECKSUM_OFF && 542 zp->zp_checksum < ZIO_CHECKSUM_FUNCTIONS && 543 zp->zp_compress >= ZIO_COMPRESS_OFF && 544 zp->zp_compress < ZIO_COMPRESS_FUNCTIONS && 545 zp->zp_type < DMU_OT_NUMTYPES && 546 zp->zp_level < 32 && 547 zp->zp_ndvas > 0 && 548 zp->zp_ndvas <= spa_max_replication(spa)); 549 ASSERT(ready != NULL); 550 551 zio = zio_create(pio, spa, txg, bp, data, size, done, private, 552 ZIO_TYPE_WRITE, priority, flags, NULL, 0, zb, 553 ZIO_STAGE_OPEN, ZIO_WRITE_PIPELINE); 554 555 zio->io_ready = ready; 556 zio->io_prop = *zp; 557 558 return (zio); 559} 560 561zio_t * 562zio_rewrite(zio_t *pio, spa_t *spa, uint64_t txg, blkptr_t *bp, void *data, 563 uint64_t size, zio_done_func_t *done, void *private, int priority, 564 int flags, zbookmark_t *zb) 565{ 566 zio_t *zio; 567 568 zio = zio_create(pio, spa, txg, bp, data, size, done, private, 569 ZIO_TYPE_WRITE, priority, flags, NULL, 0, zb, 570 ZIO_STAGE_OPEN, ZIO_REWRITE_PIPELINE); 571 572 return (zio); 573} 574 575zio_t * 576zio_free(zio_t *pio, spa_t *spa, uint64_t txg, blkptr_t *bp, 577 zio_done_func_t *done, void *private, int flags) 578{ 579 zio_t *zio; 580 581 ASSERT(!BP_IS_HOLE(bp)); 582 583 if (bp->blk_fill == BLK_FILL_ALREADY_FREED) 584 return (zio_null(pio, spa, NULL, NULL, flags)); 585 586 if (txg == spa->spa_syncing_txg && 587 spa_sync_pass(spa) > SYNC_PASS_DEFERRED_FREE) { 588 bplist_enqueue_deferred(&spa->spa_sync_bplist, bp); 589 return (zio_null(pio, spa, NULL, NULL, flags)); 590 } 591 592 zio = zio_create(pio, spa, txg, bp, NULL, BP_GET_PSIZE(bp), 593 done, private, ZIO_TYPE_FREE, ZIO_PRIORITY_FREE, flags, 594 NULL, 0, NULL, ZIO_STAGE_OPEN, ZIO_FREE_PIPELINE); 595 596 return (zio); 597} 598 599zio_t * 600zio_claim(zio_t *pio, spa_t *spa, uint64_t txg, blkptr_t *bp, 601 zio_done_func_t *done, void *private, int flags) 602{ 603 zio_t *zio; 604 605 /* 606 * A claim is an allocation of a specific block. Claims are needed 607 * to support immediate writes in the intent log. The issue is that 608 * immediate writes contain committed data, but in a txg that was 609 * *not* committed. Upon opening the pool after an unclean shutdown, 610 * the intent log claims all blocks that contain immediate write data 611 * so that the SPA knows they're in use. 612 * 613 * All claims *must* be resolved in the first txg -- before the SPA 614 * starts allocating blocks -- so that nothing is allocated twice. 615 */ 616 ASSERT3U(spa->spa_uberblock.ub_rootbp.blk_birth, <, spa_first_txg(spa)); 617 ASSERT3U(spa_first_txg(spa), <=, txg); 618 619 zio = zio_create(pio, spa, txg, bp, NULL, BP_GET_PSIZE(bp), 620 done, private, ZIO_TYPE_CLAIM, ZIO_PRIORITY_NOW, flags, 621 NULL, 0, NULL, ZIO_STAGE_OPEN, ZIO_CLAIM_PIPELINE); 622 623 return (zio); 624} 625 626zio_t * 627zio_ioctl(zio_t *pio, spa_t *spa, vdev_t *vd, int cmd, 628 zio_done_func_t *done, void *private, int priority, int flags) 629{ 630 zio_t *zio; 631 int c; 632 633 if (vd->vdev_children == 0) { 634 zio = zio_create(pio, spa, 0, NULL, NULL, 0, done, private, 635 ZIO_TYPE_IOCTL, priority, flags, vd, 0, NULL, 636 ZIO_STAGE_OPEN, ZIO_IOCTL_PIPELINE); 637 638 zio->io_cmd = cmd; 639 } else { 640 zio = zio_null(pio, spa, NULL, NULL, flags); 641 642 for (c = 0; c < vd->vdev_children; c++) 643 zio_nowait(zio_ioctl(zio, spa, vd->vdev_child[c], cmd, 644 done, private, priority, flags)); 645 } 646 647 return (zio); 648} 649 650zio_t * 651zio_read_phys(zio_t *pio, vdev_t *vd, uint64_t offset, uint64_t size, 652 void *data, int checksum, zio_done_func_t *done, void *private, 653 int priority, int flags, boolean_t labels) 654{ 655 zio_t *zio; 656 657 ASSERT(vd->vdev_children == 0); 658 ASSERT(!labels || offset + size <= VDEV_LABEL_START_SIZE || 659 offset >= vd->vdev_psize - VDEV_LABEL_END_SIZE); 660 ASSERT3U(offset + size, <=, vd->vdev_psize); 661 662 zio = zio_create(pio, vd->vdev_spa, 0, NULL, data, size, done, private, 663 ZIO_TYPE_READ, priority, flags, vd, offset, NULL, 664 ZIO_STAGE_OPEN, ZIO_READ_PHYS_PIPELINE); 665 666 zio->io_prop.zp_checksum = checksum; 667 668 return (zio); 669} 670 671zio_t * 672zio_write_phys(zio_t *pio, vdev_t *vd, uint64_t offset, uint64_t size, 673 void *data, int checksum, zio_done_func_t *done, void *private, 674 int priority, int flags, boolean_t labels) 675{ 676 zio_t *zio; 677 678 ASSERT(vd->vdev_children == 0); 679 ASSERT(!labels || offset + size <= VDEV_LABEL_START_SIZE || 680 offset >= vd->vdev_psize - VDEV_LABEL_END_SIZE); 681 ASSERT3U(offset + size, <=, vd->vdev_psize); 682 683 zio = zio_create(pio, vd->vdev_spa, 0, NULL, data, size, done, private, 684 ZIO_TYPE_WRITE, priority, flags, vd, offset, NULL, 685 ZIO_STAGE_OPEN, ZIO_WRITE_PHYS_PIPELINE); 686 687 zio->io_prop.zp_checksum = checksum; 688 689 if (zio_checksum_table[checksum].ci_zbt) { 690 /* 691 * zbt checksums are necessarily destructive -- they modify 692 * the end of the write buffer to hold the verifier/checksum. 693 * Therefore, we must make a local copy in case the data is 694 * being written to multiple places in parallel. 695 */ 696 void *wbuf = zio_buf_alloc(size); 697 bcopy(data, wbuf, size); 698 zio_push_transform(zio, wbuf, size, size, NULL); 699 } 700 701 return (zio); 702} 703 704/* 705 * Create a child I/O to do some work for us. 706 */ 707zio_t * 708zio_vdev_child_io(zio_t *pio, blkptr_t *bp, vdev_t *vd, uint64_t offset, 709 void *data, uint64_t size, int type, int priority, int flags, 710 zio_done_func_t *done, void *private) 711{ 712 uint32_t pipeline = ZIO_VDEV_CHILD_PIPELINE; 713 zio_t *zio; 714 715 ASSERT(vd->vdev_parent == 716 (pio->io_vd ? pio->io_vd : pio->io_spa->spa_root_vdev)); 717 718 if (type == ZIO_TYPE_READ && bp != NULL) { 719 /* 720 * If we have the bp, then the child should perform the 721 * checksum and the parent need not. This pushes error 722 * detection as close to the leaves as possible and 723 * eliminates redundant checksums in the interior nodes. 724 */ 725 pipeline |= 1U << ZIO_STAGE_CHECKSUM_VERIFY; 726 pio->io_pipeline &= ~(1U << ZIO_STAGE_CHECKSUM_VERIFY); 727 } 728 729 if (vd->vdev_children == 0) 730 offset += VDEV_LABEL_START_SIZE; 731 732 zio = zio_create(pio, pio->io_spa, pio->io_txg, bp, data, size, 733 done, private, type, priority, 734 (pio->io_flags & ZIO_FLAG_VDEV_INHERIT) | 735 ZIO_FLAG_CANFAIL | ZIO_FLAG_DONT_PROPAGATE | flags, 736 vd, offset, &pio->io_bookmark, 737 ZIO_STAGE_VDEV_IO_START - 1, pipeline); 738 739 return (zio); 740} 741 742zio_t * 743zio_vdev_delegated_io(vdev_t *vd, uint64_t offset, void *data, uint64_t size, 744 int type, int priority, int flags, zio_done_func_t *done, void *private) 745{ 746 zio_t *zio; 747 748 ASSERT(vd->vdev_ops->vdev_op_leaf); 749 750 zio = zio_create(NULL, vd->vdev_spa, 0, NULL, 751 data, size, done, private, type, priority, 752 flags | ZIO_FLAG_CANFAIL | ZIO_FLAG_DONT_RETRY, 753 vd, offset, NULL, 754 ZIO_STAGE_VDEV_IO_START - 1, ZIO_VDEV_CHILD_PIPELINE); 755 756 return (zio); 757} 758 759void 760zio_flush(zio_t *zio, vdev_t *vd) 761{ 762 zio_nowait(zio_ioctl(zio, zio->io_spa, vd, DKIOCFLUSHWRITECACHE, 763 NULL, NULL, ZIO_PRIORITY_NOW, 764 ZIO_FLAG_CANFAIL | ZIO_FLAG_DONT_PROPAGATE | ZIO_FLAG_DONT_RETRY)); 765} 766 767/* 768 * ========================================================================== 769 * Prepare to read and write logical blocks 770 * ========================================================================== 771 */ 772 773static int 774zio_read_bp_init(zio_t *zio) 775{ 776 blkptr_t *bp = zio->io_bp; 777 778 if (BP_GET_COMPRESS(bp) != ZIO_COMPRESS_OFF && zio->io_logical == zio) { 779 uint64_t csize = BP_GET_PSIZE(bp); 780 void *cbuf = zio_buf_alloc(csize); 781 782 zio_push_transform(zio, cbuf, csize, csize, zio_decompress); 783 } 784 785 if (!dmu_ot[BP_GET_TYPE(bp)].ot_metadata && BP_GET_LEVEL(bp) == 0) 786 zio->io_flags |= ZIO_FLAG_DONT_CACHE; 787 788 return (ZIO_PIPELINE_CONTINUE); 789} 790 791static int 792zio_write_bp_init(zio_t *zio) 793{ 794 zio_prop_t *zp = &zio->io_prop; 795 int compress = zp->zp_compress; 796 blkptr_t *bp = zio->io_bp; 797 void *cbuf; 798 uint64_t lsize = zio->io_size; 799 uint64_t csize = lsize; 800 uint64_t cbufsize = 0; 801 int pass = 1; 802 803 /* 804 * If our children haven't all reached the ready stage, 805 * wait for them and then repeat this pipeline stage. 806 */ 807 if (zio_wait_for_children(zio, ZIO_CHILD_GANG, ZIO_WAIT_READY) || 808 zio_wait_for_children(zio, ZIO_CHILD_LOGICAL, ZIO_WAIT_READY)) 809 return (ZIO_PIPELINE_STOP); 810 811 if (!IO_IS_ALLOCATING(zio)) 812 return (ZIO_PIPELINE_CONTINUE); 813 814 ASSERT(compress != ZIO_COMPRESS_INHERIT); 815 816 if (bp->blk_birth == zio->io_txg) { 817 /* 818 * We're rewriting an existing block, which means we're 819 * working on behalf of spa_sync(). For spa_sync() to 820 * converge, it must eventually be the case that we don't 821 * have to allocate new blocks. But compression changes 822 * the blocksize, which forces a reallocate, and makes 823 * convergence take longer. Therefore, after the first 824 * few passes, stop compressing to ensure convergence. 825 */ 826 pass = spa_sync_pass(zio->io_spa); 827 ASSERT(pass > 1); 828 829 if (pass > SYNC_PASS_DONT_COMPRESS) 830 compress = ZIO_COMPRESS_OFF; 831 832 /* 833 * Only MOS (objset 0) data should need to be rewritten. 834 */ 835 ASSERT(zio->io_logical->io_bookmark.zb_objset == 0); 836 837 /* Make sure someone doesn't change their mind on overwrites */ 838 ASSERT(MIN(zp->zp_ndvas + BP_IS_GANG(bp), 839 spa_max_replication(zio->io_spa)) == BP_GET_NDVAS(bp)); 840 } 841 842 if (compress != ZIO_COMPRESS_OFF) { 843 if (!zio_compress_data(compress, zio->io_data, zio->io_size, 844 &cbuf, &csize, &cbufsize)) { 845 compress = ZIO_COMPRESS_OFF; 846 } else if (csize != 0) { 847 zio_push_transform(zio, cbuf, csize, cbufsize, NULL); 848 } 849 } 850 851 /* 852 * The final pass of spa_sync() must be all rewrites, but the first 853 * few passes offer a trade-off: allocating blocks defers convergence, 854 * but newly allocated blocks are sequential, so they can be written 855 * to disk faster. Therefore, we allow the first few passes of 856 * spa_sync() to allocate new blocks, but force rewrites after that. 857 * There should only be a handful of blocks after pass 1 in any case. 858 */ 859 if (bp->blk_birth == zio->io_txg && BP_GET_PSIZE(bp) == csize && 860 pass > SYNC_PASS_REWRITE) { 861 ASSERT(csize != 0); 862 uint32_t gang_stages = zio->io_pipeline & ZIO_GANG_STAGES; 863 zio->io_pipeline = ZIO_REWRITE_PIPELINE | gang_stages; 864 zio->io_flags |= ZIO_FLAG_IO_REWRITE; 865 } else { 866 BP_ZERO(bp); 867 zio->io_pipeline = ZIO_WRITE_PIPELINE; 868 } 869 870 if (csize == 0) { 871 zio->io_pipeline = ZIO_INTERLOCK_PIPELINE; 872 } else { 873 ASSERT(zp->zp_checksum != ZIO_CHECKSUM_GANG_HEADER); 874 BP_SET_LSIZE(bp, lsize); 875 BP_SET_PSIZE(bp, csize); 876 BP_SET_COMPRESS(bp, compress); 877 BP_SET_CHECKSUM(bp, zp->zp_checksum); 878 BP_SET_TYPE(bp, zp->zp_type); 879 BP_SET_LEVEL(bp, zp->zp_level); 880 BP_SET_BYTEORDER(bp, ZFS_HOST_BYTEORDER); 881 } 882 883 return (ZIO_PIPELINE_CONTINUE); 884} 885 886/* 887 * ========================================================================== 888 * Execute the I/O pipeline 889 * ========================================================================== 890 */ 891 892static void 893zio_taskq_dispatch(zio_t *zio, enum zio_taskq_type q) 894{ 895 zio_type_t t = zio->io_type; 896 897 /* 898 * If we're a config writer, the normal issue and interrupt threads 899 * may all be blocked waiting for the config lock. In this case, 900 * select the otherwise-unused taskq for ZIO_TYPE_NULL. 901 */ 902 if (zio->io_flags & ZIO_FLAG_CONFIG_WRITER) 903 t = ZIO_TYPE_NULL; 904 905 /* 906 * A similar issue exists for the L2ARC write thread until L2ARC 2.0. 907 */ 908 if (t == ZIO_TYPE_WRITE && zio->io_vd && zio->io_vd->vdev_aux) 909 t = ZIO_TYPE_NULL; 910 911 (void) taskq_dispatch(zio->io_spa->spa_zio_taskq[t][q], 912 (task_func_t *)zio_execute, zio, TQ_SLEEP); 913} 914 915static boolean_t 916zio_taskq_member(zio_t *zio, enum zio_taskq_type q) 917{ 918 kthread_t *executor = zio->io_executor; 919 spa_t *spa = zio->io_spa; 920 921 for (zio_type_t t = 0; t < ZIO_TYPES; t++) 922 if (taskq_member(spa->spa_zio_taskq[t][q], executor)) 923 return (B_TRUE); 924 925 return (B_FALSE); 926} 927 928static int 929zio_issue_async(zio_t *zio) 930{ 931 zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE); 932 933 return (ZIO_PIPELINE_STOP); 934} 935 936void 937zio_interrupt(zio_t *zio) 938{ 939 zio_taskq_dispatch(zio, ZIO_TASKQ_INTERRUPT); 940} 941 942/* 943 * Execute the I/O pipeline until one of the following occurs: 944 * (1) the I/O completes; (2) the pipeline stalls waiting for 945 * dependent child I/Os; (3) the I/O issues, so we're waiting 946 * for an I/O completion interrupt; (4) the I/O is delegated by 947 * vdev-level caching or aggregation; (5) the I/O is deferred 948 * due to vdev-level queueing; (6) the I/O is handed off to 949 * another thread. In all cases, the pipeline stops whenever 950 * there's no CPU work; it never burns a thread in cv_wait(). 951 * 952 * There's no locking on io_stage because there's no legitimate way 953 * for multiple threads to be attempting to process the same I/O. 954 */ 955static zio_pipe_stage_t *zio_pipeline[ZIO_STAGES]; 956 957void 958zio_execute(zio_t *zio) 959{ 960 zio->io_executor = curthread; 961 962 while (zio->io_stage < ZIO_STAGE_DONE) { 963 uint32_t pipeline = zio->io_pipeline; 964 zio_stage_t stage = zio->io_stage; 965 int rv; 966 967 ASSERT(!MUTEX_HELD(&zio->io_lock)); 968 969 while (((1U << ++stage) & pipeline) == 0) 970 continue; 971 972 ASSERT(stage <= ZIO_STAGE_DONE); 973 ASSERT(zio->io_stall == NULL); 974 975 /* 976 * If we are in interrupt context and this pipeline stage 977 * will grab a config lock that is held across I/O, 978 * issue async to avoid deadlock. 979 */ 980 if (((1U << stage) & ZIO_CONFIG_LOCK_BLOCKING_STAGES) && 981 zio->io_vd == NULL && 982 zio_taskq_member(zio, ZIO_TASKQ_INTERRUPT)) { 983 zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE); 984 return; 985 } 986 987 zio->io_stage = stage; 988 rv = zio_pipeline[stage](zio); 989 990 if (rv == ZIO_PIPELINE_STOP) 991 return; 992 993 ASSERT(rv == ZIO_PIPELINE_CONTINUE); 994 } 995} 996 997/* 998 * ========================================================================== 999 * Initiate I/O, either sync or async 1000 * ========================================================================== 1001 */ 1002int 1003zio_wait(zio_t *zio) 1004{ 1005 int error; 1006 1007 ASSERT(zio->io_stage == ZIO_STAGE_OPEN); 1008 ASSERT(zio->io_executor == NULL); 1009 1010 zio->io_waiter = curthread; 1011 1012 zio_execute(zio); 1013 1014 mutex_enter(&zio->io_lock); 1015 while (zio->io_executor != NULL) 1016 cv_wait(&zio->io_cv, &zio->io_lock); 1017 mutex_exit(&zio->io_lock); 1018 1019 error = zio->io_error; 1020 zio_destroy(zio); 1021 1022 return (error); 1023} 1024 1025void 1026zio_nowait(zio_t *zio) 1027{ 1028 ASSERT(zio->io_executor == NULL); 1029 1030 if (zio->io_parent == NULL && zio->io_child_type == ZIO_CHILD_LOGICAL) { 1031 /* 1032 * This is a logical async I/O with no parent to wait for it. 1033 * Attach it to the pool's global async root zio so that 1034 * spa_unload() has a way of waiting for async I/O to finish. 1035 */ 1036 spa_t *spa = zio->io_spa; 1037 zio->io_async_root = B_TRUE; 1038 mutex_enter(&spa->spa_async_root_lock); 1039 spa->spa_async_root_count++; 1040 mutex_exit(&spa->spa_async_root_lock); 1041 } 1042 1043 zio_execute(zio); 1044} 1045 1046/* 1047 * ========================================================================== 1048 * Reexecute or suspend/resume failed I/O 1049 * ========================================================================== 1050 */ 1051 1052static void 1053zio_reexecute(zio_t *pio) 1054{ 1055 zio_t *zio, *zio_next; 1056 1057 pio->io_flags = pio->io_orig_flags; 1058 pio->io_stage = pio->io_orig_stage; 1059 pio->io_pipeline = pio->io_orig_pipeline; 1060 pio->io_reexecute = 0; 1061 pio->io_error = 0; 1062 for (int c = 0; c < ZIO_CHILD_TYPES; c++) 1063 pio->io_child_error[c] = 0; 1064 1065 if (IO_IS_ALLOCATING(pio)) { 1066 /* 1067 * Remember the failed bp so that the io_ready() callback 1068 * can update its accounting upon reexecution. The block 1069 * was already freed in zio_done(); we indicate this with 1070 * a fill count of -1 so that zio_free() knows to skip it. 1071 */ 1072 blkptr_t *bp = pio->io_bp; 1073 ASSERT(bp->blk_birth == 0 || bp->blk_birth == pio->io_txg); 1074 bp->blk_fill = BLK_FILL_ALREADY_FREED; 1075 pio->io_bp_orig = *bp; 1076 BP_ZERO(bp); 1077 } 1078 1079 /* 1080 * As we reexecute pio's children, new children could be created. 1081 * New children go to the head of the io_child list, however, 1082 * so we will (correctly) not reexecute them. The key is that 1083 * the remainder of the io_child list, from 'zio_next' onward, 1084 * cannot be affected by any side effects of reexecuting 'zio'. 1085 */ 1086 for (zio = pio->io_child; zio != NULL; zio = zio_next) { 1087 zio_next = zio->io_sibling_next; 1088 mutex_enter(&pio->io_lock); 1089 pio->io_children[zio->io_child_type][ZIO_WAIT_READY]++; 1090 pio->io_children[zio->io_child_type][ZIO_WAIT_DONE]++; 1091 mutex_exit(&pio->io_lock); 1092 zio_reexecute(zio); 1093 } 1094 1095 /* 1096 * Now that all children have been reexecuted, execute the parent. 1097 */ 1098 zio_execute(pio); 1099} 1100 1101void 1102zio_suspend(spa_t *spa, zio_t *zio) 1103{ 1104 if (spa_get_failmode(spa) == ZIO_FAILURE_MODE_PANIC) 1105 fm_panic("Pool '%s' has encountered an uncorrectable I/O " 1106 "failure and the failure mode property for this pool " 1107 "is set to panic.", spa_name(spa)); 1108 1109 zfs_ereport_post(FM_EREPORT_ZFS_IO_FAILURE, spa, NULL, NULL, 0, 0); 1110 1111 mutex_enter(&spa->spa_suspend_lock); 1112 1113 if (spa->spa_suspend_zio_root == NULL) 1114 spa->spa_suspend_zio_root = zio_root(spa, NULL, NULL, 0); 1115 1116 spa->spa_suspended = B_TRUE; 1117 1118 if (zio != NULL) { 1119 ASSERT(zio != spa->spa_suspend_zio_root); 1120 ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL); 1121 ASSERT(zio->io_parent == NULL); 1122 ASSERT(zio->io_stage == ZIO_STAGE_DONE); 1123 zio_add_child(spa->spa_suspend_zio_root, zio); 1124 } 1125 1126 mutex_exit(&spa->spa_suspend_lock); 1127} 1128 1129void 1130zio_resume(spa_t *spa) 1131{ 1132 zio_t *pio, *zio; 1133 1134 /* 1135 * Reexecute all previously suspended i/o. 1136 */ 1137 mutex_enter(&spa->spa_suspend_lock); 1138 spa->spa_suspended = B_FALSE; 1139 cv_broadcast(&spa->spa_suspend_cv); 1140 pio = spa->spa_suspend_zio_root; 1141 spa->spa_suspend_zio_root = NULL; 1142 mutex_exit(&spa->spa_suspend_lock); 1143 1144 if (pio == NULL) 1145 return; 1146 1147 while ((zio = pio->io_child) != NULL) { 1148 zio_remove_child(pio, zio); 1149 zio->io_parent = NULL; 1150 zio_reexecute(zio); 1151 } 1152 1153 ASSERT(pio->io_children[ZIO_CHILD_LOGICAL][ZIO_WAIT_DONE] == 0); 1154 1155 (void) zio_wait(pio); 1156} 1157 1158void 1159zio_resume_wait(spa_t *spa) 1160{ 1161 mutex_enter(&spa->spa_suspend_lock); 1162 while (spa_suspended(spa)) 1163 cv_wait(&spa->spa_suspend_cv, &spa->spa_suspend_lock); 1164 mutex_exit(&spa->spa_suspend_lock); 1165} 1166 1167/* 1168 * ========================================================================== 1169 * Gang blocks. 1170 * 1171 * A gang block is a collection of small blocks that looks to the DMU 1172 * like one large block. When zio_dva_allocate() cannot find a block 1173 * of the requested size, due to either severe fragmentation or the pool 1174 * being nearly full, it calls zio_write_gang_block() to construct the 1175 * block from smaller fragments. 1176 * 1177 * A gang block consists of a gang header (zio_gbh_phys_t) and up to 1178 * three (SPA_GBH_NBLKPTRS) gang members. The gang header is just like 1179 * an indirect block: it's an array of block pointers. It consumes 1180 * only one sector and hence is allocatable regardless of fragmentation. 1181 * The gang header's bps point to its gang members, which hold the data. 1182 * 1183 * Gang blocks are self-checksumming, using the bp's <vdev, offset, txg> 1184 * as the verifier to ensure uniqueness of the SHA256 checksum. 1185 * Critically, the gang block bp's blk_cksum is the checksum of the data, 1186 * not the gang header. This ensures that data block signatures (needed for 1187 * deduplication) are independent of how the block is physically stored. 1188 * 1189 * Gang blocks can be nested: a gang member may itself be a gang block. 1190 * Thus every gang block is a tree in which root and all interior nodes are 1191 * gang headers, and the leaves are normal blocks that contain user data. 1192 * The root of the gang tree is called the gang leader. 1193 * 1194 * To perform any operation (read, rewrite, free, claim) on a gang block, 1195 * zio_gang_assemble() first assembles the gang tree (minus data leaves) 1196 * in the io_gang_tree field of the original logical i/o by recursively 1197 * reading the gang leader and all gang headers below it. This yields 1198 * an in-core tree containing the contents of every gang header and the 1199 * bps for every constituent of the gang block. 1200 * 1201 * With the gang tree now assembled, zio_gang_issue() just walks the gang tree 1202 * and invokes a callback on each bp. To free a gang block, zio_gang_issue() 1203 * calls zio_free_gang() -- a trivial wrapper around zio_free() -- for each bp. 1204 * zio_claim_gang() provides a similarly trivial wrapper for zio_claim(). 1205 * zio_read_gang() is a wrapper around zio_read() that omits reading gang 1206 * headers, since we already have those in io_gang_tree. zio_rewrite_gang() 1207 * performs a zio_rewrite() of the data or, for gang headers, a zio_rewrite() 1208 * of the gang header plus zio_checksum_compute() of the data to update the 1209 * gang header's blk_cksum as described above. 1210 * 1211 * The two-phase assemble/issue model solves the problem of partial failure -- 1212 * what if you'd freed part of a gang block but then couldn't read the 1213 * gang header for another part? Assembling the entire gang tree first 1214 * ensures that all the necessary gang header I/O has succeeded before 1215 * starting the actual work of free, claim, or write. Once the gang tree 1216 * is assembled, free and claim are in-memory operations that cannot fail. 1217 * 1218 * In the event that a gang write fails, zio_dva_unallocate() walks the 1219 * gang tree to immediately free (i.e. insert back into the space map) 1220 * everything we've allocated. This ensures that we don't get ENOSPC 1221 * errors during repeated suspend/resume cycles due to a flaky device. 1222 * 1223 * Gang rewrites only happen during sync-to-convergence. If we can't assemble 1224 * the gang tree, we won't modify the block, so we can safely defer the free 1225 * (knowing that the block is still intact). If we *can* assemble the gang 1226 * tree, then even if some of the rewrites fail, zio_dva_unallocate() will free 1227 * each constituent bp and we can allocate a new block on the next sync pass. 1228 * 1229 * In all cases, the gang tree allows complete recovery from partial failure. 1230 * ========================================================================== 1231 */ 1232 1233static zio_t * 1234zio_read_gang(zio_t *pio, blkptr_t *bp, zio_gang_node_t *gn, void *data) 1235{ 1236 if (gn != NULL) 1237 return (pio); 1238 1239 return (zio_read(pio, pio->io_spa, bp, data, BP_GET_PSIZE(bp), 1240 NULL, NULL, pio->io_priority, ZIO_GANG_CHILD_FLAGS(pio), 1241 &pio->io_bookmark)); 1242} 1243 1244zio_t * 1245zio_rewrite_gang(zio_t *pio, blkptr_t *bp, zio_gang_node_t *gn, void *data) 1246{ 1247 zio_t *zio; 1248 1249 if (gn != NULL) { 1250 zio = zio_rewrite(pio, pio->io_spa, pio->io_txg, bp, 1251 gn->gn_gbh, SPA_GANGBLOCKSIZE, NULL, NULL, pio->io_priority, 1252 ZIO_GANG_CHILD_FLAGS(pio), &pio->io_bookmark); 1253 /* 1254 * As we rewrite each gang header, the pipeline will compute 1255 * a new gang block header checksum for it; but no one will 1256 * compute a new data checksum, so we do that here. The one 1257 * exception is the gang leader: the pipeline already computed 1258 * its data checksum because that stage precedes gang assembly. 1259 * (Presently, nothing actually uses interior data checksums; 1260 * this is just good hygiene.) 1261 */ 1262 if (gn != pio->io_logical->io_gang_tree) { 1263 zio_checksum_compute(zio, BP_GET_CHECKSUM(bp), 1264 data, BP_GET_PSIZE(bp)); 1265 } 1266 } else { 1267 zio = zio_rewrite(pio, pio->io_spa, pio->io_txg, bp, 1268 data, BP_GET_PSIZE(bp), NULL, NULL, pio->io_priority, 1269 ZIO_GANG_CHILD_FLAGS(pio), &pio->io_bookmark); 1270 } 1271 1272 return (zio); 1273} 1274 1275/* ARGSUSED */ 1276zio_t * 1277zio_free_gang(zio_t *pio, blkptr_t *bp, zio_gang_node_t *gn, void *data) 1278{ 1279 return (zio_free(pio, pio->io_spa, pio->io_txg, bp, 1280 NULL, NULL, ZIO_GANG_CHILD_FLAGS(pio))); 1281} 1282 1283/* ARGSUSED */ 1284zio_t * 1285zio_claim_gang(zio_t *pio, blkptr_t *bp, zio_gang_node_t *gn, void *data) 1286{ 1287 return (zio_claim(pio, pio->io_spa, pio->io_txg, bp, 1288 NULL, NULL, ZIO_GANG_CHILD_FLAGS(pio))); 1289} 1290 1291static zio_gang_issue_func_t *zio_gang_issue_func[ZIO_TYPES] = { 1292 NULL, 1293 zio_read_gang, 1294 zio_rewrite_gang, 1295 zio_free_gang, 1296 zio_claim_gang, 1297 NULL 1298}; 1299 1300static void zio_gang_tree_assemble_done(zio_t *zio); 1301 1302static zio_gang_node_t * 1303zio_gang_node_alloc(zio_gang_node_t **gnpp) 1304{ 1305 zio_gang_node_t *gn; 1306 1307 ASSERT(*gnpp == NULL); 1308 1309 gn = kmem_zalloc(sizeof (*gn), KM_SLEEP); 1310 gn->gn_gbh = zio_buf_alloc(SPA_GANGBLOCKSIZE); 1311 *gnpp = gn; 1312 1313 return (gn); 1314} 1315 1316static void 1317zio_gang_node_free(zio_gang_node_t **gnpp) 1318{ 1319 zio_gang_node_t *gn = *gnpp; 1320 1321 for (int g = 0; g < SPA_GBH_NBLKPTRS; g++) 1322 ASSERT(gn->gn_child[g] == NULL); 1323 1324 zio_buf_free(gn->gn_gbh, SPA_GANGBLOCKSIZE); 1325 kmem_free(gn, sizeof (*gn)); 1326 *gnpp = NULL; 1327} 1328 1329static void 1330zio_gang_tree_free(zio_gang_node_t **gnpp) 1331{ 1332 zio_gang_node_t *gn = *gnpp; 1333 1334 if (gn == NULL) 1335 return; 1336 1337 for (int g = 0; g < SPA_GBH_NBLKPTRS; g++) 1338 zio_gang_tree_free(&gn->gn_child[g]); 1339 1340 zio_gang_node_free(gnpp); 1341} 1342 1343static void 1344zio_gang_tree_assemble(zio_t *lio, blkptr_t *bp, zio_gang_node_t **gnpp) 1345{ 1346 zio_gang_node_t *gn = zio_gang_node_alloc(gnpp); 1347 1348 ASSERT(lio->io_logical == lio); 1349 ASSERT(BP_IS_GANG(bp)); 1350 1351 zio_nowait(zio_read(lio, lio->io_spa, bp, gn->gn_gbh, 1352 SPA_GANGBLOCKSIZE, zio_gang_tree_assemble_done, gn, 1353 lio->io_priority, ZIO_GANG_CHILD_FLAGS(lio), &lio->io_bookmark)); 1354} 1355 1356static void 1357zio_gang_tree_assemble_done(zio_t *zio) 1358{ 1359 zio_t *lio = zio->io_logical; 1360 zio_gang_node_t *gn = zio->io_private; 1361 blkptr_t *bp = zio->io_bp; 1362 1363 ASSERT(zio->io_parent == lio); 1364 ASSERT(zio->io_child == NULL); 1365 1366 if (zio->io_error) 1367 return; 1368 1369 if (BP_SHOULD_BYTESWAP(bp)) 1370 byteswap_uint64_array(zio->io_data, zio->io_size); 1371 1372 ASSERT(zio->io_data == gn->gn_gbh); 1373 ASSERT(zio->io_size == SPA_GANGBLOCKSIZE); 1374 ASSERT(gn->gn_gbh->zg_tail.zbt_magic == ZBT_MAGIC); 1375 1376 for (int g = 0; g < SPA_GBH_NBLKPTRS; g++) { 1377 blkptr_t *gbp = &gn->gn_gbh->zg_blkptr[g]; 1378 if (!BP_IS_GANG(gbp)) 1379 continue; 1380 zio_gang_tree_assemble(lio, gbp, &gn->gn_child[g]); 1381 } 1382} 1383 1384static void 1385zio_gang_tree_issue(zio_t *pio, zio_gang_node_t *gn, blkptr_t *bp, void *data) 1386{ 1387 zio_t *lio = pio->io_logical; 1388 zio_t *zio; 1389 1390 ASSERT(BP_IS_GANG(bp) == !!gn); 1391 ASSERT(BP_GET_CHECKSUM(bp) == BP_GET_CHECKSUM(lio->io_bp)); 1392 ASSERT(BP_GET_LSIZE(bp) == BP_GET_PSIZE(bp) || gn == lio->io_gang_tree); 1393 1394 /* 1395 * If you're a gang header, your data is in gn->gn_gbh. 1396 * If you're a gang member, your data is in 'data' and gn == NULL. 1397 */ 1398 zio = zio_gang_issue_func[lio->io_type](pio, bp, gn, data); 1399 1400 if (gn != NULL) { 1401 ASSERT(gn->gn_gbh->zg_tail.zbt_magic == ZBT_MAGIC); 1402 1403 for (int g = 0; g < SPA_GBH_NBLKPTRS; g++) { 1404 blkptr_t *gbp = &gn->gn_gbh->zg_blkptr[g]; 1405 if (BP_IS_HOLE(gbp)) 1406 continue; 1407 zio_gang_tree_issue(zio, gn->gn_child[g], gbp, data); 1408 data = (char *)data + BP_GET_PSIZE(gbp); 1409 } 1410 } 1411 1412 if (gn == lio->io_gang_tree) 1413 ASSERT3P((char *)lio->io_data + lio->io_size, ==, data); 1414 1415 if (zio != pio) 1416 zio_nowait(zio); 1417} 1418 1419static int 1420zio_gang_assemble(zio_t *zio) 1421{ 1422 blkptr_t *bp = zio->io_bp; 1423 1424 ASSERT(BP_IS_GANG(bp) && zio == zio->io_logical); 1425 1426 zio_gang_tree_assemble(zio, bp, &zio->io_gang_tree); 1427 1428 return (ZIO_PIPELINE_CONTINUE); 1429} 1430 1431static int 1432zio_gang_issue(zio_t *zio) 1433{ 1434 zio_t *lio = zio->io_logical; 1435 blkptr_t *bp = zio->io_bp; 1436 1437 if (zio_wait_for_children(zio, ZIO_CHILD_GANG, ZIO_WAIT_DONE)) 1438 return (ZIO_PIPELINE_STOP); 1439 1440 ASSERT(BP_IS_GANG(bp) && zio == lio); 1441 1442 if (zio->io_child_error[ZIO_CHILD_GANG] == 0) 1443 zio_gang_tree_issue(lio, lio->io_gang_tree, bp, lio->io_data); 1444 else 1445 zio_gang_tree_free(&lio->io_gang_tree); 1446 1447 zio->io_pipeline = ZIO_INTERLOCK_PIPELINE; 1448 1449 return (ZIO_PIPELINE_CONTINUE); 1450} 1451 1452static void 1453zio_write_gang_member_ready(zio_t *zio) 1454{ 1455 zio_t *pio = zio->io_parent; 1456 zio_t *lio = zio->io_logical; 1457 dva_t *cdva = zio->io_bp->blk_dva; 1458 dva_t *pdva = pio->io_bp->blk_dva; 1459 uint64_t asize; 1460 1461 if (BP_IS_HOLE(zio->io_bp)) 1462 return; 1463 1464 ASSERT(BP_IS_HOLE(&zio->io_bp_orig)); 1465 1466 ASSERT(zio->io_child_type == ZIO_CHILD_GANG); 1467 ASSERT3U(zio->io_prop.zp_ndvas, ==, lio->io_prop.zp_ndvas); 1468 ASSERT3U(zio->io_prop.zp_ndvas, <=, BP_GET_NDVAS(zio->io_bp)); 1469 ASSERT3U(pio->io_prop.zp_ndvas, <=, BP_GET_NDVAS(pio->io_bp)); 1470 ASSERT3U(BP_GET_NDVAS(zio->io_bp), <=, BP_GET_NDVAS(pio->io_bp)); 1471 1472 mutex_enter(&pio->io_lock); 1473 for (int d = 0; d < BP_GET_NDVAS(zio->io_bp); d++) { 1474 ASSERT(DVA_GET_GANG(&pdva[d])); 1475 asize = DVA_GET_ASIZE(&pdva[d]); 1476 asize += DVA_GET_ASIZE(&cdva[d]); 1477 DVA_SET_ASIZE(&pdva[d], asize); 1478 } 1479 mutex_exit(&pio->io_lock); 1480} 1481 1482static int 1483zio_write_gang_block(zio_t *pio) 1484{ 1485 spa_t *spa = pio->io_spa; 1486 blkptr_t *bp = pio->io_bp; 1487 zio_t *lio = pio->io_logical; 1488 zio_t *zio; 1489 zio_gang_node_t *gn, **gnpp; 1490 zio_gbh_phys_t *gbh; 1491 uint64_t txg = pio->io_txg; 1492 uint64_t resid = pio->io_size; 1493 uint64_t lsize; 1494 int ndvas = lio->io_prop.zp_ndvas; 1495 int gbh_ndvas = MIN(ndvas + 1, spa_max_replication(spa)); 1496 zio_prop_t zp; 1497 int error; 1498 1499 error = metaslab_alloc(spa, spa->spa_normal_class, SPA_GANGBLOCKSIZE, 1500 bp, gbh_ndvas, txg, pio == lio ? NULL : lio->io_bp, 1501 METASLAB_HINTBP_FAVOR | METASLAB_GANG_HEADER); 1502 if (error) { 1503 pio->io_error = error; 1504 return (ZIO_PIPELINE_CONTINUE); 1505 } 1506 1507 if (pio == lio) { 1508 gnpp = &lio->io_gang_tree; 1509 } else { 1510 gnpp = pio->io_private; 1511 ASSERT(pio->io_ready == zio_write_gang_member_ready); 1512 } 1513 1514 gn = zio_gang_node_alloc(gnpp); 1515 gbh = gn->gn_gbh; 1516 bzero(gbh, SPA_GANGBLOCKSIZE); 1517 1518 /* 1519 * Create the gang header. 1520 */ 1521 zio = zio_rewrite(pio, spa, txg, bp, gbh, SPA_GANGBLOCKSIZE, NULL, NULL, 1522 pio->io_priority, ZIO_GANG_CHILD_FLAGS(pio), &pio->io_bookmark); 1523 1524 /* 1525 * Create and nowait the gang children. 1526 */ 1527 for (int g = 0; resid != 0; resid -= lsize, g++) { 1528 lsize = P2ROUNDUP(resid / (SPA_GBH_NBLKPTRS - g), 1529 SPA_MINBLOCKSIZE); 1530 ASSERT(lsize >= SPA_MINBLOCKSIZE && lsize <= resid); 1531 1532 zp.zp_checksum = lio->io_prop.zp_checksum; 1533 zp.zp_compress = ZIO_COMPRESS_OFF; 1534 zp.zp_type = DMU_OT_NONE; 1535 zp.zp_level = 0; 1536 zp.zp_ndvas = lio->io_prop.zp_ndvas; 1537 1538 zio_nowait(zio_write(zio, spa, txg, &gbh->zg_blkptr[g], 1539 (char *)pio->io_data + (pio->io_size - resid), lsize, &zp, 1540 zio_write_gang_member_ready, NULL, &gn->gn_child[g], 1541 pio->io_priority, ZIO_GANG_CHILD_FLAGS(pio), 1542 &pio->io_bookmark)); 1543 } 1544 1545 /* 1546 * Set pio's pipeline to just wait for zio to finish. 1547 */ 1548 pio->io_pipeline = ZIO_INTERLOCK_PIPELINE; 1549 1550 zio_nowait(zio); 1551 1552 return (ZIO_PIPELINE_CONTINUE); 1553} 1554 1555/* 1556 * ========================================================================== 1557 * Allocate and free blocks 1558 * ========================================================================== 1559 */ 1560 1561static int 1562zio_dva_allocate(zio_t *zio) 1563{ 1564 spa_t *spa = zio->io_spa; 1565 metaslab_class_t *mc = spa->spa_normal_class; 1566 blkptr_t *bp = zio->io_bp; 1567 int error; 1568 1569 ASSERT(BP_IS_HOLE(bp)); 1570 ASSERT3U(BP_GET_NDVAS(bp), ==, 0); 1571 ASSERT3U(zio->io_prop.zp_ndvas, >, 0); 1572 ASSERT3U(zio->io_prop.zp_ndvas, <=, spa_max_replication(spa)); 1573 ASSERT3U(zio->io_size, ==, BP_GET_PSIZE(bp)); 1574 1575 error = metaslab_alloc(spa, mc, zio->io_size, bp, 1576 zio->io_prop.zp_ndvas, zio->io_txg, NULL, 0); 1577 1578 if (error) { 1579 if (error == ENOSPC && zio->io_size > SPA_MINBLOCKSIZE) 1580 return (zio_write_gang_block(zio)); 1581 zio->io_error = error; 1582 } 1583 1584 return (ZIO_PIPELINE_CONTINUE); 1585} 1586 1587static int 1588zio_dva_free(zio_t *zio) 1589{ 1590 metaslab_free(zio->io_spa, zio->io_bp, zio->io_txg, B_FALSE); 1591 1592 return (ZIO_PIPELINE_CONTINUE); 1593} 1594 1595static int 1596zio_dva_claim(zio_t *zio) 1597{ 1598 int error; 1599 1600 error = metaslab_claim(zio->io_spa, zio->io_bp, zio->io_txg); 1601 if (error) 1602 zio->io_error = error; 1603 1604 return (ZIO_PIPELINE_CONTINUE); 1605} 1606 1607/* 1608 * Undo an allocation. This is used by zio_done() when an I/O fails 1609 * and we want to give back the block we just allocated. 1610 * This handles both normal blocks and gang blocks. 1611 */ 1612static void 1613zio_dva_unallocate(zio_t *zio, zio_gang_node_t *gn, blkptr_t *bp) 1614{ 1615 spa_t *spa = zio->io_spa; 1616 boolean_t now = !(zio->io_flags & ZIO_FLAG_IO_REWRITE); 1617 1618 ASSERT(bp->blk_birth == zio->io_txg || BP_IS_HOLE(bp)); 1619 1620 if (zio->io_bp == bp && !now) { 1621 /* 1622 * This is a rewrite for sync-to-convergence. 1623 * We can't do a metaslab_free(NOW) because bp wasn't allocated 1624 * during this sync pass, which means that metaslab_sync() 1625 * already committed the allocation. 1626 */ 1627 ASSERT(DVA_EQUAL(BP_IDENTITY(bp), 1628 BP_IDENTITY(&zio->io_bp_orig))); 1629 ASSERT(spa_sync_pass(spa) > 1); 1630 1631 if (BP_IS_GANG(bp) && gn == NULL) { 1632 /* 1633 * This is a gang leader whose gang header(s) we 1634 * couldn't read now, so defer the free until later. 1635 * The block should still be intact because without 1636 * the headers, we'd never even start the rewrite. 1637 */ 1638 bplist_enqueue_deferred(&spa->spa_sync_bplist, bp); 1639 return; 1640 } 1641 } 1642 1643 if (!BP_IS_HOLE(bp)) 1644 metaslab_free(spa, bp, bp->blk_birth, now); 1645 1646 if (gn != NULL) { 1647 for (int g = 0; g < SPA_GBH_NBLKPTRS; g++) { 1648 zio_dva_unallocate(zio, gn->gn_child[g], 1649 &gn->gn_gbh->zg_blkptr[g]); 1650 } 1651 } 1652} 1653 1654/* 1655 * Try to allocate an intent log block. Return 0 on success, errno on failure. 1656 */ 1657int 1658zio_alloc_blk(spa_t *spa, uint64_t size, blkptr_t *new_bp, blkptr_t *old_bp, 1659 uint64_t txg) 1660{ 1661 int error; 1662 1663 error = metaslab_alloc(spa, spa->spa_log_class, size, 1664 new_bp, 1, txg, old_bp, METASLAB_HINTBP_AVOID); 1665 1666 if (error) 1667 error = metaslab_alloc(spa, spa->spa_normal_class, size, 1668 new_bp, 1, txg, old_bp, METASLAB_HINTBP_AVOID); 1669 1670 if (error == 0) { 1671 BP_SET_LSIZE(new_bp, size); 1672 BP_SET_PSIZE(new_bp, size); 1673 BP_SET_COMPRESS(new_bp, ZIO_COMPRESS_OFF); 1674 BP_SET_CHECKSUM(new_bp, ZIO_CHECKSUM_ZILOG); 1675 BP_SET_TYPE(new_bp, DMU_OT_INTENT_LOG); 1676 BP_SET_LEVEL(new_bp, 0); 1677 BP_SET_BYTEORDER(new_bp, ZFS_HOST_BYTEORDER); 1678 } 1679 1680 return (error); 1681} 1682 1683/* 1684 * Free an intent log block. We know it can't be a gang block, so there's 1685 * nothing to do except metaslab_free() it. 1686 */ 1687void 1688zio_free_blk(spa_t *spa, blkptr_t *bp, uint64_t txg) 1689{ 1690 ASSERT(!BP_IS_GANG(bp)); 1691 1692 metaslab_free(spa, bp, txg, B_FALSE); 1693} 1694 1695/* 1696 * ========================================================================== 1697 * Read and write to physical devices 1698 * ========================================================================== 1699 */ 1700 1701static void 1702zio_vdev_io_probe_done(zio_t *zio) 1703{ 1704 zio_t *dio; 1705 vdev_t *vd = zio->io_private; 1706 1707 mutex_enter(&vd->vdev_probe_lock); 1708 ASSERT(vd->vdev_probe_zio == zio); 1709 vd->vdev_probe_zio = NULL; 1710 mutex_exit(&vd->vdev_probe_lock); 1711 1712 while ((dio = zio->io_delegate_list) != NULL) { 1713 zio->io_delegate_list = dio->io_delegate_next; 1714 dio->io_delegate_next = NULL; 1715 if (!vdev_accessible(vd, dio)) 1716 dio->io_error = ENXIO; 1717 zio_execute(dio); 1718 } 1719} 1720 1721/* 1722 * Probe the device to determine whether I/O failure is specific to this 1723 * zio (e.g. a bad sector) or affects the entire vdev (e.g. unplugged). 1724 */ 1725static int 1726zio_vdev_io_probe(zio_t *zio) 1727{ 1728 vdev_t *vd = zio->io_vd; 1729 zio_t *pio = NULL; 1730 boolean_t created_pio = B_FALSE; 1731 1732 /* 1733 * Don't probe the probe. 1734 */ 1735 if (zio->io_flags & ZIO_FLAG_PROBE) 1736 return (ZIO_PIPELINE_CONTINUE); 1737 1738 /* 1739 * To prevent 'probe storms' when a device fails, we create 1740 * just one probe i/o at a time. All zios that want to probe 1741 * this vdev will join the probe zio's io_delegate_list. 1742 */ 1743 mutex_enter(&vd->vdev_probe_lock); 1744 1745 if ((pio = vd->vdev_probe_zio) == NULL) { 1746 vd->vdev_probe_zio = pio = zio_root(zio->io_spa, 1747 zio_vdev_io_probe_done, vd, ZIO_FLAG_CANFAIL); 1748 created_pio = B_TRUE; 1749 vd->vdev_probe_wanted = B_TRUE; 1750 spa_async_request(zio->io_spa, SPA_ASYNC_PROBE); 1751 } 1752 1753 zio->io_delegate_next = pio->io_delegate_list; 1754 pio->io_delegate_list = zio; 1755 1756 mutex_exit(&vd->vdev_probe_lock); 1757 1758 if (created_pio) { 1759 zio_nowait(vdev_probe(vd, pio)); 1760 zio_nowait(pio); 1761 } 1762 1763 return (ZIO_PIPELINE_STOP); 1764} 1765 1766static int 1767zio_vdev_io_start(zio_t *zio) 1768{ 1769 vdev_t *vd = zio->io_vd; 1770 uint64_t align; 1771 spa_t *spa = zio->io_spa; 1772 1773 ASSERT(zio->io_error == 0); 1774 ASSERT(zio->io_child_error[ZIO_CHILD_VDEV] == 0); 1775 1776 if (vd == NULL) { 1777 if (!(zio->io_flags & ZIO_FLAG_CONFIG_WRITER)) 1778 spa_config_enter(spa, SCL_ZIO, zio, RW_READER); 1779 1780 /* 1781 * The mirror_ops handle multiple DVAs in a single BP. 1782 */ 1783 return (vdev_mirror_ops.vdev_op_io_start(zio)); 1784 } 1785 1786 align = 1ULL << vd->vdev_top->vdev_ashift; 1787 1788 if (P2PHASE(zio->io_size, align) != 0) { 1789 uint64_t asize = P2ROUNDUP(zio->io_size, align); 1790 char *abuf = zio_buf_alloc(asize); 1791 ASSERT(vd == vd->vdev_top); 1792 if (zio->io_type == ZIO_TYPE_WRITE) { 1793 bcopy(zio->io_data, abuf, zio->io_size); 1794 bzero(abuf + zio->io_size, asize - zio->io_size); 1795 } 1796 zio_push_transform(zio, abuf, asize, asize, zio_subblock); 1797 } 1798 1799 ASSERT(P2PHASE(zio->io_offset, align) == 0); 1800 ASSERT(P2PHASE(zio->io_size, align) == 0); 1801 ASSERT(zio->io_type != ZIO_TYPE_WRITE || (spa_mode & FWRITE)); 1802 1803 if (vd->vdev_ops->vdev_op_leaf && 1804 (zio->io_type == ZIO_TYPE_READ || zio->io_type == ZIO_TYPE_WRITE)) { 1805 1806 if (zio->io_type == ZIO_TYPE_READ && vdev_cache_read(zio) == 0) 1807 return (ZIO_PIPELINE_STOP); 1808 1809 if ((zio = vdev_queue_io(zio)) == NULL) 1810 return (ZIO_PIPELINE_STOP); 1811 1812 if (!vdev_accessible(vd, zio)) { 1813 zio->io_error = ENXIO; 1814 zio_interrupt(zio); 1815 return (ZIO_PIPELINE_STOP); 1816 } 1817 1818 } 1819 1820 return (vd->vdev_ops->vdev_op_io_start(zio)); 1821} 1822 1823static int 1824zio_vdev_io_done(zio_t *zio) 1825{ 1826 vdev_t *vd = zio->io_vd; 1827 vdev_ops_t *ops = vd ? vd->vdev_ops : &vdev_mirror_ops; 1828 boolean_t unexpected_error = B_FALSE; 1829 1830 if (zio_wait_for_children(zio, ZIO_CHILD_VDEV, ZIO_WAIT_DONE)) 1831 return (ZIO_PIPELINE_STOP); 1832 1833 ASSERT(zio->io_type == ZIO_TYPE_READ || zio->io_type == ZIO_TYPE_WRITE); 1834 1835 if (vd != NULL && vd->vdev_ops->vdev_op_leaf) { 1836 1837 vdev_queue_io_done(zio); 1838 1839 if (zio->io_type == ZIO_TYPE_WRITE) 1840 vdev_cache_write(zio); 1841 1842 if (zio_injection_enabled && zio->io_error == 0) 1843 zio->io_error = zio_handle_device_injection(vd, EIO); 1844 1845 if (zio_injection_enabled && zio->io_error == 0) 1846 zio->io_error = zio_handle_label_injection(zio, EIO); 1847 1848 if (zio->io_error) { 1849 if (!vdev_accessible(vd, zio)) { 1850 zio->io_error = ENXIO; 1851 } else { 1852 unexpected_error = B_TRUE; 1853 } 1854 } 1855 } 1856 1857 ops->vdev_op_io_done(zio); 1858 1859 if (unexpected_error) 1860 return (zio_vdev_io_probe(zio)); 1861 1862 return (ZIO_PIPELINE_CONTINUE); 1863} 1864 1865static int 1866zio_vdev_io_assess(zio_t *zio) 1867{ 1868 vdev_t *vd = zio->io_vd; 1869 1870 if (zio_wait_for_children(zio, ZIO_CHILD_VDEV, ZIO_WAIT_DONE)) 1871 return (ZIO_PIPELINE_STOP); 1872 1873 if (vd == NULL && !(zio->io_flags & ZIO_FLAG_CONFIG_WRITER)) 1874 spa_config_exit(zio->io_spa, SCL_ZIO, zio); 1875 1876 if (zio->io_vsd != NULL) { 1877 zio->io_vsd_free(zio); 1878 zio->io_vsd = NULL; 1879 } 1880 1881 if (zio_injection_enabled && zio->io_error == 0) 1882 zio->io_error = zio_handle_fault_injection(zio, EIO); 1883 1884 /* 1885 * If the I/O failed, determine whether we should attempt to retry it. 1886 */ 1887 if (zio->io_error && vd == NULL && 1888 !(zio->io_flags & (ZIO_FLAG_DONT_RETRY | ZIO_FLAG_IO_RETRY))) { 1889 ASSERT(!(zio->io_flags & ZIO_FLAG_DONT_QUEUE)); /* not a leaf */ 1890 ASSERT(!(zio->io_flags & ZIO_FLAG_IO_BYPASS)); /* not a leaf */ 1891 zio->io_error = 0; 1892 zio->io_flags |= ZIO_FLAG_IO_RETRY | 1893 ZIO_FLAG_DONT_CACHE | ZIO_FLAG_DONT_AGGREGATE; 1894 zio->io_stage = ZIO_STAGE_VDEV_IO_START - 1; 1895 zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE); 1896 return (ZIO_PIPELINE_STOP); 1897 } 1898 1899 /* 1900 * If we got an error on a leaf device, convert it to ENXIO 1901 * if the device is not accessible at all. 1902 */ 1903 if (zio->io_error && vd != NULL && vd->vdev_ops->vdev_op_leaf && 1904 !vdev_accessible(vd, zio)) 1905 zio->io_error = ENXIO; 1906 1907 /* 1908 * If we can't write to an interior vdev (mirror or RAID-Z), 1909 * set vdev_cant_write so that we stop trying to allocate from it. 1910 */ 1911 if (zio->io_error == ENXIO && zio->io_type == ZIO_TYPE_WRITE && 1912 vd != NULL && !vd->vdev_ops->vdev_op_leaf) 1913 vd->vdev_cant_write = B_TRUE; 1914 1915 if (zio->io_error) 1916 zio->io_pipeline = ZIO_INTERLOCK_PIPELINE; 1917 1918 return (ZIO_PIPELINE_CONTINUE); 1919} 1920 1921void 1922zio_vdev_io_reissue(zio_t *zio) 1923{ 1924 ASSERT(zio->io_stage == ZIO_STAGE_VDEV_IO_START); 1925 ASSERT(zio->io_error == 0); 1926 1927 zio->io_stage--; 1928} 1929 1930void 1931zio_vdev_io_redone(zio_t *zio) 1932{ 1933 ASSERT(zio->io_stage == ZIO_STAGE_VDEV_IO_DONE); 1934 1935 zio->io_stage--; 1936} 1937 1938void 1939zio_vdev_io_bypass(zio_t *zio) 1940{ 1941 ASSERT(zio->io_stage == ZIO_STAGE_VDEV_IO_START); 1942 ASSERT(zio->io_error == 0); 1943 1944 zio->io_flags |= ZIO_FLAG_IO_BYPASS; 1945 zio->io_stage = ZIO_STAGE_VDEV_IO_ASSESS - 1; 1946} 1947 1948/* 1949 * ========================================================================== 1950 * Generate and verify checksums 1951 * ========================================================================== 1952 */ 1953static int 1954zio_checksum_generate(zio_t *zio) 1955{ 1956 blkptr_t *bp = zio->io_bp; 1957 enum zio_checksum checksum; 1958 1959 if (bp == NULL) { 1960 /* 1961 * This is zio_write_phys(). 1962 * We're either generating a label checksum, or none at all. 1963 */ 1964 checksum = zio->io_prop.zp_checksum; 1965 1966 if (checksum == ZIO_CHECKSUM_OFF) 1967 return (ZIO_PIPELINE_CONTINUE); 1968 1969 ASSERT(checksum == ZIO_CHECKSUM_LABEL); 1970 } else { 1971 if (BP_IS_GANG(bp) && zio->io_child_type == ZIO_CHILD_GANG) { 1972 ASSERT(!IO_IS_ALLOCATING(zio)); 1973 checksum = ZIO_CHECKSUM_GANG_HEADER; 1974 } else { 1975 checksum = BP_GET_CHECKSUM(bp); 1976 } 1977 } 1978 1979 zio_checksum_compute(zio, checksum, zio->io_data, zio->io_size); 1980 1981 return (ZIO_PIPELINE_CONTINUE); 1982} 1983 1984static int 1985zio_checksum_verify(zio_t *zio) 1986{ 1987 blkptr_t *bp = zio->io_bp; 1988 int error; 1989 1990 if (bp == NULL) { 1991 /* 1992 * This is zio_read_phys(). 1993 * We're either verifying a label checksum, or nothing at all. 1994 */ 1995 if (zio->io_prop.zp_checksum == ZIO_CHECKSUM_OFF) 1996 return (ZIO_PIPELINE_CONTINUE); 1997 1998 ASSERT(zio->io_prop.zp_checksum == ZIO_CHECKSUM_LABEL); 1999 } 2000 2001 if ((error = zio_checksum_error(zio)) != 0) { 2002 zio->io_error = error; 2003 if (!(zio->io_flags & ZIO_FLAG_SPECULATIVE)) { 2004 zfs_ereport_post(FM_EREPORT_ZFS_CHECKSUM, 2005 zio->io_spa, zio->io_vd, zio, 0, 0); 2006 } 2007 } 2008 2009 return (ZIO_PIPELINE_CONTINUE); 2010} 2011 2012/* 2013 * Called by RAID-Z to ensure we don't compute the checksum twice. 2014 */ 2015void 2016zio_checksum_verified(zio_t *zio) 2017{ 2018 zio->io_pipeline &= ~(1U << ZIO_STAGE_CHECKSUM_VERIFY); 2019} 2020 2021/* 2022 * ========================================================================== 2023 * Error rank. Error are ranked in the order 0, ENXIO, ECKSUM, EIO, other. 2024 * An error of 0 indictes success. ENXIO indicates whole-device failure, 2025 * which may be transient (e.g. unplugged) or permament. ECKSUM and EIO 2026 * indicate errors that are specific to one I/O, and most likely permanent. 2027 * Any other error is presumed to be worse because we weren't expecting it. 2028 * ========================================================================== 2029 */ 2030int 2031zio_worst_error(int e1, int e2) 2032{ 2033 static int zio_error_rank[] = { 0, ENXIO, ECKSUM, EIO }; 2034 int r1, r2; 2035 2036 for (r1 = 0; r1 < sizeof (zio_error_rank) / sizeof (int); r1++) 2037 if (e1 == zio_error_rank[r1]) 2038 break; 2039 2040 for (r2 = 0; r2 < sizeof (zio_error_rank) / sizeof (int); r2++) 2041 if (e2 == zio_error_rank[r2]) 2042 break; 2043 2044 return (r1 > r2 ? e1 : e2); 2045} 2046 2047/* 2048 * ========================================================================== 2049 * I/O completion 2050 * ========================================================================== 2051 */ 2052static int 2053zio_ready(zio_t *zio) 2054{ 2055 blkptr_t *bp = zio->io_bp; 2056 zio_t *pio = zio->io_parent; 2057 2058 if (zio->io_ready) { 2059 if (BP_IS_GANG(bp) && 2060 zio_wait_for_children(zio, ZIO_CHILD_GANG, ZIO_WAIT_READY)) 2061 return (ZIO_PIPELINE_STOP); 2062 2063 ASSERT(IO_IS_ALLOCATING(zio)); 2064 ASSERT(bp->blk_birth == zio->io_txg || BP_IS_HOLE(bp)); 2065 ASSERT(zio->io_children[ZIO_CHILD_GANG][ZIO_WAIT_READY] == 0); 2066 2067 zio->io_ready(zio); 2068 } 2069 2070 if (bp != NULL && bp != &zio->io_bp_copy) 2071 zio->io_bp_copy = *bp; 2072 2073 if (zio->io_error) 2074 zio->io_pipeline = ZIO_INTERLOCK_PIPELINE; 2075 2076 if (pio != NULL) 2077 zio_notify_parent(pio, zio, ZIO_WAIT_READY); 2078 2079 return (ZIO_PIPELINE_CONTINUE); 2080} 2081 2082static int 2083zio_done(zio_t *zio) 2084{ 2085 spa_t *spa = zio->io_spa; 2086 zio_t *pio = zio->io_parent; 2087 zio_t *lio = zio->io_logical; 2088 blkptr_t *bp = zio->io_bp; 2089 vdev_t *vd = zio->io_vd; 2090 uint64_t psize = zio->io_size; 2091 2092 /* 2093 * If our of children haven't all completed, 2094 * wait for them and then repeat this pipeline stage. 2095 */ 2096 if (zio_wait_for_children(zio, ZIO_CHILD_VDEV, ZIO_WAIT_DONE) || 2097 zio_wait_for_children(zio, ZIO_CHILD_GANG, ZIO_WAIT_DONE) || 2098 zio_wait_for_children(zio, ZIO_CHILD_LOGICAL, ZIO_WAIT_DONE)) 2099 return (ZIO_PIPELINE_STOP); 2100 2101 for (int c = 0; c < ZIO_CHILD_TYPES; c++) 2102 for (int w = 0; w < ZIO_WAIT_TYPES; w++) 2103 ASSERT(zio->io_children[c][w] == 0); 2104 2105 if (bp != NULL) { 2106 ASSERT(bp->blk_pad[0] == 0); 2107 ASSERT(bp->blk_pad[1] == 0); 2108 ASSERT(bp->blk_pad[2] == 0); 2109 ASSERT(bcmp(bp, &zio->io_bp_copy, sizeof (blkptr_t)) == 0 || 2110 (pio != NULL && bp == pio->io_bp)); 2111 if (zio->io_type == ZIO_TYPE_WRITE && !BP_IS_HOLE(bp) && 2112 !(zio->io_flags & ZIO_FLAG_IO_REPAIR)) { 2113 ASSERT(!BP_SHOULD_BYTESWAP(bp)); 2114 ASSERT3U(zio->io_prop.zp_ndvas, <=, BP_GET_NDVAS(bp)); 2115 ASSERT(BP_COUNT_GANG(bp) == 0 || 2116 (BP_COUNT_GANG(bp) == BP_GET_NDVAS(bp))); 2117 } 2118 } 2119 2120 /* 2121 * If there were child vdev or gang errors, they apply to us now. 2122 */ 2123 zio_inherit_child_errors(zio, ZIO_CHILD_VDEV); 2124 zio_inherit_child_errors(zio, ZIO_CHILD_GANG); 2125 2126 zio_pop_transforms(zio); /* note: may set zio->io_error */ 2127 2128 vdev_stat_update(zio, psize); 2129 2130 if (zio->io_error) { 2131 /* 2132 * If this I/O is attached to a particular vdev, 2133 * generate an error message describing the I/O failure 2134 * at the block level. We ignore these errors if the 2135 * device is currently unavailable. 2136 */ 2137 if (zio->io_error != ECKSUM && vd != NULL && !vdev_is_dead(vd)) 2138 zfs_ereport_post(FM_EREPORT_ZFS_IO, spa, vd, zio, 0, 0); 2139 2140 if ((zio->io_error == EIO || 2141 !(zio->io_flags & ZIO_FLAG_SPECULATIVE)) && zio == lio) { 2142 /* 2143 * For logical I/O requests, tell the SPA to log the 2144 * error and generate a logical data ereport. 2145 */ 2146 spa_log_error(spa, zio); 2147 zfs_ereport_post(FM_EREPORT_ZFS_DATA, spa, NULL, zio, 2148 0, 0); 2149 } 2150 } 2151 2152 if (zio->io_error && zio == lio) { 2153 /* 2154 * Determine whether zio should be reexecuted. This will 2155 * propagate all the way to the root via zio_notify_parent(). 2156 */ 2157 ASSERT(vd == NULL && bp != NULL); 2158 2159 if (IO_IS_ALLOCATING(zio)) 2160 if (zio->io_error != ENOSPC) 2161 zio->io_reexecute |= ZIO_REEXECUTE_NOW; 2162 else 2163 zio->io_reexecute |= ZIO_REEXECUTE_SUSPEND; 2164 2165 if ((zio->io_type == ZIO_TYPE_READ || 2166 zio->io_type == ZIO_TYPE_FREE) && 2167 zio->io_error == ENXIO && 2168 spa_get_failmode(spa) != ZIO_FAILURE_MODE_CONTINUE) 2169 zio->io_reexecute |= ZIO_REEXECUTE_SUSPEND; 2170 2171 if (!(zio->io_flags & ZIO_FLAG_CANFAIL) && !zio->io_reexecute) 2172 zio->io_reexecute |= ZIO_REEXECUTE_SUSPEND; 2173 } 2174 2175 /* 2176 * If there were logical child errors, they apply to us now. 2177 * We defer this until now to avoid conflating logical child 2178 * errors with errors that happened to the zio itself when 2179 * updating vdev stats and reporting FMA events above. 2180 */ 2181 zio_inherit_child_errors(zio, ZIO_CHILD_LOGICAL); 2182 2183 if (zio->io_reexecute) { 2184 /* 2185 * This is a logical I/O that wants to reexecute. 2186 * 2187 * Reexecute is top-down. When an i/o fails, if it's not 2188 * the root, it simply notifies its parent and sticks around. 2189 * The parent, seeing that it still has children in zio_done(), 2190 * does the same. This percolates all the way up to the root. 2191 * The root i/o will reexecute or suspend the entire tree. 2192 * 2193 * This approach ensures that zio_reexecute() honors 2194 * all the original i/o dependency relationships, e.g. 2195 * parents not executing until children are ready. 2196 */ 2197 ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL); 2198 2199 if (IO_IS_ALLOCATING(zio)) 2200 zio_dva_unallocate(zio, zio->io_gang_tree, bp); 2201 2202 zio_gang_tree_free(&zio->io_gang_tree); 2203 2204 if (pio != NULL) { 2205 /* 2206 * We're not a root i/o, so there's nothing to do 2207 * but notify our parent. Don't propagate errors 2208 * upward since we haven't permanently failed yet. 2209 */ 2210 zio->io_flags |= ZIO_FLAG_DONT_PROPAGATE; 2211 zio_notify_parent(pio, zio, ZIO_WAIT_DONE); 2212 } else if (zio->io_reexecute & ZIO_REEXECUTE_SUSPEND) { 2213 /* 2214 * We'd fail again if we reexecuted now, so suspend 2215 * until conditions improve (e.g. device comes online). 2216 */ 2217 zio_suspend(spa, zio); 2218 } else { 2219 /* 2220 * Reexecution is potentially a huge amount of work. 2221 * Hand it off to the otherwise-unused claim taskq. 2222 */ 2223 (void) taskq_dispatch( 2224 spa->spa_zio_taskq[ZIO_TYPE_CLAIM][ZIO_TASKQ_ISSUE], 2225 (task_func_t *)zio_reexecute, zio, TQ_SLEEP); 2226 } 2227 return (ZIO_PIPELINE_STOP); 2228 } 2229 2230 ASSERT(zio->io_child == NULL); 2231 ASSERT(zio->io_reexecute == 0); 2232 ASSERT(zio->io_error == 0 || (zio->io_flags & ZIO_FLAG_CANFAIL)); 2233 2234 if (zio->io_done) 2235 zio->io_done(zio); 2236 2237 zio_gang_tree_free(&zio->io_gang_tree); 2238 2239 ASSERT(zio->io_delegate_list == NULL); 2240 ASSERT(zio->io_delegate_next == NULL); 2241 2242 if (pio != NULL) { 2243 zio_remove_child(pio, zio); 2244 zio_notify_parent(pio, zio, ZIO_WAIT_DONE); 2245 } 2246 2247 if (zio->io_waiter != NULL) { 2248 mutex_enter(&zio->io_lock); 2249 zio->io_executor = NULL; 2250 cv_broadcast(&zio->io_cv); 2251 mutex_exit(&zio->io_lock); 2252 } else { 2253 zio_destroy(zio); 2254 } 2255 2256 return (ZIO_PIPELINE_STOP); 2257} 2258 2259/* 2260 * ========================================================================== 2261 * I/O pipeline definition 2262 * ========================================================================== 2263 */ 2264static zio_pipe_stage_t *zio_pipeline[ZIO_STAGES] = { 2265 NULL, 2266 zio_issue_async, 2267 zio_read_bp_init, 2268 zio_write_bp_init, 2269 zio_checksum_generate, 2270 zio_gang_assemble, 2271 zio_gang_issue, 2272 zio_dva_allocate, 2273 zio_dva_free, 2274 zio_dva_claim, 2275 zio_ready, 2276 zio_vdev_io_start, 2277 zio_vdev_io_done, 2278 zio_vdev_io_assess, 2279 zio_checksum_verify, 2280 zio_done 2281}; 2282