zio.c revision 219089
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 */

#include <sys/zfs_context.h>
#include <sys/fm/fs/zfs.h>
#include <sys/spa.h>
#include <sys/txg.h>
#include <sys/spa_impl.h>
#include <sys/vdev_impl.h>
#include <sys/zio_impl.h>
#include <sys/zio_compress.h>
#include <sys/zio_checksum.h>
#include <sys/dmu_objset.h>
#include <sys/arc.h>
#include <sys/ddt.h>

SYSCTL_DECL(_vfs_zfs);
SYSCTL_NODE(_vfs_zfs, OID_AUTO, zio, CTLFLAG_RW, 0, "ZFS ZIO");
static int zio_use_uma = 0;
TUNABLE_INT("vfs.zfs.zio.use_uma", &zio_use_uma);
SYSCTL_INT(_vfs_zfs_zio, OID_AUTO, use_uma, CTLFLAG_RDTUN, &zio_use_uma, 0,
    "Use uma(9) for ZIO allocations");

/*
 * ==========================================================================
 * I/O priority table
 * ==========================================================================
 */
uint8_t zio_priority_table[ZIO_PRIORITY_TABLE_SIZE] = {
	0,	/* ZIO_PRIORITY_NOW */
	0,	/* ZIO_PRIORITY_SYNC_READ */
	0,	/* ZIO_PRIORITY_SYNC_WRITE */
	0,	/* ZIO_PRIORITY_LOG_WRITE */
	1,	/* ZIO_PRIORITY_CACHE_FILL */
	1,	/* ZIO_PRIORITY_AGG */
	4,	/* ZIO_PRIORITY_FREE */
	4,	/* ZIO_PRIORITY_ASYNC_WRITE */
	6,	/* ZIO_PRIORITY_ASYNC_READ */
	10,	/* ZIO_PRIORITY_RESILVER */
	20,	/* ZIO_PRIORITY_SCRUB */
	2,	/* ZIO_PRIORITY_DDT_PREFETCH */
};

/*
 * ==========================================================================
 * I/O type descriptions
 * ==========================================================================
 */
char *zio_type_name[ZIO_TYPES] = {
	"zio_null", "zio_read", "zio_write", "zio_free", "zio_claim",
	"zio_ioctl"
};

/*
 * ==========================================================================
 * I/O kmem caches
 * ==========================================================================
 */
kmem_cache_t *zio_cache;
kmem_cache_t *zio_link_cache;
kmem_cache_t *zio_buf_cache[SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT];
kmem_cache_t *zio_data_buf_cache[SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT];

#ifdef _KERNEL
extern vmem_t *zio_alloc_arena;
#endif

/*
 * An allocating zio is one that either currently has the DVA allocate
 * stage set or will have it later in its lifetime.
 */
#define	IO_IS_ALLOCATING(zio) ((zio)->io_orig_pipeline & ZIO_STAGE_DVA_ALLOCATE)

boolean_t zio_requeue_io_start_cut_in_line = B_TRUE;

#ifdef ZFS_DEBUG
int zio_buf_debug_limit = 16384;
#else
int zio_buf_debug_limit = 0;
#endif

void
zio_init(void)
{
	size_t c;
	zio_cache = kmem_cache_create("zio_cache",
	    sizeof (zio_t), 0, NULL, NULL, NULL, NULL, NULL, 0);
	zio_link_cache = kmem_cache_create("zio_link_cache",
	    sizeof (zio_link_t), 0, NULL, NULL, NULL, NULL, NULL, 0);

	/*
	 * For small buffers, we want a cache for each multiple of
	 * SPA_MINBLOCKSIZE.  For medium-size buffers, we want a cache
	 * for each quarter-power of 2.  For large buffers, we want
	 * a cache for each multiple of PAGESIZE.
	 */
	for (c = 0; c < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT; c++) {
		size_t size = (c + 1) << SPA_MINBLOCKSHIFT;
		size_t p2 = size;
		size_t align = 0;
		size_t cflags = (size > zio_buf_debug_limit) ? KMC_NODEBUG : 0;

		while (p2 & (p2 - 1))
			p2 &= p2 - 1;

		if (size <= 4 * SPA_MINBLOCKSIZE) {
			align = SPA_MINBLOCKSIZE;
		} else if (P2PHASE(size, PAGESIZE) == 0) {
			align = PAGESIZE;
		} else if (P2PHASE(size, p2 >> 2) == 0) {
			align = p2 >> 2;
		}

		if (align != 0) {
			char name[36];
			(void) sprintf(name, "zio_buf_%lu", (ulong_t)size);
			zio_buf_cache[c] = kmem_cache_create(name, size,
			    align, NULL, NULL, NULL, NULL, NULL, cflags);

			/*
			 * Since zio_data bufs do not appear in crash dumps, we
			 * pass KMC_NOTOUCH so that no allocator metadata is
			 * stored with the buffers.
			 */
			(void) sprintf(name, "zio_data_buf_%lu", (ulong_t)size);
			zio_data_buf_cache[c] = kmem_cache_create(name, size,
			    align, NULL, NULL, NULL, NULL, NULL,
			    cflags | KMC_NOTOUCH);
		}
	}

	while (--c != 0) {
		ASSERT(zio_buf_cache[c] != NULL);
		if (zio_buf_cache[c - 1] == NULL)
			zio_buf_cache[c - 1] = zio_buf_cache[c];

		ASSERT(zio_data_buf_cache[c] != NULL);
		if (zio_data_buf_cache[c - 1] == NULL)
			zio_data_buf_cache[c - 1] = zio_data_buf_cache[c];
	}

	zio_inject_init();
}

void
zio_fini(void)
{
	size_t c;
	kmem_cache_t *last_cache = NULL;
	kmem_cache_t *last_data_cache = NULL;

	for (c = 0; c < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT; c++) {
		if (zio_buf_cache[c] != last_cache) {
			last_cache = zio_buf_cache[c];
			kmem_cache_destroy(zio_buf_cache[c]);
		}
		zio_buf_cache[c] = NULL;

		if (zio_data_buf_cache[c] != last_data_cache) {
			last_data_cache = zio_data_buf_cache[c];
			kmem_cache_destroy(zio_data_buf_cache[c]);
		}
		zio_data_buf_cache[c] = NULL;
	}

	kmem_cache_destroy(zio_link_cache);
	kmem_cache_destroy(zio_cache);

	zio_inject_fini();
}

/*
 * ==========================================================================
 * Allocate and free I/O buffers
 * ==========================================================================
 */

/*
 * Use zio_buf_alloc to allocate ZFS metadata.  This data will appear in a
 * crashdump if the kernel panics, so use it judiciously.  Obviously, it's
 * useful to inspect ZFS metadata, but if possible, we should avoid keeping
 * excess / transient data in-core during a crashdump.
 */
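/*
 * Illustrative sizing example (not in the original source): with
 * SPA_MINBLOCKSHIFT == 9, a 12K (12288-byte) request computes
 * c = (12288 - 1) >> 9 == 23, and zio_buf_cache[23] is the cache
 * zio_init() created for size (23 + 1) << 9 == 12288 bytes, i.e.
 * "zio_buf_12288".
 */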
void *
zio_buf_alloc(size_t size)
{
	size_t c = (size - 1) >> SPA_MINBLOCKSHIFT;

	ASSERT(c < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);

	if (zio_use_uma)
		return (kmem_cache_alloc(zio_buf_cache[c], KM_PUSHPAGE));
	else
		return (kmem_alloc(size, KM_SLEEP));
}

/*
 * Use zio_data_buf_alloc to allocate data.  The data will not appear in a
 * crashdump if the kernel panics.  This exists so that we will limit the
 * amount of ZFS data that shows up in a kernel crashdump.  (Thus reducing the
 * amount of kernel heap dumped to disk when the kernel panics)
 */
void *
zio_data_buf_alloc(size_t size)
{
	size_t c = (size - 1) >> SPA_MINBLOCKSHIFT;

	ASSERT(c < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);

	if (zio_use_uma)
		return (kmem_cache_alloc(zio_data_buf_cache[c], KM_PUSHPAGE));
	else
		return (kmem_alloc(size, KM_SLEEP));
}

void
zio_buf_free(void *buf, size_t size)
{
	size_t c = (size - 1) >> SPA_MINBLOCKSHIFT;

	ASSERT(c < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);

	if (zio_use_uma)
		kmem_cache_free(zio_buf_cache[c], buf);
	else
		kmem_free(buf, size);
}

void
zio_data_buf_free(void *buf, size_t size)
{
	size_t c = (size - 1) >> SPA_MINBLOCKSHIFT;

	ASSERT(c < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);

	if (zio_use_uma)
		kmem_cache_free(zio_data_buf_cache[c], buf);
	else
		kmem_free(buf, size);
}

/*
 * ==========================================================================
 * Push and pop I/O transform buffers
 * ==========================================================================
 */
static void
zio_push_transform(zio_t *zio, void *data, uint64_t size, uint64_t bufsize,
    zio_transform_func_t *transform)
{
	zio_transform_t *zt = kmem_alloc(sizeof (zio_transform_t), KM_SLEEP);

	zt->zt_orig_data = zio->io_data;
	zt->zt_orig_size = zio->io_size;
	zt->zt_bufsize = bufsize;
	zt->zt_transform = transform;

	zt->zt_next = zio->io_transform_stack;
	zio->io_transform_stack = zt;

	zio->io_data = data;
	zio->io_size = size;
}

static void
zio_pop_transforms(zio_t *zio)
{
	zio_transform_t *zt;

	while ((zt = zio->io_transform_stack) != NULL) {
		if (zt->zt_transform != NULL)
			zt->zt_transform(zio,
			    zt->zt_orig_data, zt->zt_orig_size);

		if (zt->zt_bufsize != 0)
			zio_buf_free(zio->io_data, zt->zt_bufsize);

		zio->io_data = zt->zt_orig_data;
		zio->io_size = zt->zt_orig_size;
		zio->io_transform_stack = zt->zt_next;

		kmem_free(zt, sizeof (zio_transform_t));
	}
}

/*
 * ==========================================================================
 * I/O transform callbacks for subblocks and decompression
 * ==========================================================================
 */
static void
zio_subblock(zio_t *zio, void *data, uint64_t size)
{
	ASSERT(zio->io_size > size);

	if (zio->io_type == ZIO_TYPE_READ)
		bcopy(zio->io_data, data, size);
}

static void
zio_decompress(zio_t *zio, void *data, uint64_t size)
{
	if (zio->io_error == 0 &&
	    zio_decompress_data(BP_GET_COMPRESS(zio->io_bp),
	    zio->io_data, data, zio->io_size, size) != 0)
		zio->io_error = EIO;
}

/*
 * ==========================================================================
 * I/O parent/child relationships and pipeline interlocks
 * ==========================================================================
 */
/*
 * NOTE - Callers to zio_walk_parents() and zio_walk_children() must
 *        continue calling these functions until they return NULL.
 *        Otherwise, the next caller will pick up the list walk in
 *        some indeterminate state.  (Otherwise every caller would
 *        have to pass in a cookie to keep the state represented by
 *        io_walk_link, which gets annoying.)
 */
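/*
 * A minimal usage sketch (illustrative, not part of the original file):
 * per the NOTE above, a walk must be run to completion so io_walk_link
 * is left NULL for the next walker, e.g.:
 *
 *	zio_t *pio;
 *	while ((pio = zio_walk_parents(cio)) != NULL)
 *		examine(pio);		(do not break out early)
 *
 * Here examine() stands in for arbitrary caller code; it is hypothetical.
 */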
zio_t *
zio_walk_parents(zio_t *cio)
{
	zio_link_t *zl = cio->io_walk_link;
	list_t *pl = &cio->io_parent_list;

	zl = (zl == NULL) ? list_head(pl) : list_next(pl, zl);
	cio->io_walk_link = zl;

	if (zl == NULL)
		return (NULL);

	ASSERT(zl->zl_child == cio);
	return (zl->zl_parent);
}

zio_t *
zio_walk_children(zio_t *pio)
{
	zio_link_t *zl = pio->io_walk_link;
	list_t *cl = &pio->io_child_list;

	zl = (zl == NULL) ? list_head(cl) : list_next(cl, zl);
	pio->io_walk_link = zl;

	if (zl == NULL)
		return (NULL);

	ASSERT(zl->zl_parent == pio);
	return (zl->zl_child);
}

zio_t *
zio_unique_parent(zio_t *cio)
{
	zio_t *pio = zio_walk_parents(cio);

	VERIFY(zio_walk_parents(cio) == NULL);
	return (pio);
}

void
zio_add_child(zio_t *pio, zio_t *cio)
{
	zio_link_t *zl = kmem_cache_alloc(zio_link_cache, KM_SLEEP);

	/*
	 * Logical I/Os can have logical, gang, or vdev children.
	 * Gang I/Os can have gang or vdev children.
	 * Vdev I/Os can only have vdev children.
	 * The following ASSERT captures all of these constraints.
	 */
	ASSERT(cio->io_child_type <= pio->io_child_type);

	zl->zl_parent = pio;
	zl->zl_child = cio;

	mutex_enter(&cio->io_lock);
	mutex_enter(&pio->io_lock);

	ASSERT(pio->io_state[ZIO_WAIT_DONE] == 0);

	for (int w = 0; w < ZIO_WAIT_TYPES; w++)
		pio->io_children[cio->io_child_type][w] += !cio->io_state[w];

	list_insert_head(&pio->io_child_list, zl);
	list_insert_head(&cio->io_parent_list, zl);

	pio->io_child_count++;
	cio->io_parent_count++;

	mutex_exit(&pio->io_lock);
	mutex_exit(&cio->io_lock);
}

static void
zio_remove_child(zio_t *pio, zio_t *cio, zio_link_t *zl)
{
	ASSERT(zl->zl_parent == pio);
	ASSERT(zl->zl_child == cio);

	mutex_enter(&cio->io_lock);
	mutex_enter(&pio->io_lock);

	list_remove(&pio->io_child_list, zl);
	list_remove(&cio->io_parent_list, zl);

	pio->io_child_count--;
	cio->io_parent_count--;

	mutex_exit(&pio->io_lock);
	mutex_exit(&cio->io_lock);

	kmem_cache_free(zio_link_cache, zl);
}

static boolean_t
zio_wait_for_children(zio_t *zio, enum zio_child child, enum zio_wait_type wait)
{
	uint64_t *countp = &zio->io_children[child][wait];
	boolean_t waiting = B_FALSE;

	mutex_enter(&zio->io_lock);
	ASSERT(zio->io_stall == NULL);
	if (*countp != 0) {
		zio->io_stage >>= 1;
		zio->io_stall = countp;
		waiting = B_TRUE;
	}
	mutex_exit(&zio->io_lock);

	return (waiting);
}

static void
zio_notify_parent(zio_t *pio, zio_t *zio, enum zio_wait_type wait)
{
	uint64_t *countp = &pio->io_children[zio->io_child_type][wait];
	int *errorp = &pio->io_child_error[zio->io_child_type];

	mutex_enter(&pio->io_lock);
	if (zio->io_error && !(zio->io_flags & ZIO_FLAG_DONT_PROPAGATE))
		*errorp = zio_worst_error(*errorp, zio->io_error);
	pio->io_reexecute |= zio->io_reexecute;
	ASSERT3U(*countp, >, 0);
	if (--*countp == 0 && pio->io_stall == countp) {
		pio->io_stall = NULL;
		mutex_exit(&pio->io_lock);
		zio_execute(pio);
	} else {
		mutex_exit(&pio->io_lock);
	}
}

static void
zio_inherit_child_errors(zio_t *zio, enum zio_child c)
{
	if (zio->io_child_error[c] != 0 && zio->io_error == 0)
		zio->io_error = zio->io_child_error[c];
}

/*
 * ==========================================================================
 * Create the various types of I/O (read, write, free, etc)
 * ==========================================================================
 */
static zio_t *
zio_create(zio_t *pio, spa_t *spa, uint64_t txg, const blkptr_t *bp,
    void *data, uint64_t size, zio_done_func_t *done, void *private,
    zio_type_t type, int priority, enum zio_flag flags,
    vdev_t *vd, uint64_t offset, const zbookmark_t *zb,
    enum zio_stage stage, enum zio_stage pipeline)
{
	zio_t *zio;

	ASSERT3U(size, <=, SPA_MAXBLOCKSIZE);
	ASSERT(P2PHASE(size, SPA_MINBLOCKSIZE) == 0);
	ASSERT(P2PHASE(offset, SPA_MINBLOCKSIZE) == 0);

	ASSERT(!vd || spa_config_held(spa, SCL_STATE_ALL, RW_READER));
	ASSERT(!bp || !(flags & ZIO_FLAG_CONFIG_WRITER));
	ASSERT(vd || stage == ZIO_STAGE_OPEN);

	zio = kmem_cache_alloc(zio_cache, KM_SLEEP);
	bzero(zio, sizeof (zio_t));

	mutex_init(&zio->io_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&zio->io_cv, NULL, CV_DEFAULT, NULL);

	list_create(&zio->io_parent_list, sizeof (zio_link_t),
	    offsetof(zio_link_t, zl_parent_node));
	list_create(&zio->io_child_list, sizeof (zio_link_t),
	    offsetof(zio_link_t, zl_child_node));

	if (vd != NULL)
		zio->io_child_type = ZIO_CHILD_VDEV;
	else if (flags & ZIO_FLAG_GANG_CHILD)
		zio->io_child_type = ZIO_CHILD_GANG;
	else if (flags & ZIO_FLAG_DDT_CHILD)
		zio->io_child_type = ZIO_CHILD_DDT;
	else
		zio->io_child_type = ZIO_CHILD_LOGICAL;

	if (bp != NULL) {
		zio->io_bp = (blkptr_t *)bp;
		zio->io_bp_copy = *bp;
		zio->io_bp_orig = *bp;
		if (type != ZIO_TYPE_WRITE ||
		    zio->io_child_type == ZIO_CHILD_DDT)
			zio->io_bp = &zio->io_bp_copy;	/* so caller can free */
		if (zio->io_child_type == ZIO_CHILD_LOGICAL)
			zio->io_logical = zio;
		if (zio->io_child_type > ZIO_CHILD_GANG && BP_IS_GANG(bp))
			pipeline |= ZIO_GANG_STAGES;
	}

	zio->io_spa = spa;
	zio->io_txg = txg;
	zio->io_done = done;
	zio->io_private = private;
	zio->io_type = type;
	zio->io_priority = priority;
	zio->io_vd = vd;
	zio->io_offset = offset;
	zio->io_orig_data = zio->io_data = data;
	zio->io_orig_size = zio->io_size = size;
	zio->io_orig_flags = zio->io_flags = flags;
	zio->io_orig_stage = zio->io_stage = stage;
	zio->io_orig_pipeline = zio->io_pipeline = pipeline;

	zio->io_state[ZIO_WAIT_READY] = (stage >= ZIO_STAGE_READY);
	zio->io_state[ZIO_WAIT_DONE] = (stage >= ZIO_STAGE_DONE);

	if (zb != NULL)
		zio->io_bookmark = *zb;

	if (pio != NULL) {
		if (zio->io_logical == NULL)
			zio->io_logical = pio->io_logical;
		if (zio->io_child_type == ZIO_CHILD_GANG)
			zio->io_gang_leader = pio->io_gang_leader;
		zio_add_child(pio, zio);
	}

	return (zio);
}

static void
zio_destroy(zio_t *zio)
{
	list_destroy(&zio->io_parent_list);
	list_destroy(&zio->io_child_list);
	mutex_destroy(&zio->io_lock);
	cv_destroy(&zio->io_cv);
	kmem_cache_free(zio_cache, zio);
}
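
/*
 * A minimal lifecycle sketch (illustrative caller pattern, not part of
 * this file): consumers typically create a root zio, hang children off
 * it with zio_nowait(), and then block in zio_wait() on the root:
 *
 *	zio_t *rio = zio_root(spa, NULL, NULL, ZIO_FLAG_CANFAIL);
 *	zio_nowait(zio_read(rio, spa, bp, buf, size, done_cb, arg,
 *	    ZIO_PRIORITY_SYNC_READ, 0, zb));
 *	error = zio_wait(rio);
 *
 * done_cb, arg, buf, and zb are placeholders for caller-supplied values.
 */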

zio_t *
zio_null(zio_t *pio, spa_t *spa, vdev_t *vd, zio_done_func_t *done,
    void *private, enum zio_flag flags)
{
	zio_t *zio;

	zio = zio_create(pio, spa, 0, NULL, NULL, 0, done, private,
	    ZIO_TYPE_NULL, ZIO_PRIORITY_NOW, flags, vd, 0, NULL,
	    ZIO_STAGE_OPEN, ZIO_INTERLOCK_PIPELINE);

	return (zio);
}

zio_t *
zio_root(spa_t *spa, zio_done_func_t *done, void *private, enum zio_flag flags)
{
	return (zio_null(NULL, spa, NULL, done, private, flags));
}

zio_t *
zio_read(zio_t *pio, spa_t *spa, const blkptr_t *bp,
    void *data, uint64_t size, zio_done_func_t *done, void *private,
    int priority, enum zio_flag flags, const zbookmark_t *zb)
{
	zio_t *zio;

	zio = zio_create(pio, spa, BP_PHYSICAL_BIRTH(bp), bp,
	    data, size, done, private,
	    ZIO_TYPE_READ, priority, flags, NULL, 0, zb,
	    ZIO_STAGE_OPEN, (flags & ZIO_FLAG_DDT_CHILD) ?
	    ZIO_DDT_CHILD_READ_PIPELINE : ZIO_READ_PIPELINE);

	return (zio);
}

zio_t *
zio_write(zio_t *pio, spa_t *spa, uint64_t txg, blkptr_t *bp,
    void *data, uint64_t size, const zio_prop_t *zp,
    zio_done_func_t *ready, zio_done_func_t *done, void *private,
    int priority, enum zio_flag flags, const zbookmark_t *zb)
{
	zio_t *zio;

	ASSERT(zp->zp_checksum >= ZIO_CHECKSUM_OFF &&
	    zp->zp_checksum < ZIO_CHECKSUM_FUNCTIONS &&
	    zp->zp_compress >= ZIO_COMPRESS_OFF &&
	    zp->zp_compress < ZIO_COMPRESS_FUNCTIONS &&
	    zp->zp_type < DMU_OT_NUMTYPES &&
	    zp->zp_level < 32 &&
	    zp->zp_copies > 0 &&
	    zp->zp_copies <= spa_max_replication(spa) &&
	    zp->zp_dedup <= 1 &&
	    zp->zp_dedup_verify <= 1);

	zio = zio_create(pio, spa, txg, bp, data, size, done, private,
	    ZIO_TYPE_WRITE, priority, flags, NULL, 0, zb,
	    ZIO_STAGE_OPEN, (flags & ZIO_FLAG_DDT_CHILD) ?
	    ZIO_DDT_CHILD_WRITE_PIPELINE : ZIO_WRITE_PIPELINE);

	zio->io_ready = ready;
	zio->io_prop = *zp;

	return (zio);
}
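
/*
 * Illustrative zio_prop_t setup for a plain, uncompressed, non-dedup
 * write that satisfies the ASSERTs above (values are examples only;
 * real settings come from the DMU's write policy, and this mirrors the
 * pattern used by zio_write_gang_block() below):
 *
 *	zio_prop_t zp;
 *	zp.zp_checksum = ZIO_CHECKSUM_FLETCHER_4;
 *	zp.zp_compress = ZIO_COMPRESS_OFF;
 *	zp.zp_type = DMU_OT_PLAIN_FILE_CONTENTS;
 *	zp.zp_level = 0;
 *	zp.zp_copies = 1;
 *	zp.zp_dedup = 0;
 *	zp.zp_dedup_verify = 0;
 */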

zio_t *
zio_rewrite(zio_t *pio, spa_t *spa, uint64_t txg, blkptr_t *bp, void *data,
    uint64_t size, zio_done_func_t *done, void *private, int priority,
    enum zio_flag flags, zbookmark_t *zb)
{
	zio_t *zio;

	zio = zio_create(pio, spa, txg, bp, data, size, done, private,
	    ZIO_TYPE_WRITE, priority, flags, NULL, 0, zb,
	    ZIO_STAGE_OPEN, ZIO_REWRITE_PIPELINE);

	return (zio);
}

void
zio_write_override(zio_t *zio, blkptr_t *bp, int copies)
{
	ASSERT(zio->io_type == ZIO_TYPE_WRITE);
	ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
	ASSERT(zio->io_stage == ZIO_STAGE_OPEN);
	ASSERT(zio->io_txg == spa_syncing_txg(zio->io_spa));

	zio->io_prop.zp_copies = copies;
	zio->io_bp_override = bp;
}

void
zio_free(spa_t *spa, uint64_t txg, const blkptr_t *bp)
{
	bplist_append(&spa->spa_free_bplist[txg & TXG_MASK], bp);
}

zio_t *
zio_free_sync(zio_t *pio, spa_t *spa, uint64_t txg, const blkptr_t *bp,
    enum zio_flag flags)
{
	zio_t *zio;

	dprintf_bp(bp, "freeing in txg %llu, pass %u",
	    (longlong_t)txg, spa->spa_sync_pass);

	ASSERT(!BP_IS_HOLE(bp));
	ASSERT(spa_syncing_txg(spa) == txg);
	ASSERT(spa_sync_pass(spa) <= SYNC_PASS_DEFERRED_FREE);

	zio = zio_create(pio, spa, txg, bp, NULL, BP_GET_PSIZE(bp),
	    NULL, NULL, ZIO_TYPE_FREE, ZIO_PRIORITY_FREE, flags,
	    NULL, 0, NULL, ZIO_STAGE_OPEN, ZIO_FREE_PIPELINE);

	return (zio);
}

zio_t *
zio_claim(zio_t *pio, spa_t *spa, uint64_t txg, const blkptr_t *bp,
    zio_done_func_t *done, void *private, enum zio_flag flags)
{
	zio_t *zio;

	/*
	 * A claim is an allocation of a specific block.  Claims are needed
	 * to support immediate writes in the intent log.  The issue is that
	 * immediate writes contain committed data, but in a txg that was
	 * *not* committed.  Upon opening the pool after an unclean shutdown,
	 * the intent log claims all blocks that contain immediate write data
	 * so that the SPA knows they're in use.
	 *
	 * All claims *must* be resolved in the first txg -- before the SPA
	 * starts allocating blocks -- so that nothing is allocated twice.
	 * If txg == 0 we just verify that the block is claimable.
	 */
	ASSERT3U(spa->spa_uberblock.ub_rootbp.blk_birth, <, spa_first_txg(spa));
	ASSERT(txg == spa_first_txg(spa) || txg == 0);
	ASSERT(!BP_GET_DEDUP(bp) || !spa_writeable(spa));	/* zdb(1M) */

	zio = zio_create(pio, spa, txg, bp, NULL, BP_GET_PSIZE(bp),
	    done, private, ZIO_TYPE_CLAIM, ZIO_PRIORITY_NOW, flags,
	    NULL, 0, NULL, ZIO_STAGE_OPEN, ZIO_CLAIM_PIPELINE);

	return (zio);
}

zio_t *
zio_ioctl(zio_t *pio, spa_t *spa, vdev_t *vd, int cmd,
    zio_done_func_t *done, void *private, int priority, enum zio_flag flags)
{
	zio_t *zio;
	int c;

	if (vd->vdev_children == 0) {
		zio = zio_create(pio, spa, 0, NULL, NULL, 0, done, private,
		    ZIO_TYPE_IOCTL, priority, flags, vd, 0, NULL,
		    ZIO_STAGE_OPEN, ZIO_IOCTL_PIPELINE);

		zio->io_cmd = cmd;
	} else {
		zio = zio_null(pio, spa, NULL, NULL, NULL, flags);

		for (c = 0; c < vd->vdev_children; c++)
			zio_nowait(zio_ioctl(zio, spa, vd->vdev_child[c], cmd,
			    done, private, priority, flags));
	}

	return (zio);
}

zio_t *
zio_read_phys(zio_t *pio, vdev_t *vd, uint64_t offset, uint64_t size,
    void *data, int checksum, zio_done_func_t *done, void *private,
    int priority, enum zio_flag flags, boolean_t labels)
{
	zio_t *zio;

	ASSERT(vd->vdev_children == 0);
	ASSERT(!labels || offset + size <= VDEV_LABEL_START_SIZE ||
	    offset >= vd->vdev_psize - VDEV_LABEL_END_SIZE);
	ASSERT3U(offset + size, <=, vd->vdev_psize);

	zio = zio_create(pio, vd->vdev_spa, 0, NULL, data, size, done, private,
	    ZIO_TYPE_READ, priority, flags, vd, offset, NULL,
	    ZIO_STAGE_OPEN, ZIO_READ_PHYS_PIPELINE);

	zio->io_prop.zp_checksum = checksum;

	return (zio);
}

zio_t *
zio_write_phys(zio_t *pio, vdev_t *vd, uint64_t offset, uint64_t size,
    void *data, int checksum, zio_done_func_t *done, void *private,
    int priority, enum zio_flag flags, boolean_t labels)
{
	zio_t *zio;

	ASSERT(vd->vdev_children == 0);
	ASSERT(!labels || offset + size <= VDEV_LABEL_START_SIZE ||
	    offset >= vd->vdev_psize - VDEV_LABEL_END_SIZE);
	ASSERT3U(offset + size, <=, vd->vdev_psize);

	zio = zio_create(pio, vd->vdev_spa, 0, NULL, data, size, done, private,
	    ZIO_TYPE_WRITE, priority, flags, vd, offset, NULL,
	    ZIO_STAGE_OPEN, ZIO_WRITE_PHYS_PIPELINE);

	zio->io_prop.zp_checksum = checksum;

	if (zio_checksum_table[checksum].ci_eck) {
		/*
		 * zec checksums are necessarily destructive -- they modify
		 * the end of the write buffer to hold the verifier/checksum.
		 * Therefore, we must make a local copy in case the data is
		 * being written to multiple places in parallel.
		 */
		void *wbuf = zio_buf_alloc(size);
		bcopy(data, wbuf, size);
		zio_push_transform(zio, wbuf, size, size, NULL);
	}

	return (zio);
}

/*
 * Create a child I/O to do some work for us.
 */
zio_t *
zio_vdev_child_io(zio_t *pio, blkptr_t *bp, vdev_t *vd, uint64_t offset,
    void *data, uint64_t size, int type, int priority, enum zio_flag flags,
    zio_done_func_t *done, void *private)
{
	enum zio_stage pipeline = ZIO_VDEV_CHILD_PIPELINE;
	zio_t *zio;

	ASSERT(vd->vdev_parent ==
	    (pio->io_vd ? pio->io_vd : pio->io_spa->spa_root_vdev));

	if (type == ZIO_TYPE_READ && bp != NULL) {
		/*
		 * If we have the bp, then the child should perform the
		 * checksum and the parent need not.  This pushes error
		 * detection as close to the leaves as possible and
		 * eliminates redundant checksums in the interior nodes.
		 */
		pipeline |= ZIO_STAGE_CHECKSUM_VERIFY;
		pio->io_pipeline &= ~ZIO_STAGE_CHECKSUM_VERIFY;
	}

	if (vd->vdev_children == 0)
		offset += VDEV_LABEL_START_SIZE;

	flags |= ZIO_VDEV_CHILD_FLAGS(pio) | ZIO_FLAG_DONT_PROPAGATE;

	/*
	 * If we've decided to do a repair, the write is not speculative --
	 * even if the original read was.
	 */
	if (flags & ZIO_FLAG_IO_REPAIR)
		flags &= ~ZIO_FLAG_SPECULATIVE;

	zio = zio_create(pio, pio->io_spa, pio->io_txg, bp, data, size,
	    done, private, type, priority, flags, vd, offset, &pio->io_bookmark,
	    ZIO_STAGE_VDEV_IO_START >> 1, pipeline);

	return (zio);
}

zio_t *
zio_vdev_delegated_io(vdev_t *vd, uint64_t offset, void *data, uint64_t size,
    int type, int priority, enum zio_flag flags,
    zio_done_func_t *done, void *private)
{
	zio_t *zio;

	ASSERT(vd->vdev_ops->vdev_op_leaf);

	zio = zio_create(NULL, vd->vdev_spa, 0, NULL,
	    data, size, done, private, type, priority,
	    flags | ZIO_FLAG_CANFAIL | ZIO_FLAG_DONT_RETRY,
	    vd, offset, NULL,
	    ZIO_STAGE_VDEV_IO_START >> 1, ZIO_VDEV_CHILD_PIPELINE);

	return (zio);
}

void
zio_flush(zio_t *zio, vdev_t *vd)
{
	zio_nowait(zio_ioctl(zio, zio->io_spa, vd, DKIOCFLUSHWRITECACHE,
	    NULL, NULL, ZIO_PRIORITY_NOW,
	    ZIO_FLAG_CANFAIL | ZIO_FLAG_DONT_PROPAGATE | ZIO_FLAG_DONT_RETRY));
}

void
zio_shrink(zio_t *zio, uint64_t size)
{
	ASSERT(zio->io_executor == NULL);
	ASSERT(zio->io_orig_size == zio->io_size);
	ASSERT(size <= zio->io_size);

	/*
	 * We don't shrink for raidz because of problems with the
	 * reconstruction when reading back less than the block size.
	 * Note, BP_IS_RAIDZ() assumes no compression.
	 */
	ASSERT(BP_GET_COMPRESS(zio->io_bp) == ZIO_COMPRESS_OFF);
	if (!BP_IS_RAIDZ(zio->io_bp))
		zio->io_orig_size = zio->io_size = size;
}

/*
 * ==========================================================================
 * Prepare to read and write logical blocks
 * ==========================================================================
 */

static int
zio_read_bp_init(zio_t *zio)
{
	blkptr_t *bp = zio->io_bp;

	if (BP_GET_COMPRESS(bp) != ZIO_COMPRESS_OFF &&
	    zio->io_child_type == ZIO_CHILD_LOGICAL &&
	    !(zio->io_flags & ZIO_FLAG_RAW)) {
		uint64_t psize = BP_GET_PSIZE(bp);
		void *cbuf = zio_buf_alloc(psize);

		zio_push_transform(zio, cbuf, psize, psize, zio_decompress);
	}

	if (!dmu_ot[BP_GET_TYPE(bp)].ot_metadata && BP_GET_LEVEL(bp) == 0)
		zio->io_flags |= ZIO_FLAG_DONT_CACHE;

	if (BP_GET_TYPE(bp) == DMU_OT_DDT_ZAP)
		zio->io_flags |= ZIO_FLAG_DONT_CACHE;

	if (BP_GET_DEDUP(bp) && zio->io_child_type == ZIO_CHILD_LOGICAL)
		zio->io_pipeline = ZIO_DDT_READ_PIPELINE;

	return (ZIO_PIPELINE_CONTINUE);
}

static int
zio_write_bp_init(zio_t *zio)
{
	spa_t *spa = zio->io_spa;
	zio_prop_t *zp = &zio->io_prop;
	enum zio_compress compress = zp->zp_compress;
	blkptr_t *bp = zio->io_bp;
	uint64_t lsize = zio->io_size;
	uint64_t psize = lsize;
	int pass = 1;

	/*
	 * If our children haven't all reached the ready stage,
	 * wait for them and then repeat this pipeline stage.
	 */
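	/*
	 * (Informational note, not in the original source: when a wait is
	 * needed, zio_wait_for_children() backs io_stage up by one bit and
	 * records the stall, so zio_execute() re-runs this same stage once
	 * the last child completes via zio_notify_parent().)
	 */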
	if (zio_wait_for_children(zio, ZIO_CHILD_GANG, ZIO_WAIT_READY) ||
	    zio_wait_for_children(zio, ZIO_CHILD_LOGICAL, ZIO_WAIT_READY))
		return (ZIO_PIPELINE_STOP);

	if (!IO_IS_ALLOCATING(zio))
		return (ZIO_PIPELINE_CONTINUE);

	ASSERT(zio->io_child_type != ZIO_CHILD_DDT);

	if (zio->io_bp_override) {
		ASSERT(bp->blk_birth != zio->io_txg);
		ASSERT(BP_GET_DEDUP(zio->io_bp_override) == 0);

		*bp = *zio->io_bp_override;
		zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;

		if (BP_IS_HOLE(bp) || !zp->zp_dedup)
			return (ZIO_PIPELINE_CONTINUE);

		ASSERT(zio_checksum_table[zp->zp_checksum].ci_dedup ||
		    zp->zp_dedup_verify);

		if (BP_GET_CHECKSUM(bp) == zp->zp_checksum) {
			BP_SET_DEDUP(bp, 1);
			zio->io_pipeline |= ZIO_STAGE_DDT_WRITE;
			return (ZIO_PIPELINE_CONTINUE);
		}
		zio->io_bp_override = NULL;
		BP_ZERO(bp);
	}

	if (bp->blk_birth == zio->io_txg) {
		/*
		 * We're rewriting an existing block, which means we're
		 * working on behalf of spa_sync().  For spa_sync() to
		 * converge, it must eventually be the case that we don't
		 * have to allocate new blocks.  But compression changes
		 * the blocksize, which forces a reallocate, and makes
		 * convergence take longer.  Therefore, after the first
		 * few passes, stop compressing to ensure convergence.
		 */
		pass = spa_sync_pass(spa);

		ASSERT(zio->io_txg == spa_syncing_txg(spa));
		ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
		ASSERT(!BP_GET_DEDUP(bp));

		if (pass > SYNC_PASS_DONT_COMPRESS)
			compress = ZIO_COMPRESS_OFF;

		/* Make sure someone doesn't change their mind on overwrites */
		ASSERT(MIN(zp->zp_copies + BP_IS_GANG(bp),
		    spa_max_replication(spa)) == BP_GET_NDVAS(bp));
	}

	if (compress != ZIO_COMPRESS_OFF) {
		void *cbuf = zio_buf_alloc(lsize);
		psize = zio_compress_data(compress, zio->io_data, cbuf, lsize);
		if (psize == 0 || psize == lsize) {
			compress = ZIO_COMPRESS_OFF;
			zio_buf_free(cbuf, lsize);
		} else {
			ASSERT(psize < lsize);
			zio_push_transform(zio, cbuf, psize, lsize, NULL);
		}
	}

	/*
	 * The final pass of spa_sync() must be all rewrites, but the first
	 * few passes offer a trade-off: allocating blocks defers convergence,
	 * but newly allocated blocks are sequential, so they can be written
	 * to disk faster.  Therefore, we allow the first few passes of
	 * spa_sync() to allocate new blocks, but force rewrites after that.
	 * There should only be a handful of blocks after pass 1 in any case.
	 */
	if (bp->blk_birth == zio->io_txg && BP_GET_PSIZE(bp) == psize &&
	    pass > SYNC_PASS_REWRITE) {
		ASSERT(psize != 0);
		enum zio_stage gang_stages = zio->io_pipeline & ZIO_GANG_STAGES;
		zio->io_pipeline = ZIO_REWRITE_PIPELINE | gang_stages;
		zio->io_flags |= ZIO_FLAG_IO_REWRITE;
	} else {
		BP_ZERO(bp);
		zio->io_pipeline = ZIO_WRITE_PIPELINE;
	}

	if (psize == 0) {
		zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
	} else {
		ASSERT(zp->zp_checksum != ZIO_CHECKSUM_GANG_HEADER);
		BP_SET_LSIZE(bp, lsize);
		BP_SET_PSIZE(bp, psize);
		BP_SET_COMPRESS(bp, compress);
		BP_SET_CHECKSUM(bp, zp->zp_checksum);
		BP_SET_TYPE(bp, zp->zp_type);
		BP_SET_LEVEL(bp, zp->zp_level);
		BP_SET_DEDUP(bp, zp->zp_dedup);
		BP_SET_BYTEORDER(bp, ZFS_HOST_BYTEORDER);
		if (zp->zp_dedup) {
			ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
			ASSERT(!(zio->io_flags & ZIO_FLAG_IO_REWRITE));
			zio->io_pipeline = ZIO_DDT_WRITE_PIPELINE;
		}
	}

	return (ZIO_PIPELINE_CONTINUE);
}

static int
zio_free_bp_init(zio_t *zio)
{
	blkptr_t *bp = zio->io_bp;

	if (zio->io_child_type == ZIO_CHILD_LOGICAL) {
		if (BP_GET_DEDUP(bp))
			zio->io_pipeline = ZIO_DDT_FREE_PIPELINE;
	}

	return (ZIO_PIPELINE_CONTINUE);
}

/*
 * ==========================================================================
 * Execute the I/O pipeline
 * ==========================================================================
 */

static void
zio_taskq_dispatch(zio_t *zio, enum zio_taskq_type q, boolean_t cutinline)
{
	spa_t *spa = zio->io_spa;
	zio_type_t t = zio->io_type;
	int flags = TQ_SLEEP | (cutinline ? TQ_FRONT : 0);
#ifdef _KERNEL
	struct ostask *task;
#endif

	ASSERT(q == ZIO_TASKQ_ISSUE || q == ZIO_TASKQ_INTERRUPT);

#ifdef _KERNEL
	if (q == ZIO_TASKQ_ISSUE)
		task = &zio->io_task_issue;
	else	/* if (q == ZIO_TASKQ_INTERRUPT) */
		task = &zio->io_task_interrupt;
#endif

	/*
	 * If we're a config writer or a probe, the normal issue and
	 * interrupt threads may all be blocked waiting for the config lock.
	 * In this case, select the otherwise-unused taskq for ZIO_TYPE_NULL.
	 */
	if (zio->io_flags & (ZIO_FLAG_CONFIG_WRITER | ZIO_FLAG_PROBE))
		t = ZIO_TYPE_NULL;

	/*
	 * A similar issue exists for the L2ARC write thread until L2ARC 2.0.
	 */
	if (t == ZIO_TYPE_WRITE && zio->io_vd && zio->io_vd->vdev_aux)
		t = ZIO_TYPE_NULL;

	/*
	 * If this is a high priority I/O, then use the high priority taskq.
	 */
	if (zio->io_priority == ZIO_PRIORITY_NOW &&
	    spa->spa_zio_taskq[t][q + 1] != NULL)
		q++;

	ASSERT3U(q, <, ZIO_TASKQ_TYPES);
#ifdef _KERNEL
	(void) taskq_dispatch_safe(spa->spa_zio_taskq[t][q],
	    (task_func_t *)zio_execute, zio, flags, task);
#else
	(void) taskq_dispatch(spa->spa_zio_taskq[t][q],
	    (task_func_t *)zio_execute, zio, flags);
#endif
}

static boolean_t
zio_taskq_member(zio_t *zio, enum zio_taskq_type q)
{
	kthread_t *executor = zio->io_executor;
	spa_t *spa = zio->io_spa;

	for (zio_type_t t = 0; t < ZIO_TYPES; t++)
		if (taskq_member(spa->spa_zio_taskq[t][q], executor))
			return (B_TRUE);

	return (B_FALSE);
}

static int
zio_issue_async(zio_t *zio)
{
	zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, B_FALSE);

	return (ZIO_PIPELINE_STOP);
}

void
zio_interrupt(zio_t *zio)
{
	zio_taskq_dispatch(zio, ZIO_TASKQ_INTERRUPT, B_FALSE);
}

/*
 * Execute the I/O pipeline until one of the following occurs:
 * (1) the I/O completes; (2) the pipeline stalls waiting for
 * dependent child I/Os; (3) the I/O issues, so we're waiting
 * for an I/O completion interrupt; (4) the I/O is delegated by
 * vdev-level caching or aggregation; (5) the I/O is deferred
 * due to vdev-level queueing; (6) the I/O is handed off to
 * another thread.  In all cases, the pipeline stops whenever
 * there's no CPU work; it never burns a thread in cv_wait().
 *
 * There's no locking on io_stage because there's no legitimate way
 * for multiple threads to be attempting to process the same I/O.
 */
static zio_pipe_stage_t *zio_pipeline[];

void
zio_execute(zio_t *zio)
{
	zio->io_executor = curthread;

	while (zio->io_stage < ZIO_STAGE_DONE) {
		enum zio_stage pipeline = zio->io_pipeline;
		enum zio_stage stage = zio->io_stage;
		int rv;

		ASSERT(!MUTEX_HELD(&zio->io_lock));
		ASSERT(ISP2(stage));
		ASSERT(zio->io_stall == NULL);

		do {
			stage <<= 1;
		} while ((stage & pipeline) == 0);

		ASSERT(stage <= ZIO_STAGE_DONE);

		/*
		 * If we are in interrupt context and this pipeline stage
		 * will grab a config lock that is held across I/O,
		 * or may wait for an I/O that needs an interrupt thread
		 * to complete, issue async to avoid deadlock.
		 *
		 * For VDEV_IO_START, we cut in line so that the io will
		 * be sent to disk promptly.
		 */
		if ((stage & ZIO_BLOCKING_STAGES) && zio->io_vd == NULL &&
		    zio_taskq_member(zio, ZIO_TASKQ_INTERRUPT)) {
			boolean_t cut = (stage == ZIO_STAGE_VDEV_IO_START) ?
			    zio_requeue_io_start_cut_in_line : B_FALSE;
			zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, cut);
			return;
		}

		zio->io_stage = stage;
		rv = zio_pipeline[highbit(stage) - 1](zio);

		if (rv == ZIO_PIPELINE_STOP)
			return;

		ASSERT(rv == ZIO_PIPELINE_CONTINUE);
	}
}

/*
 * ==========================================================================
 * Initiate I/O, either sync or async
 * ==========================================================================
 */
int
zio_wait(zio_t *zio)
{
	int error;

	ASSERT(zio->io_stage == ZIO_STAGE_OPEN);
	ASSERT(zio->io_executor == NULL);

	zio->io_waiter = curthread;

	zio_execute(zio);

	mutex_enter(&zio->io_lock);
	while (zio->io_executor != NULL)
		cv_wait(&zio->io_cv, &zio->io_lock);
	mutex_exit(&zio->io_lock);

	error = zio->io_error;
	zio_destroy(zio);

	return (error);
}

void
zio_nowait(zio_t *zio)
{
	ASSERT(zio->io_executor == NULL);

	if (zio->io_child_type == ZIO_CHILD_LOGICAL &&
	    zio_unique_parent(zio) == NULL) {
		/*
		 * This is a logical async I/O with no parent to wait for it.
		 * We add it to the spa_async_root_zio "Godfather" I/O which
		 * will ensure they complete prior to unloading the pool.
		 */
		spa_t *spa = zio->io_spa;

		zio_add_child(spa->spa_async_zio_root, zio);
	}

	zio_execute(zio);
}

/*
 * ==========================================================================
 * Reexecute or suspend/resume failed I/O
 * ==========================================================================
 */

static void
zio_reexecute(zio_t *pio)
{
	zio_t *cio, *cio_next;

	ASSERT(pio->io_child_type == ZIO_CHILD_LOGICAL);
	ASSERT(pio->io_orig_stage == ZIO_STAGE_OPEN);
	ASSERT(pio->io_gang_leader == NULL);
	ASSERT(pio->io_gang_tree == NULL);

	pio->io_flags = pio->io_orig_flags;
	pio->io_stage = pio->io_orig_stage;
	pio->io_pipeline = pio->io_orig_pipeline;
	pio->io_reexecute = 0;
	pio->io_error = 0;
	for (int w = 0; w < ZIO_WAIT_TYPES; w++)
		pio->io_state[w] = 0;
	for (int c = 0; c < ZIO_CHILD_TYPES; c++)
		pio->io_child_error[c] = 0;

	if (IO_IS_ALLOCATING(pio))
		BP_ZERO(pio->io_bp);

	/*
	 * As we reexecute pio's children, new children could be created.
	 * New children go to the head of pio's io_child_list, however,
	 * so we will (correctly) not reexecute them.  The key is that
	 * the remainder of pio's io_child_list, from 'cio_next' onward,
	 * cannot be affected by any side effects of reexecuting 'cio'.
	 */
	for (cio = zio_walk_children(pio); cio != NULL; cio = cio_next) {
		cio_next = zio_walk_children(pio);
		mutex_enter(&pio->io_lock);
		for (int w = 0; w < ZIO_WAIT_TYPES; w++)
			pio->io_children[cio->io_child_type][w]++;
		mutex_exit(&pio->io_lock);
		zio_reexecute(cio);
	}

	/*
	 * Now that all children have been reexecuted, execute the parent.
	 * We don't reexecute "The Godfather" I/O here as it's the
	 * responsibility of the caller to wait on him.
	 */
	if (!(pio->io_flags & ZIO_FLAG_GODFATHER))
		zio_execute(pio);
}
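
/*
 * Informational sketch (not in the original source): on an uncorrectable
 * error with a non-panic failmode, the failed logical zio is parked under
 * spa_suspend_zio_root by zio_suspend() below; a later zio_resume()
 * (e.g. triggered administratively, such as by "zpool clear") reexecutes
 * everything parked there and waits for the result.
 */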

void
zio_suspend(spa_t *spa, zio_t *zio)
{
	if (spa_get_failmode(spa) == ZIO_FAILURE_MODE_PANIC)
		fm_panic("Pool '%s' has encountered an uncorrectable I/O "
		    "failure and the failure mode property for this pool "
		    "is set to panic.", spa_name(spa));

	zfs_ereport_post(FM_EREPORT_ZFS_IO_FAILURE, spa, NULL, NULL, 0, 0);

	mutex_enter(&spa->spa_suspend_lock);

	if (spa->spa_suspend_zio_root == NULL)
		spa->spa_suspend_zio_root = zio_root(spa, NULL, NULL,
		    ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE |
		    ZIO_FLAG_GODFATHER);

	spa->spa_suspended = B_TRUE;

	if (zio != NULL) {
		ASSERT(!(zio->io_flags & ZIO_FLAG_GODFATHER));
		ASSERT(zio != spa->spa_suspend_zio_root);
		ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
		ASSERT(zio_unique_parent(zio) == NULL);
		ASSERT(zio->io_stage == ZIO_STAGE_DONE);
		zio_add_child(spa->spa_suspend_zio_root, zio);
	}

	mutex_exit(&spa->spa_suspend_lock);
}

int
zio_resume(spa_t *spa)
{
	zio_t *pio;

	/*
	 * Reexecute all previously suspended i/o.
	 */
	mutex_enter(&spa->spa_suspend_lock);
	spa->spa_suspended = B_FALSE;
	cv_broadcast(&spa->spa_suspend_cv);
	pio = spa->spa_suspend_zio_root;
	spa->spa_suspend_zio_root = NULL;
	mutex_exit(&spa->spa_suspend_lock);

	if (pio == NULL)
		return (0);

	zio_reexecute(pio);
	return (zio_wait(pio));
}

void
zio_resume_wait(spa_t *spa)
{
	mutex_enter(&spa->spa_suspend_lock);
	while (spa_suspended(spa))
		cv_wait(&spa->spa_suspend_cv, &spa->spa_suspend_lock);
	mutex_exit(&spa->spa_suspend_lock);
}

/*
 * ==========================================================================
 * Gang blocks.
 *
 * A gang block is a collection of small blocks that looks to the DMU
 * like one large block.  When zio_dva_allocate() cannot find a block
 * of the requested size, due to either severe fragmentation or the pool
 * being nearly full, it calls zio_write_gang_block() to construct the
 * block from smaller fragments.
 *
 * A gang block consists of a gang header (zio_gbh_phys_t) and up to
 * three (SPA_GBH_NBLKPTRS) gang members.  The gang header is just like
 * an indirect block: it's an array of block pointers.  It consumes
 * only one sector and hence is allocatable regardless of fragmentation.
 * The gang header's bps point to its gang members, which hold the data.
 *
 * Gang blocks are self-checksumming, using the bp's <vdev, offset, txg>
 * as the verifier to ensure uniqueness of the SHA256 checksum.
 * Critically, the gang block bp's blk_cksum is the checksum of the data,
 * not the gang header.  This ensures that data block signatures (needed for
 * deduplication) are independent of how the block is physically stored.
 *
 * Gang blocks can be nested: a gang member may itself be a gang block.
 * Thus every gang block is a tree in which root and all interior nodes are
 * gang headers, and the leaves are normal blocks that contain user data.
 * The root of the gang tree is called the gang leader.
 *
 * To perform any operation (read, rewrite, free, claim) on a gang block,
 * zio_gang_assemble() first assembles the gang tree (minus data leaves)
 * in the io_gang_tree field of the original logical i/o by recursively
 * reading the gang leader and all gang headers below it.  This yields
 * an in-core tree containing the contents of every gang header and the
 * bps for every constituent of the gang block.
 *
 * With the gang tree now assembled, zio_gang_issue() just walks the gang tree
 * and invokes a callback on each bp.  To free a gang block, zio_gang_issue()
 * calls zio_free_gang() -- a trivial wrapper around zio_free() -- for each bp.
 * zio_claim_gang() provides a similarly trivial wrapper for zio_claim().
 * zio_read_gang() is a wrapper around zio_read() that omits reading gang
 * headers, since we already have those in io_gang_tree.  zio_rewrite_gang()
 * performs a zio_rewrite() of the data or, for gang headers, a zio_rewrite()
 * of the gang header plus zio_checksum_compute() of the data to update the
 * gang header's blk_cksum as described above.
 *
 * The two-phase assemble/issue model solves the problem of partial failure --
 * what if you'd freed part of a gang block but then couldn't read the
 * gang header for another part?  Assembling the entire gang tree first
 * ensures that all the necessary gang header I/O has succeeded before
 * starting the actual work of free, claim, or write.  Once the gang tree
 * is assembled, free and claim are in-memory operations that cannot fail.
 *
 * In the event that a gang write fails, zio_dva_unallocate() walks the
 * gang tree to immediately free (i.e. insert back into the space map)
 * everything we've allocated.  This ensures that we don't get ENOSPC
 * errors during repeated suspend/resume cycles due to a flaky device.
 *
 * Gang rewrites only happen during sync-to-convergence.  If we can't assemble
 * the gang tree, we won't modify the block, so we can safely defer the free
 * (knowing that the block is still intact).  If we *can* assemble the gang
 * tree, then even if some of the rewrites fail, zio_dva_unallocate() will free
 * each constituent bp and we can allocate a new block on the next sync pass.
 *
 * In all cases, the gang tree allows complete recovery from partial failure.
 * ==========================================================================
 */

static zio_t *
zio_read_gang(zio_t *pio, blkptr_t *bp, zio_gang_node_t *gn, void *data)
{
	if (gn != NULL)
		return (pio);

	return (zio_read(pio, pio->io_spa, bp, data, BP_GET_PSIZE(bp),
	    NULL, NULL, pio->io_priority, ZIO_GANG_CHILD_FLAGS(pio),
	    &pio->io_bookmark));
}

zio_t *
zio_rewrite_gang(zio_t *pio, blkptr_t *bp, zio_gang_node_t *gn, void *data)
{
	zio_t *zio;

	if (gn != NULL) {
		zio = zio_rewrite(pio, pio->io_spa, pio->io_txg, bp,
		    gn->gn_gbh, SPA_GANGBLOCKSIZE, NULL, NULL, pio->io_priority,
		    ZIO_GANG_CHILD_FLAGS(pio), &pio->io_bookmark);
		/*
		 * As we rewrite each gang header, the pipeline will compute
		 * a new gang block header checksum for it; but no one will
		 * compute a new data checksum, so we do that here.  The one
		 * exception is the gang leader: the pipeline already computed
		 * its data checksum because that stage precedes gang assembly.
		 * (Presently, nothing actually uses interior data checksums;
		 * this is just good hygiene.)
		 */
		if (gn != pio->io_gang_leader->io_gang_tree) {
			zio_checksum_compute(zio, BP_GET_CHECKSUM(bp),
			    data, BP_GET_PSIZE(bp));
		}
		/*
		 * If we are here to damage data for testing purposes,
		 * leave the GBH alone so that we can detect the damage.
		 */
		if (pio->io_gang_leader->io_flags & ZIO_FLAG_INDUCE_DAMAGE)
			zio->io_pipeline &= ~ZIO_VDEV_IO_STAGES;
	} else {
		zio = zio_rewrite(pio, pio->io_spa, pio->io_txg, bp,
		    data, BP_GET_PSIZE(bp), NULL, NULL, pio->io_priority,
		    ZIO_GANG_CHILD_FLAGS(pio), &pio->io_bookmark);
	}

	return (zio);
}

/* ARGSUSED */
zio_t *
zio_free_gang(zio_t *pio, blkptr_t *bp, zio_gang_node_t *gn, void *data)
{
	return (zio_free_sync(pio, pio->io_spa, pio->io_txg, bp,
	    ZIO_GANG_CHILD_FLAGS(pio)));
}

/* ARGSUSED */
zio_t *
zio_claim_gang(zio_t *pio, blkptr_t *bp, zio_gang_node_t *gn, void *data)
{
	return (zio_claim(pio, pio->io_spa, pio->io_txg, bp,
	    NULL, NULL, ZIO_GANG_CHILD_FLAGS(pio)));
}

static zio_gang_issue_func_t *zio_gang_issue_func[ZIO_TYPES] = {
	NULL,
	zio_read_gang,
	zio_rewrite_gang,
	zio_free_gang,
	zio_claim_gang,
	NULL
};

static void zio_gang_tree_assemble_done(zio_t *zio);

static zio_gang_node_t *
zio_gang_node_alloc(zio_gang_node_t **gnpp)
{
	zio_gang_node_t *gn;

	ASSERT(*gnpp == NULL);

	gn = kmem_zalloc(sizeof (*gn), KM_SLEEP);
	gn->gn_gbh = zio_buf_alloc(SPA_GANGBLOCKSIZE);
	*gnpp = gn;

	return (gn);
}

static void
zio_gang_node_free(zio_gang_node_t **gnpp)
{
	zio_gang_node_t *gn = *gnpp;

	for (int g = 0; g < SPA_GBH_NBLKPTRS; g++)
		ASSERT(gn->gn_child[g] == NULL);

	zio_buf_free(gn->gn_gbh, SPA_GANGBLOCKSIZE);
	kmem_free(gn, sizeof (*gn));
	*gnpp = NULL;
}

static void
zio_gang_tree_free(zio_gang_node_t **gnpp)
{
	zio_gang_node_t *gn = *gnpp;

	if (gn == NULL)
		return;

	for (int g = 0; g < SPA_GBH_NBLKPTRS; g++)
		zio_gang_tree_free(&gn->gn_child[g]);

	zio_gang_node_free(gnpp);
}

static void
zio_gang_tree_assemble(zio_t *gio, blkptr_t *bp, zio_gang_node_t **gnpp)
{
	zio_gang_node_t *gn = zio_gang_node_alloc(gnpp);

	ASSERT(gio->io_gang_leader == gio);
	ASSERT(BP_IS_GANG(bp));

	zio_nowait(zio_read(gio, gio->io_spa, bp, gn->gn_gbh,
	    SPA_GANGBLOCKSIZE, zio_gang_tree_assemble_done, gn,
	    gio->io_priority, ZIO_GANG_CHILD_FLAGS(gio), &gio->io_bookmark));
}

static void
zio_gang_tree_assemble_done(zio_t *zio)
{
	zio_t *gio = zio->io_gang_leader;
	zio_gang_node_t *gn = zio->io_private;
	blkptr_t *bp = zio->io_bp;

	ASSERT(gio == zio_unique_parent(zio));
	ASSERT(zio->io_child_count == 0);

	if (zio->io_error)
		return;

	if (BP_SHOULD_BYTESWAP(bp))
		byteswap_uint64_array(zio->io_data, zio->io_size);

	ASSERT(zio->io_data == gn->gn_gbh);
	ASSERT(zio->io_size == SPA_GANGBLOCKSIZE);
	ASSERT(gn->gn_gbh->zg_tail.zec_magic == ZEC_MAGIC);

	for (int g = 0; g < SPA_GBH_NBLKPTRS; g++) {
		blkptr_t *gbp = &gn->gn_gbh->zg_blkptr[g];
		if (!BP_IS_GANG(gbp))
			continue;
		zio_gang_tree_assemble(gio, gbp, &gn->gn_child[g]);
	}
}
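
/*
 * Informational note (not in the original source): for a header with
 * three data members, zio_gang_tree_issue() below first invokes the
 * per-type callback on the header itself, then on each non-hole member
 * bp in order, advancing 'data' by BP_GET_PSIZE(gbp) so that members
 * map onto consecutive regions of the logical buffer.
 */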
static void
zio_gang_tree_issue(zio_t *pio, zio_gang_node_t *gn, blkptr_t *bp, void *data)
{
	zio_t *gio = pio->io_gang_leader;
	zio_t *zio;

	ASSERT(BP_IS_GANG(bp) == !!gn);
	ASSERT(BP_GET_CHECKSUM(bp) == BP_GET_CHECKSUM(gio->io_bp));
	ASSERT(BP_GET_LSIZE(bp) == BP_GET_PSIZE(bp) || gn == gio->io_gang_tree);

	/*
	 * If you're a gang header, your data is in gn->gn_gbh.
	 * If you're a gang member, your data is in 'data' and gn == NULL.
	 */
	zio = zio_gang_issue_func[gio->io_type](pio, bp, gn, data);

	if (gn != NULL) {
		ASSERT(gn->gn_gbh->zg_tail.zec_magic == ZEC_MAGIC);

		for (int g = 0; g < SPA_GBH_NBLKPTRS; g++) {
			blkptr_t *gbp = &gn->gn_gbh->zg_blkptr[g];
			if (BP_IS_HOLE(gbp))
				continue;
			zio_gang_tree_issue(zio, gn->gn_child[g], gbp, data);
			data = (char *)data + BP_GET_PSIZE(gbp);
		}
	}

	if (gn == gio->io_gang_tree)
		ASSERT3P((char *)gio->io_data + gio->io_size, ==, data);

	if (zio != pio)
		zio_nowait(zio);
}

static int
zio_gang_assemble(zio_t *zio)
{
	blkptr_t *bp = zio->io_bp;

	ASSERT(BP_IS_GANG(bp) && zio->io_gang_leader == NULL);
	ASSERT(zio->io_child_type > ZIO_CHILD_GANG);

	zio->io_gang_leader = zio;

	zio_gang_tree_assemble(zio, bp, &zio->io_gang_tree);

	return (ZIO_PIPELINE_CONTINUE);
}

static int
zio_gang_issue(zio_t *zio)
{
	blkptr_t *bp = zio->io_bp;

	if (zio_wait_for_children(zio, ZIO_CHILD_GANG, ZIO_WAIT_DONE))
		return (ZIO_PIPELINE_STOP);

	ASSERT(BP_IS_GANG(bp) && zio->io_gang_leader == zio);
	ASSERT(zio->io_child_type > ZIO_CHILD_GANG);

	if (zio->io_child_error[ZIO_CHILD_GANG] == 0)
		zio_gang_tree_issue(zio, zio->io_gang_tree, bp, zio->io_data);
	else
		zio_gang_tree_free(&zio->io_gang_tree);

	zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;

	return (ZIO_PIPELINE_CONTINUE);
}

static void
zio_write_gang_member_ready(zio_t *zio)
{
	zio_t *pio = zio_unique_parent(zio);
	zio_t *gio = zio->io_gang_leader;
	dva_t *cdva = zio->io_bp->blk_dva;
	dva_t *pdva = pio->io_bp->blk_dva;
	uint64_t asize;

	if (BP_IS_HOLE(zio->io_bp))
		return;

	ASSERT(BP_IS_HOLE(&zio->io_bp_orig));

	ASSERT(zio->io_child_type == ZIO_CHILD_GANG);
	ASSERT3U(zio->io_prop.zp_copies, ==, gio->io_prop.zp_copies);
	ASSERT3U(zio->io_prop.zp_copies, <=, BP_GET_NDVAS(zio->io_bp));
	ASSERT3U(pio->io_prop.zp_copies, <=, BP_GET_NDVAS(pio->io_bp));
	ASSERT3U(BP_GET_NDVAS(zio->io_bp), <=, BP_GET_NDVAS(pio->io_bp));

	mutex_enter(&pio->io_lock);
	for (int d = 0; d < BP_GET_NDVAS(zio->io_bp); d++) {
		ASSERT(DVA_GET_GANG(&pdva[d]));
		asize = DVA_GET_ASIZE(&pdva[d]);
		asize += DVA_GET_ASIZE(&cdva[d]);
		DVA_SET_ASIZE(&pdva[d], asize);
	}
	mutex_exit(&pio->io_lock);
}

static int
zio_write_gang_block(zio_t *pio)
{
	spa_t *spa = pio->io_spa;
	blkptr_t *bp = pio->io_bp;
	zio_t *gio = pio->io_gang_leader;
	zio_t *zio;
	zio_gang_node_t *gn, **gnpp;
	zio_gbh_phys_t *gbh;
	uint64_t txg = pio->io_txg;
	uint64_t resid = pio->io_size;
	uint64_t lsize;
	int copies = gio->io_prop.zp_copies;
	int gbh_copies = MIN(copies + 1, spa_max_replication(spa));
	zio_prop_t zp;
	int error;

	error = metaslab_alloc(spa, spa_normal_class(spa), SPA_GANGBLOCKSIZE,
	    bp, gbh_copies, txg, pio == gio ? NULL : gio->io_bp,
	    METASLAB_HINTBP_FAVOR | METASLAB_GANG_HEADER);
	if (error) {
		pio->io_error = error;
		return (ZIO_PIPELINE_CONTINUE);
	}

	if (pio == gio) {
		gnpp = &gio->io_gang_tree;
	} else {
		gnpp = pio->io_private;
		ASSERT(pio->io_ready == zio_write_gang_member_ready);
	}

	gn = zio_gang_node_alloc(gnpp);
	gbh = gn->gn_gbh;
	bzero(gbh, SPA_GANGBLOCKSIZE);

	/*
	 * Create the gang header.
	 */
	zio = zio_rewrite(pio, spa, txg, bp, gbh, SPA_GANGBLOCKSIZE, NULL, NULL,
	    pio->io_priority, ZIO_GANG_CHILD_FLAGS(pio), &pio->io_bookmark);

	/*
	 * Create and nowait the gang children.
	 */
	for (int g = 0; resid != 0; resid -= lsize, g++) {
		lsize = P2ROUNDUP(resid / (SPA_GBH_NBLKPTRS - g),
		    SPA_MINBLOCKSIZE);
		ASSERT(lsize >= SPA_MINBLOCKSIZE && lsize <= resid);

		zp.zp_checksum = gio->io_prop.zp_checksum;
		zp.zp_compress = ZIO_COMPRESS_OFF;
		zp.zp_type = DMU_OT_NONE;
		zp.zp_level = 0;
		zp.zp_copies = gio->io_prop.zp_copies;
		zp.zp_dedup = 0;
		zp.zp_dedup_verify = 0;

		zio_nowait(zio_write(zio, spa, txg, &gbh->zg_blkptr[g],
		    (char *)pio->io_data + (pio->io_size - resid), lsize, &zp,
		    zio_write_gang_member_ready, NULL, &gn->gn_child[g],
		    pio->io_priority, ZIO_GANG_CHILD_FLAGS(pio),
		    &pio->io_bookmark));
	}

	/*
	 * Set pio's pipeline to just wait for zio to finish.
	 */
	pio->io_pipeline = ZIO_INTERLOCK_PIPELINE;

	zio_nowait(zio);

	return (ZIO_PIPELINE_CONTINUE);
}

/*
 * ==========================================================================
 * Dedup
 * ==========================================================================
 */
static void
zio_ddt_child_read_done(zio_t *zio)
{
	blkptr_t *bp = zio->io_bp;
	ddt_entry_t *dde = zio->io_private;
	ddt_phys_t *ddp;
	zio_t *pio = zio_unique_parent(zio);

	mutex_enter(&pio->io_lock);
	ddp = ddt_phys_select(dde, bp);
	if (zio->io_error == 0)
		ddt_phys_clear(ddp);	/* this ddp doesn't need repair */
	if (zio->io_error == 0 && dde->dde_repair_data == NULL)
		dde->dde_repair_data = zio->io_data;
	else
		zio_buf_free(zio->io_data, zio->io_size);
	mutex_exit(&pio->io_lock);
}

static int
zio_ddt_read_start(zio_t *zio)
{
	blkptr_t *bp = zio->io_bp;

	ASSERT(BP_GET_DEDUP(bp));
	ASSERT(BP_GET_PSIZE(bp) == zio->io_size);
	ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);

	if (zio->io_child_error[ZIO_CHILD_DDT]) {
		ddt_t *ddt = ddt_select(zio->io_spa, bp);
		ddt_entry_t *dde = ddt_repair_start(ddt, bp);
		ddt_phys_t *ddp = dde->dde_phys;
		ddt_phys_t *ddp_self = ddt_phys_select(dde, bp);
		blkptr_t blk;

		ASSERT(zio->io_vsd == NULL);
		zio->io_vsd = dde;

		if (ddp_self == NULL)
			return (ZIO_PIPELINE_CONTINUE);

		for (int p = 0; p < DDT_PHYS_TYPES; p++, ddp++) {
			if (ddp->ddp_phys_birth == 0 || ddp == ddp_self)
				continue;
			ddt_bp_create(ddt->ddt_checksum, &dde->dde_key, ddp,
			    &blk);
			zio_nowait(zio_read(zio, zio->io_spa, &blk,
			    zio_buf_alloc(zio->io_size), zio->io_size,
			    zio_ddt_child_read_done, dde, zio->io_priority,
			    ZIO_DDT_CHILD_FLAGS(zio) | ZIO_FLAG_DONT_PROPAGATE,
			    &zio->io_bookmark));
		}
		return (ZIO_PIPELINE_CONTINUE);
	}

	zio_nowait(zio_read(zio, zio->io_spa, bp,
	    zio->io_data, zio->io_size, NULL, NULL, zio->io_priority,
/*
 * ==========================================================================
 * Dedup
 * ==========================================================================
 */
static void
zio_ddt_child_read_done(zio_t *zio)
{
	blkptr_t *bp = zio->io_bp;
	ddt_entry_t *dde = zio->io_private;
	ddt_phys_t *ddp;
	zio_t *pio = zio_unique_parent(zio);

	mutex_enter(&pio->io_lock);
	ddp = ddt_phys_select(dde, bp);
	if (zio->io_error == 0)
		ddt_phys_clear(ddp);	/* this ddp doesn't need repair */
	if (zio->io_error == 0 && dde->dde_repair_data == NULL)
		dde->dde_repair_data = zio->io_data;
	else
		zio_buf_free(zio->io_data, zio->io_size);
	mutex_exit(&pio->io_lock);
}

static int
zio_ddt_read_start(zio_t *zio)
{
	blkptr_t *bp = zio->io_bp;

	ASSERT(BP_GET_DEDUP(bp));
	ASSERT(BP_GET_PSIZE(bp) == zio->io_size);
	ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);

	if (zio->io_child_error[ZIO_CHILD_DDT]) {
		ddt_t *ddt = ddt_select(zio->io_spa, bp);
		ddt_entry_t *dde = ddt_repair_start(ddt, bp);
		ddt_phys_t *ddp = dde->dde_phys;
		ddt_phys_t *ddp_self = ddt_phys_select(dde, bp);
		blkptr_t blk;

		ASSERT(zio->io_vsd == NULL);
		zio->io_vsd = dde;

		if (ddp_self == NULL)
			return (ZIO_PIPELINE_CONTINUE);

		for (int p = 0; p < DDT_PHYS_TYPES; p++, ddp++) {
			if (ddp->ddp_phys_birth == 0 || ddp == ddp_self)
				continue;
			ddt_bp_create(ddt->ddt_checksum, &dde->dde_key, ddp,
			    &blk);
			zio_nowait(zio_read(zio, zio->io_spa, &blk,
			    zio_buf_alloc(zio->io_size), zio->io_size,
			    zio_ddt_child_read_done, dde, zio->io_priority,
			    ZIO_DDT_CHILD_FLAGS(zio) | ZIO_FLAG_DONT_PROPAGATE,
			    &zio->io_bookmark));
		}
		return (ZIO_PIPELINE_CONTINUE);
	}

	zio_nowait(zio_read(zio, zio->io_spa, bp,
	    zio->io_data, zio->io_size, NULL, NULL, zio->io_priority,
	    ZIO_DDT_CHILD_FLAGS(zio), &zio->io_bookmark));

	return (ZIO_PIPELINE_CONTINUE);
}

static int
zio_ddt_read_done(zio_t *zio)
{
	blkptr_t *bp = zio->io_bp;

	if (zio_wait_for_children(zio, ZIO_CHILD_DDT, ZIO_WAIT_DONE))
		return (ZIO_PIPELINE_STOP);

	ASSERT(BP_GET_DEDUP(bp));
	ASSERT(BP_GET_PSIZE(bp) == zio->io_size);
	ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);

	if (zio->io_child_error[ZIO_CHILD_DDT]) {
		ddt_t *ddt = ddt_select(zio->io_spa, bp);
		ddt_entry_t *dde = zio->io_vsd;
		if (ddt == NULL) {
			ASSERT(spa_load_state(zio->io_spa) != SPA_LOAD_NONE);
			return (ZIO_PIPELINE_CONTINUE);
		}
		if (dde == NULL) {
			zio->io_stage = ZIO_STAGE_DDT_READ_START >> 1;
			zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, B_FALSE);
			return (ZIO_PIPELINE_STOP);
		}
		if (dde->dde_repair_data != NULL) {
			bcopy(dde->dde_repair_data, zio->io_data, zio->io_size);
			zio->io_child_error[ZIO_CHILD_DDT] = 0;
		}
		ddt_repair_done(ddt, dde);
		zio->io_vsd = NULL;
	}

	ASSERT(zio->io_vsd == NULL);

	return (ZIO_PIPELINE_CONTINUE);
}
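/*
 * Informal sketch of the dedup self-heal path above: if the first read
 * of a dedup block fails its checksum, zio_ddt_read_start re-issues
 * reads of every other phys copy recorded in the DDT entry; the first
 * copy that reads back clean is parked in dde_repair_data by
 * zio_ddt_child_read_done, and zio_ddt_read_done then bcopy()s it over
 * io_data and clears the child error, so the caller sees a successful
 * read while the damaged copy is queued for repair.
 */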
static boolean_t
zio_ddt_collision(zio_t *zio, ddt_t *ddt, ddt_entry_t *dde)
{
	spa_t *spa = zio->io_spa;

	/*
	 * Note: we compare the original data, not the transformed data,
	 * because when zio->io_bp is an override bp, we will not have
	 * pushed the I/O transforms.  That's an important optimization
	 * because otherwise we'd compress/encrypt all dmu_sync() data twice.
	 */
	for (int p = DDT_PHYS_SINGLE; p <= DDT_PHYS_TRIPLE; p++) {
		zio_t *lio = dde->dde_lead_zio[p];

		if (lio != NULL) {
			return (lio->io_orig_size != zio->io_orig_size ||
			    bcmp(zio->io_orig_data, lio->io_orig_data,
			    zio->io_orig_size) != 0);
		}
	}

	for (int p = DDT_PHYS_SINGLE; p <= DDT_PHYS_TRIPLE; p++) {
		ddt_phys_t *ddp = &dde->dde_phys[p];

		if (ddp->ddp_phys_birth != 0) {
			arc_buf_t *abuf = NULL;
			uint32_t aflags = ARC_WAIT;
			blkptr_t blk = *zio->io_bp;
			int error;

			ddt_bp_fill(ddp, &blk, ddp->ddp_phys_birth);

			ddt_exit(ddt);

			error = arc_read_nolock(NULL, spa, &blk,
			    arc_getbuf_func, &abuf, ZIO_PRIORITY_SYNC_READ,
			    ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE,
			    &aflags, &zio->io_bookmark);

			if (error == 0) {
				if (arc_buf_size(abuf) != zio->io_orig_size ||
				    bcmp(abuf->b_data, zio->io_orig_data,
				    zio->io_orig_size) != 0)
					error = EEXIST;
				VERIFY(arc_buf_remove_ref(abuf, &abuf) == 1);
			}

			ddt_enter(ddt);
			return (error != 0);
		}
	}

	return (B_FALSE);
}

static void
zio_ddt_child_write_ready(zio_t *zio)
{
	int p = zio->io_prop.zp_copies;
	ddt_t *ddt = ddt_select(zio->io_spa, zio->io_bp);
	ddt_entry_t *dde = zio->io_private;
	ddt_phys_t *ddp = &dde->dde_phys[p];
	zio_t *pio;

	if (zio->io_error)
		return;

	ddt_enter(ddt);

	ASSERT(dde->dde_lead_zio[p] == zio);

	ddt_phys_fill(ddp, zio->io_bp);

	while ((pio = zio_walk_parents(zio)) != NULL)
		ddt_bp_fill(ddp, pio->io_bp, zio->io_txg);

	ddt_exit(ddt);
}

static void
zio_ddt_child_write_done(zio_t *zio)
{
	int p = zio->io_prop.zp_copies;
	ddt_t *ddt = ddt_select(zio->io_spa, zio->io_bp);
	ddt_entry_t *dde = zio->io_private;
	ddt_phys_t *ddp = &dde->dde_phys[p];

	ddt_enter(ddt);

	ASSERT(ddp->ddp_refcnt == 0);
	ASSERT(dde->dde_lead_zio[p] == zio);
	dde->dde_lead_zio[p] = NULL;

	if (zio->io_error == 0) {
		while (zio_walk_parents(zio) != NULL)
			ddt_phys_addref(ddp);
	} else {
		ddt_phys_clear(ddp);
	}

	ddt_exit(ddt);
}

static void
zio_ddt_ditto_write_done(zio_t *zio)
{
	int p = DDT_PHYS_DITTO;
	zio_prop_t *zp = &zio->io_prop;
	blkptr_t *bp = zio->io_bp;
	ddt_t *ddt = ddt_select(zio->io_spa, bp);
	ddt_entry_t *dde = zio->io_private;
	ddt_phys_t *ddp = &dde->dde_phys[p];
	ddt_key_t *ddk = &dde->dde_key;

	ddt_enter(ddt);

	ASSERT(ddp->ddp_refcnt == 0);
	ASSERT(dde->dde_lead_zio[p] == zio);
	dde->dde_lead_zio[p] = NULL;

	if (zio->io_error == 0) {
		ASSERT(ZIO_CHECKSUM_EQUAL(bp->blk_cksum, ddk->ddk_cksum));
		ASSERT(zp->zp_copies < SPA_DVAS_PER_BP);
		ASSERT(zp->zp_copies == BP_GET_NDVAS(bp) - BP_IS_GANG(bp));
		if (ddp->ddp_phys_birth != 0)
			ddt_phys_free(ddt, ddk, ddp, zio->io_txg);
		ddt_phys_fill(ddp, bp);
	}

	ddt_exit(ddt);
}
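/*
 * For orientation (informal sketch): a DDT entry carries one ddt_phys_t
 * per copy count, indexed by zp_copies, roughly
 *
 *	dde_phys[DDT_PHYS_DITTO]	extra copies added for redundancy
 *	dde_phys[DDT_PHYS_SINGLE]	blocks written with copies=1
 *	dde_phys[DDT_PHYS_DOUBLE]	blocks written with copies=2
 *	dde_phys[DDT_PHYS_TRIPLE]	blocks written with copies=3
 *
 * which is why the write-ready/done callbacks above select their slot
 * with p = zio->io_prop.zp_copies and the ditto path uses DDT_PHYS_DITTO.
 */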
static int
zio_ddt_write(zio_t *zio)
{
	spa_t *spa = zio->io_spa;
	blkptr_t *bp = zio->io_bp;
	uint64_t txg = zio->io_txg;
	zio_prop_t *zp = &zio->io_prop;
	int p = zp->zp_copies;
	int ditto_copies;
	zio_t *cio = NULL;
	zio_t *dio = NULL;
	ddt_t *ddt = ddt_select(spa, bp);
	ddt_entry_t *dde;
	ddt_phys_t *ddp;

	ASSERT(BP_GET_DEDUP(bp));
	ASSERT(BP_GET_CHECKSUM(bp) == zp->zp_checksum);
	ASSERT(BP_IS_HOLE(bp) || zio->io_bp_override);

	ddt_enter(ddt);
	dde = ddt_lookup(ddt, bp, B_TRUE);
	ddp = &dde->dde_phys[p];

	if (zp->zp_dedup_verify && zio_ddt_collision(zio, ddt, dde)) {
		/*
		 * If we're using a weak checksum, upgrade to a strong checksum
		 * and try again.  If we're already using a strong checksum,
		 * we can't resolve it, so just convert to an ordinary write.
		 * (And automatically e-mail a paper to Nature?)
		 */
		if (!zio_checksum_table[zp->zp_checksum].ci_dedup) {
			zp->zp_checksum = spa_dedup_checksum(spa);
			zio_pop_transforms(zio);
			zio->io_stage = ZIO_STAGE_OPEN;
			BP_ZERO(bp);
		} else {
			zp->zp_dedup = 0;
		}
		zio->io_pipeline = ZIO_WRITE_PIPELINE;
		ddt_exit(ddt);
		return (ZIO_PIPELINE_CONTINUE);
	}

	ditto_copies = ddt_ditto_copies_needed(ddt, dde, ddp);
	ASSERT(ditto_copies < SPA_DVAS_PER_BP);

	if (ditto_copies > ddt_ditto_copies_present(dde) &&
	    dde->dde_lead_zio[DDT_PHYS_DITTO] == NULL) {
		zio_prop_t czp = *zp;

		czp.zp_copies = ditto_copies;

		/*
		 * If we arrived here with an override bp, we won't have run
		 * the transform stack, so we won't have the data we need to
		 * generate a child i/o.  So, toss the override bp and restart.
		 * This is safe, because using the override bp is just an
		 * optimization; and it's rare, so the cost doesn't matter.
		 */
		if (zio->io_bp_override) {
			zio_pop_transforms(zio);
			zio->io_stage = ZIO_STAGE_OPEN;
			zio->io_pipeline = ZIO_WRITE_PIPELINE;
			zio->io_bp_override = NULL;
			BP_ZERO(bp);
			ddt_exit(ddt);
			return (ZIO_PIPELINE_CONTINUE);
		}

		dio = zio_write(zio, spa, txg, bp, zio->io_orig_data,
		    zio->io_orig_size, &czp, NULL,
		    zio_ddt_ditto_write_done, dde, zio->io_priority,
		    ZIO_DDT_CHILD_FLAGS(zio), &zio->io_bookmark);

		zio_push_transform(dio, zio->io_data, zio->io_size, 0, NULL);
		dde->dde_lead_zio[DDT_PHYS_DITTO] = dio;
	}

	if (ddp->ddp_phys_birth != 0 || dde->dde_lead_zio[p] != NULL) {
		if (ddp->ddp_phys_birth != 0)
			ddt_bp_fill(ddp, bp, txg);
		if (dde->dde_lead_zio[p] != NULL)
			zio_add_child(zio, dde->dde_lead_zio[p]);
		else
			ddt_phys_addref(ddp);
	} else if (zio->io_bp_override) {
		ASSERT(bp->blk_birth == txg);
		ASSERT(BP_EQUAL(bp, zio->io_bp_override));
		ddt_phys_fill(ddp, bp);
		ddt_phys_addref(ddp);
	} else {
		cio = zio_write(zio, spa, txg, bp, zio->io_orig_data,
		    zio->io_orig_size, zp, zio_ddt_child_write_ready,
		    zio_ddt_child_write_done, dde, zio->io_priority,
		    ZIO_DDT_CHILD_FLAGS(zio), &zio->io_bookmark);

		zio_push_transform(cio, zio->io_data, zio->io_size, 0, NULL);
		dde->dde_lead_zio[p] = cio;
	}

	ddt_exit(ddt);

	if (cio)
		zio_nowait(cio);
	if (dio)
		zio_nowait(dio);

	return (ZIO_PIPELINE_CONTINUE);
}

ddt_entry_t *freedde;			/* for debugging */

static int
zio_ddt_free(zio_t *zio)
{
	spa_t *spa = zio->io_spa;
	blkptr_t *bp = zio->io_bp;
	ddt_t *ddt = ddt_select(spa, bp);
	ddt_entry_t *dde;
	ddt_phys_t *ddp;

	ASSERT(BP_GET_DEDUP(bp));
	ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);

	ddt_enter(ddt);
	freedde = dde = ddt_lookup(ddt, bp, B_TRUE);
	ddp = ddt_phys_select(dde, bp);
	ddt_phys_decref(ddp);
	ddt_exit(ddt);

	return (ZIO_PIPELINE_CONTINUE);
}
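/*
 * Decision summary for zio_ddt_write() above (informal):
 *
 *	1. The DDT already has a live phys entry, or a leader in flight,
 *	   for this copy count: point bp at the existing block and either
 *	   take a refcount or hang off the in-flight leader as a child.
 *	2. We have a valid override bp (e.g. from dmu_sync()): adopt it
 *	   as the deduped copy and take the first reference.
 *	3. Otherwise: issue a real child write (cio) and record it as the
 *	   lead zio so concurrent writers of identical data can join it.
 */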
/*
 * ==========================================================================
 * Allocate and free blocks
 * ==========================================================================
 */
static int
zio_dva_allocate(zio_t *zio)
{
	spa_t *spa = zio->io_spa;
	metaslab_class_t *mc = spa_normal_class(spa);
	blkptr_t *bp = zio->io_bp;
	int error;

	if (zio->io_gang_leader == NULL) {
		ASSERT(zio->io_child_type > ZIO_CHILD_GANG);
		zio->io_gang_leader = zio;
	}

	ASSERT(BP_IS_HOLE(bp));
	ASSERT3U(BP_GET_NDVAS(bp), ==, 0);
	ASSERT3U(zio->io_prop.zp_copies, >, 0);
	ASSERT3U(zio->io_prop.zp_copies, <=, spa_max_replication(spa));
	ASSERT3U(zio->io_size, ==, BP_GET_PSIZE(bp));

	error = metaslab_alloc(spa, mc, zio->io_size, bp,
	    zio->io_prop.zp_copies, zio->io_txg, NULL, 0);

	if (error) {
		if (error == ENOSPC && zio->io_size > SPA_MINBLOCKSIZE)
			return (zio_write_gang_block(zio));
		zio->io_error = error;
	}

	return (ZIO_PIPELINE_CONTINUE);
}

static int
zio_dva_free(zio_t *zio)
{
	metaslab_free(zio->io_spa, zio->io_bp, zio->io_txg, B_FALSE);

	return (ZIO_PIPELINE_CONTINUE);
}

static int
zio_dva_claim(zio_t *zio)
{
	int error;

	error = metaslab_claim(zio->io_spa, zio->io_bp, zio->io_txg);
	if (error)
		zio->io_error = error;

	return (ZIO_PIPELINE_CONTINUE);
}

/*
 * Undo an allocation.  This is used by zio_done() when an I/O fails
 * and we want to give back the block we just allocated.
 * This handles both normal blocks and gang blocks.
 */
static void
zio_dva_unallocate(zio_t *zio, zio_gang_node_t *gn, blkptr_t *bp)
{
	ASSERT(bp->blk_birth == zio->io_txg || BP_IS_HOLE(bp));
	ASSERT(zio->io_bp_override == NULL);

	if (!BP_IS_HOLE(bp))
		metaslab_free(zio->io_spa, bp, bp->blk_birth, B_TRUE);

	if (gn != NULL) {
		for (int g = 0; g < SPA_GBH_NBLKPTRS; g++) {
			zio_dva_unallocate(zio, gn->gn_child[g],
			    &gn->gn_gbh->zg_blkptr[g]);
		}
	}
}

/*
 * Try to allocate an intent log block.  Return 0 on success, errno on failure.
 */
int
zio_alloc_zil(spa_t *spa, uint64_t txg, blkptr_t *new_bp, blkptr_t *old_bp,
    uint64_t size, boolean_t use_slog)
{
	int error = 1;

	ASSERT(txg > spa_syncing_txg(spa));

	if (use_slog)
		error = metaslab_alloc(spa, spa_log_class(spa), size,
		    new_bp, 1, txg, old_bp, METASLAB_HINTBP_AVOID);

	if (error)
		error = metaslab_alloc(spa, spa_normal_class(spa), size,
		    new_bp, 1, txg, old_bp, METASLAB_HINTBP_AVOID);

	if (error == 0) {
		BP_SET_LSIZE(new_bp, size);
		BP_SET_PSIZE(new_bp, size);
		BP_SET_COMPRESS(new_bp, ZIO_COMPRESS_OFF);
		BP_SET_CHECKSUM(new_bp,
		    spa_version(spa) >= SPA_VERSION_SLIM_ZIL
		    ? ZIO_CHECKSUM_ZILOG2 : ZIO_CHECKSUM_ZILOG);
		BP_SET_TYPE(new_bp, DMU_OT_INTENT_LOG);
		BP_SET_LEVEL(new_bp, 0);
		BP_SET_DEDUP(new_bp, 0);
		BP_SET_BYTEORDER(new_bp, ZFS_HOST_BYTEORDER);
	}

	return (error);
}

/*
 * Free an intent log block.
 */
void
zio_free_zil(spa_t *spa, uint64_t txg, blkptr_t *bp)
{
	ASSERT(BP_GET_TYPE(bp) == DMU_OT_INTENT_LOG);
	ASSERT(!BP_IS_GANG(bp));

	zio_free(spa, txg, bp);
}
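/*
 * Note on the fallback above (informal): error starts at 1 so that when
 * use_slog is B_FALSE the slog attempt is skipped and the normal class
 * is tried directly; if a dedicated log device exists but its allocation
 * fails, the second metaslab_alloc() transparently places the log block
 * in the normal class rather than failing the ZIL write.
 */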
/*
 * ==========================================================================
 * Read and write to physical devices
 * ==========================================================================
 */
static int
zio_vdev_io_start(zio_t *zio)
{
	vdev_t *vd = zio->io_vd;
	uint64_t align;
	spa_t *spa = zio->io_spa;

	ASSERT(zio->io_error == 0);
	ASSERT(zio->io_child_error[ZIO_CHILD_VDEV] == 0);

	if (vd == NULL) {
		if (!(zio->io_flags & ZIO_FLAG_CONFIG_WRITER))
			spa_config_enter(spa, SCL_ZIO, zio, RW_READER);

		/*
		 * The mirror_ops handle multiple DVAs in a single BP.
		 */
		return (vdev_mirror_ops.vdev_op_io_start(zio));
	}

	/*
	 * We keep track of time-sensitive I/Os so that the scan thread
	 * can quickly react to certain workloads.  In particular, we care
	 * about non-scrubbing, top-level reads and writes with the following
	 * characteristics:
	 *	- synchronous writes of user data to non-slog devices
	 *	- any reads of user data
	 * When these conditions are met, adjust the timestamp of spa_last_io
	 * which allows the scan thread to adjust its workload accordingly.
	 */
	if (!(zio->io_flags & ZIO_FLAG_SCAN_THREAD) && zio->io_bp != NULL &&
	    vd == vd->vdev_top && !vd->vdev_islog &&
	    zio->io_bookmark.zb_objset != DMU_META_OBJSET &&
	    zio->io_txg != spa_syncing_txg(spa)) {
		uint64_t old = spa->spa_last_io;
		uint64_t new = ddi_get_lbolt64();
		if (old != new)
			(void) atomic_cas_64(&spa->spa_last_io, old, new);
	}

	align = 1ULL << vd->vdev_top->vdev_ashift;

	if (P2PHASE(zio->io_size, align) != 0) {
		uint64_t asize = P2ROUNDUP(zio->io_size, align);
		char *abuf = zio_buf_alloc(asize);
		ASSERT(vd == vd->vdev_top);
		if (zio->io_type == ZIO_TYPE_WRITE) {
			bcopy(zio->io_data, abuf, zio->io_size);
			bzero(abuf + zio->io_size, asize - zio->io_size);
		}
		zio_push_transform(zio, abuf, asize, asize, zio_subblock);
	}

	ASSERT(P2PHASE(zio->io_offset, align) == 0);
	ASSERT(P2PHASE(zio->io_size, align) == 0);
	VERIFY(zio->io_type != ZIO_TYPE_WRITE || spa_writeable(spa));

	/*
	 * If this is a repair I/O, and there's no self-healing involved --
	 * that is, we're just resilvering what we expect to resilver --
	 * then don't do the I/O unless zio's txg is actually in vd's DTL.
	 * This prevents spurious resilvering with nested replication.
	 * For example, given a mirror of mirrors, (A+B)+(C+D), if only
	 * A is out of date, we'll read from C+D, then use the data to
	 * resilver A+B -- but we don't actually want to resilver B, just A.
	 * The top-level mirror has no way to know this, so instead we just
	 * discard unnecessary repairs as we work our way down the vdev tree.
	 * The same logic applies to any form of nested replication:
	 * ditto + mirror, RAID-Z + replacing, etc.  This covers them all.
	 */
	if ((zio->io_flags & ZIO_FLAG_IO_REPAIR) &&
	    !(zio->io_flags & ZIO_FLAG_SELF_HEAL) &&
	    zio->io_txg != 0 &&	/* not a delegated i/o */
	    !vdev_dtl_contains(vd, DTL_PARTIAL, zio->io_txg, 1)) {
		ASSERT(zio->io_type == ZIO_TYPE_WRITE);
		zio_vdev_io_bypass(zio);
		return (ZIO_PIPELINE_CONTINUE);
	}

	if (vd->vdev_ops->vdev_op_leaf &&
	    (zio->io_type == ZIO_TYPE_READ || zio->io_type == ZIO_TYPE_WRITE)) {

		if (zio->io_type == ZIO_TYPE_READ && vdev_cache_read(zio) == 0)
			return (ZIO_PIPELINE_CONTINUE);

		if ((zio = vdev_queue_io(zio)) == NULL)
			return (ZIO_PIPELINE_STOP);

		if (!vdev_accessible(vd, zio)) {
			zio->io_error = ENXIO;
			zio_interrupt(zio);
			return (ZIO_PIPELINE_STOP);
		}
	}

	return (vd->vdev_ops->vdev_op_io_start(zio));
}
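/*
 * Alignment example for the padding above (hypothetical sizes): on a
 * top-level vdev with vdev_ashift == 12, align is 4096; a 6144-byte
 * write has P2PHASE(6144, 4096) == 2048, so it is copied into an
 * 8192-byte buffer, zero-filled from offset 6144 to 8192, and pushed
 * as a transform so the device only ever sees ashift-aligned I/O.
 */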
static int
zio_vdev_io_done(zio_t *zio)
{
	vdev_t *vd = zio->io_vd;
	vdev_ops_t *ops = vd ? vd->vdev_ops : &vdev_mirror_ops;
	boolean_t unexpected_error = B_FALSE;

	if (zio_wait_for_children(zio, ZIO_CHILD_VDEV, ZIO_WAIT_DONE))
		return (ZIO_PIPELINE_STOP);

	ASSERT(zio->io_type == ZIO_TYPE_READ || zio->io_type == ZIO_TYPE_WRITE);

	if (vd != NULL && vd->vdev_ops->vdev_op_leaf) {

		vdev_queue_io_done(zio);

		if (zio->io_type == ZIO_TYPE_WRITE)
			vdev_cache_write(zio);

		if (zio_injection_enabled && zio->io_error == 0)
			zio->io_error = zio_handle_device_injection(vd,
			    zio, EIO);

		if (zio_injection_enabled && zio->io_error == 0)
			zio->io_error = zio_handle_label_injection(zio, EIO);

		if (zio->io_error) {
			if (!vdev_accessible(vd, zio)) {
				zio->io_error = ENXIO;
			} else {
				unexpected_error = B_TRUE;
			}
		}
	}

	ops->vdev_op_io_done(zio);

	if (unexpected_error)
		VERIFY(vdev_probe(vd, zio) == NULL);

	return (ZIO_PIPELINE_CONTINUE);
}

/*
 * For non-raidz ZIOs, we can just copy aside the bad data read from the
 * disk, and use that to finish the checksum ereport later.
 */
static void
zio_vsd_default_cksum_finish(zio_cksum_report_t *zcr,
    const void *good_buf)
{
	/* no processing needed */
	zfs_ereport_finish_checksum(zcr, good_buf, zcr->zcr_cbdata, B_FALSE);
}

/*ARGSUSED*/
void
zio_vsd_default_cksum_report(zio_t *zio, zio_cksum_report_t *zcr, void *ignored)
{
	void *buf = zio_buf_alloc(zio->io_size);

	bcopy(zio->io_data, buf, zio->io_size);

	zcr->zcr_cbinfo = zio->io_size;
	zcr->zcr_cbdata = buf;
	zcr->zcr_finish = zio_vsd_default_cksum_finish;
	zcr->zcr_free = zio_buf_free;
}

static int
zio_vdev_io_assess(zio_t *zio)
{
	vdev_t *vd = zio->io_vd;

	if (zio_wait_for_children(zio, ZIO_CHILD_VDEV, ZIO_WAIT_DONE))
		return (ZIO_PIPELINE_STOP);

	if (vd == NULL && !(zio->io_flags & ZIO_FLAG_CONFIG_WRITER))
		spa_config_exit(zio->io_spa, SCL_ZIO, zio);

	if (zio->io_vsd != NULL) {
		zio->io_vsd_ops->vsd_free(zio);
		zio->io_vsd = NULL;
	}

	if (zio_injection_enabled && zio->io_error == 0)
		zio->io_error = zio_handle_fault_injection(zio, EIO);

	/*
	 * If the I/O failed, determine whether we should attempt to retry it.
	 *
	 * On retry, we cut in line in the issue queue, since we don't want
	 * compression/checksumming/etc. work to prevent our (cheap) IO reissue.
	 */
	if (zio->io_error && vd == NULL &&
	    !(zio->io_flags & (ZIO_FLAG_DONT_RETRY | ZIO_FLAG_IO_RETRY))) {
		ASSERT(!(zio->io_flags & ZIO_FLAG_DONT_QUEUE));	/* not a leaf */
		ASSERT(!(zio->io_flags & ZIO_FLAG_IO_BYPASS));	/* not a leaf */
		zio->io_error = 0;
		zio->io_flags |= ZIO_FLAG_IO_RETRY |
		    ZIO_FLAG_DONT_CACHE | ZIO_FLAG_DONT_AGGREGATE;
		zio->io_stage = ZIO_STAGE_VDEV_IO_START >> 1;
		zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE,
		    zio_requeue_io_start_cut_in_line);
		return (ZIO_PIPELINE_STOP);
	}

	/*
	 * If we got an error on a leaf device, convert it to ENXIO
	 * if the device is not accessible at all.
	 */
	if (zio->io_error && vd != NULL && vd->vdev_ops->vdev_op_leaf &&
	    !vdev_accessible(vd, zio))
		zio->io_error = ENXIO;

	/*
	 * If we can't write to an interior vdev (mirror or RAID-Z),
	 * set vdev_cant_write so that we stop trying to allocate from it.
	 */
	if (zio->io_error == ENXIO && zio->io_type == ZIO_TYPE_WRITE &&
	    vd != NULL && !vd->vdev_ops->vdev_op_leaf)
		vd->vdev_cant_write = B_TRUE;

	if (zio->io_error)
		zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;

	return (ZIO_PIPELINE_CONTINUE);
}
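/*
 * How the retry above rewinds the pipeline (informal): each zio stage is
 * a distinct bit, ordered by significance, and zio_execute() advances to
 * the next bit in io_pipeline greater than io_stage.  Setting
 *
 *	zio->io_stage = ZIO_STAGE_VDEV_IO_START >> 1;
 *
 * parks the zio just before VDEV_IO_START, so the re-dispatch runs the
 * vdev I/O stages again without repeating the earlier, more expensive,
 * compression and checksum stages.
 */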
void
zio_vdev_io_reissue(zio_t *zio)
{
	ASSERT(zio->io_stage == ZIO_STAGE_VDEV_IO_START);
	ASSERT(zio->io_error == 0);

	zio->io_stage >>= 1;
}

void
zio_vdev_io_redone(zio_t *zio)
{
	ASSERT(zio->io_stage == ZIO_STAGE_VDEV_IO_DONE);

	zio->io_stage >>= 1;
}

void
zio_vdev_io_bypass(zio_t *zio)
{
	ASSERT(zio->io_stage == ZIO_STAGE_VDEV_IO_START);
	ASSERT(zio->io_error == 0);

	zio->io_flags |= ZIO_FLAG_IO_BYPASS;
	zio->io_stage = ZIO_STAGE_VDEV_IO_ASSESS >> 1;
}

/*
 * ==========================================================================
 * Generate and verify checksums
 * ==========================================================================
 */
static int
zio_checksum_generate(zio_t *zio)
{
	blkptr_t *bp = zio->io_bp;
	enum zio_checksum checksum;

	if (bp == NULL) {
		/*
		 * This is zio_write_phys().
		 * We're either generating a label checksum, or none at all.
		 */
		checksum = zio->io_prop.zp_checksum;

		if (checksum == ZIO_CHECKSUM_OFF)
			return (ZIO_PIPELINE_CONTINUE);

		ASSERT(checksum == ZIO_CHECKSUM_LABEL);
	} else {
		if (BP_IS_GANG(bp) && zio->io_child_type == ZIO_CHILD_GANG) {
			ASSERT(!IO_IS_ALLOCATING(zio));
			checksum = ZIO_CHECKSUM_GANG_HEADER;
		} else {
			checksum = BP_GET_CHECKSUM(bp);
		}
	}

	zio_checksum_compute(zio, checksum, zio->io_data, zio->io_size);

	return (ZIO_PIPELINE_CONTINUE);
}

static int
zio_checksum_verify(zio_t *zio)
{
	zio_bad_cksum_t info;
	blkptr_t *bp = zio->io_bp;
	int error;

	ASSERT(zio->io_vd != NULL);

	if (bp == NULL) {
		/*
		 * This is zio_read_phys().
		 * We're either verifying a label checksum, or nothing at all.
		 */
		if (zio->io_prop.zp_checksum == ZIO_CHECKSUM_OFF)
			return (ZIO_PIPELINE_CONTINUE);

		ASSERT(zio->io_prop.zp_checksum == ZIO_CHECKSUM_LABEL);
	}

	if ((error = zio_checksum_error(zio, &info)) != 0) {
		zio->io_error = error;
		if (!(zio->io_flags & ZIO_FLAG_SPECULATIVE)) {
			zfs_ereport_start_checksum(zio->io_spa,
			    zio->io_vd, zio, zio->io_offset,
			    zio->io_size, NULL, &info);
		}
	}

	return (ZIO_PIPELINE_CONTINUE);
}

/*
 * Called by RAID-Z to ensure we don't compute the checksum twice.
 */
void
zio_checksum_verified(zio_t *zio)
{
	zio->io_pipeline &= ~ZIO_STAGE_CHECKSUM_VERIFY;
}
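/*
 * Checksum selection, summarized (informal): label reads and writes
 * (bp == NULL) use ZIO_CHECKSUM_LABEL or none; a gang child writing the
 * gang header uses ZIO_CHECKSUM_GANG_HEADER, whose checksum lives in the
 * header's embedded tail rather than in a parent blkptr; everything else
 * takes whatever algorithm is recorded in the block pointer via
 * BP_GET_CHECKSUM().
 */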
/*
 * ==========================================================================
 * Error rank.  Errors are ranked in the order 0, ENXIO, ECKSUM, EIO, other.
 * An error of 0 indicates success.  ENXIO indicates whole-device failure,
 * which may be transient (e.g. unplugged) or permanent.  ECKSUM and EIO
 * indicate errors that are specific to one I/O, and most likely permanent.
 * Any other error is presumed to be worse because we weren't expecting it.
 * ==========================================================================
 */
int
zio_worst_error(int e1, int e2)
{
	static int zio_error_rank[] = { 0, ENXIO, ECKSUM, EIO };
	int r1, r2;

	for (r1 = 0; r1 < sizeof (zio_error_rank) / sizeof (int); r1++)
		if (e1 == zio_error_rank[r1])
			break;

	for (r2 = 0; r2 < sizeof (zio_error_rank) / sizeof (int); r2++)
		if (e2 == zio_error_rank[r2])
			break;

	return (r1 > r2 ? e1 : e2);
}
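/*
 * Example of the ranking convention (informal): zio_worst_error(ENXIO,
 * EIO) returns EIO, and zio_worst_error(EIO, EINVAL) returns EINVAL,
 * since an error missing from zio_error_rank[] runs off the end of the
 * table and therefore outranks everything listed.
 */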
/*
 * ==========================================================================
 * I/O completion
 * ==========================================================================
 */
static int
zio_ready(zio_t *zio)
{
	blkptr_t *bp = zio->io_bp;
	zio_t *pio, *pio_next;

	if (zio_wait_for_children(zio, ZIO_CHILD_GANG, ZIO_WAIT_READY) ||
	    zio_wait_for_children(zio, ZIO_CHILD_DDT, ZIO_WAIT_READY))
		return (ZIO_PIPELINE_STOP);

	if (zio->io_ready) {
		ASSERT(IO_IS_ALLOCATING(zio));
		ASSERT(bp->blk_birth == zio->io_txg || BP_IS_HOLE(bp));
		ASSERT(zio->io_children[ZIO_CHILD_GANG][ZIO_WAIT_READY] == 0);

		zio->io_ready(zio);
	}

	if (bp != NULL && bp != &zio->io_bp_copy)
		zio->io_bp_copy = *bp;

	if (zio->io_error)
		zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;

	mutex_enter(&zio->io_lock);
	zio->io_state[ZIO_WAIT_READY] = 1;
	pio = zio_walk_parents(zio);
	mutex_exit(&zio->io_lock);

	/*
	 * As we notify zio's parents, new parents could be added.
	 * New parents go to the head of zio's io_parent_list, however,
	 * so we will (correctly) not notify them.  The remainder of zio's
	 * io_parent_list, from 'pio_next' onward, cannot change because
	 * all parents must wait for us to be done before they can be done.
	 */
	for (; pio != NULL; pio = pio_next) {
		pio_next = zio_walk_parents(zio);
		zio_notify_parent(pio, zio, ZIO_WAIT_READY);
	}

	if (zio->io_flags & ZIO_FLAG_NODATA) {
		if (BP_IS_GANG(bp)) {
			zio->io_flags &= ~ZIO_FLAG_NODATA;
		} else {
			ASSERT((uintptr_t)zio->io_data < SPA_MAXBLOCKSIZE);
			zio->io_pipeline &= ~ZIO_VDEV_IO_STAGES;
		}
	}

	if (zio_injection_enabled &&
	    zio->io_spa->spa_syncing_txg == zio->io_txg)
		zio_handle_ignored_writes(zio);

	return (ZIO_PIPELINE_CONTINUE);
}

static int
zio_done(zio_t *zio)
{
	spa_t *spa = zio->io_spa;
	zio_t *lio = zio->io_logical;
	blkptr_t *bp = zio->io_bp;
	vdev_t *vd = zio->io_vd;
	uint64_t psize = zio->io_size;
	zio_t *pio, *pio_next;

	/*
	 * If our children haven't all completed,
	 * wait for them and then repeat this pipeline stage.
	 */
	if (zio_wait_for_children(zio, ZIO_CHILD_VDEV, ZIO_WAIT_DONE) ||
	    zio_wait_for_children(zio, ZIO_CHILD_GANG, ZIO_WAIT_DONE) ||
	    zio_wait_for_children(zio, ZIO_CHILD_DDT, ZIO_WAIT_DONE) ||
	    zio_wait_for_children(zio, ZIO_CHILD_LOGICAL, ZIO_WAIT_DONE))
		return (ZIO_PIPELINE_STOP);

	for (int c = 0; c < ZIO_CHILD_TYPES; c++)
		for (int w = 0; w < ZIO_WAIT_TYPES; w++)
			ASSERT(zio->io_children[c][w] == 0);

	if (bp != NULL) {
		ASSERT(bp->blk_pad[0] == 0);
		ASSERT(bp->blk_pad[1] == 0);
		ASSERT(bcmp(bp, &zio->io_bp_copy, sizeof (blkptr_t)) == 0 ||
		    (bp == zio_unique_parent(zio)->io_bp));
		if (zio->io_type == ZIO_TYPE_WRITE && !BP_IS_HOLE(bp) &&
		    zio->io_bp_override == NULL &&
		    !(zio->io_flags & ZIO_FLAG_IO_REPAIR)) {
			ASSERT(!BP_SHOULD_BYTESWAP(bp));
			ASSERT3U(zio->io_prop.zp_copies, <=, BP_GET_NDVAS(bp));
			ASSERT(BP_COUNT_GANG(bp) == 0 ||
			    (BP_COUNT_GANG(bp) == BP_GET_NDVAS(bp)));
		}
	}

	/*
	 * If there were child vdev/gang/ddt errors, they apply to us now.
	 */
	zio_inherit_child_errors(zio, ZIO_CHILD_VDEV);
	zio_inherit_child_errors(zio, ZIO_CHILD_GANG);
	zio_inherit_child_errors(zio, ZIO_CHILD_DDT);

	/*
	 * If the I/O on the transformed data was successful, generate any
	 * checksum reports now while we still have the transformed data.
	 */
	if (zio->io_error == 0) {
		while (zio->io_cksum_report != NULL) {
			zio_cksum_report_t *zcr = zio->io_cksum_report;
			uint64_t align = zcr->zcr_align;
			uint64_t asize = P2ROUNDUP(psize, align);
			char *abuf = zio->io_data;

			if (asize != psize) {
				abuf = zio_buf_alloc(asize);
				bcopy(zio->io_data, abuf, psize);
				bzero(abuf + psize, asize - psize);
			}

			zio->io_cksum_report = zcr->zcr_next;
			zcr->zcr_next = NULL;
			zcr->zcr_finish(zcr, abuf);
			zfs_ereport_free_checksum(zcr);

			if (asize != psize)
				zio_buf_free(abuf, asize);
		}
	}

	zio_pop_transforms(zio);	/* note: may set zio->io_error */

	vdev_stat_update(zio, psize);

	if (zio->io_error) {
		/*
		 * If this I/O is attached to a particular vdev,
		 * generate an error message describing the I/O failure
		 * at the block level.  We ignore these errors if the
		 * device is currently unavailable.
		 */
		if (zio->io_error != ECKSUM && vd != NULL && !vdev_is_dead(vd))
			zfs_ereport_post(FM_EREPORT_ZFS_IO, spa, vd, zio, 0, 0);

		if ((zio->io_error == EIO || !(zio->io_flags &
		    (ZIO_FLAG_SPECULATIVE | ZIO_FLAG_DONT_PROPAGATE))) &&
		    zio == lio) {
			/*
			 * For logical I/O requests, tell the SPA to log the
			 * error and generate a logical data ereport.
			 */
			spa_log_error(spa, zio);
			zfs_ereport_post(FM_EREPORT_ZFS_DATA, spa, NULL, zio,
			    0, 0);
		}
	}

	if (zio->io_error && zio == lio) {
		/*
		 * Determine whether zio should be reexecuted.  This will
		 * propagate all the way to the root via zio_notify_parent().
		 */
		ASSERT(vd == NULL && bp != NULL);
		ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);

		if (IO_IS_ALLOCATING(zio) &&
		    !(zio->io_flags & ZIO_FLAG_CANFAIL)) {
			if (zio->io_error != ENOSPC)
				zio->io_reexecute |= ZIO_REEXECUTE_NOW;
			else
				zio->io_reexecute |= ZIO_REEXECUTE_SUSPEND;
		}

		if ((zio->io_type == ZIO_TYPE_READ ||
		    zio->io_type == ZIO_TYPE_FREE) &&
		    !(zio->io_flags & ZIO_FLAG_SCAN_THREAD) &&
		    zio->io_error == ENXIO &&
		    spa_load_state(spa) == SPA_LOAD_NONE &&
		    spa_get_failmode(spa) != ZIO_FAILURE_MODE_CONTINUE)
			zio->io_reexecute |= ZIO_REEXECUTE_SUSPEND;

		if (!(zio->io_flags & ZIO_FLAG_CANFAIL) && !zio->io_reexecute)
			zio->io_reexecute |= ZIO_REEXECUTE_SUSPEND;

		/*
		 * Here is a possibly good place to attempt to do
		 * either combinatorial reconstruction or error correction
		 * based on checksums.  It also might be a good place
		 * to send out preliminary ereports before we suspend
		 * processing.
		 */
	}

	/*
	 * If there were logical child errors, they apply to us now.
	 * We defer this until now to avoid conflating logical child
	 * errors with errors that happened to the zio itself when
	 * updating vdev stats and reporting FMA events above.
	 */
	zio_inherit_child_errors(zio, ZIO_CHILD_LOGICAL);

	if ((zio->io_error || zio->io_reexecute) &&
	    IO_IS_ALLOCATING(zio) && zio->io_gang_leader == zio &&
	    !(zio->io_flags & ZIO_FLAG_IO_REWRITE))
		zio_dva_unallocate(zio, zio->io_gang_tree, bp);

	zio_gang_tree_free(&zio->io_gang_tree);

	/*
	 * Godfather I/Os should never suspend.
	 */
	if ((zio->io_flags & ZIO_FLAG_GODFATHER) &&
	    (zio->io_reexecute & ZIO_REEXECUTE_SUSPEND))
		zio->io_reexecute = 0;

	if (zio->io_reexecute) {
		/*
		 * This is a logical I/O that wants to reexecute.
		 *
		 * Reexecute is top-down.  When an i/o fails, if it's not
		 * the root, it simply notifies its parent and sticks around.
		 * The parent, seeing that it still has children in zio_done(),
		 * does the same.  This percolates all the way up to the root.
		 * The root i/o will reexecute or suspend the entire tree.
		 *
		 * This approach ensures that zio_reexecute() honors
		 * all the original i/o dependency relationships, e.g.
		 * parents not executing until children are ready.
		 */
		ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);

		zio->io_gang_leader = NULL;

		mutex_enter(&zio->io_lock);
		zio->io_state[ZIO_WAIT_DONE] = 1;
		mutex_exit(&zio->io_lock);

		/*
		 * "The Godfather" I/O monitors its children but is
		 * not a true parent to them.  It will track them through
		 * the pipeline but severs its ties whenever they get into
		 * trouble (e.g. suspended).  This allows "The Godfather"
		 * I/O to return status without blocking.
		 */
		for (pio = zio_walk_parents(zio); pio != NULL; pio = pio_next) {
			zio_link_t *zl = zio->io_walk_link;
			pio_next = zio_walk_parents(zio);

			if ((pio->io_flags & ZIO_FLAG_GODFATHER) &&
			    (zio->io_reexecute & ZIO_REEXECUTE_SUSPEND)) {
				zio_remove_child(pio, zio, zl);
				zio_notify_parent(pio, zio, ZIO_WAIT_DONE);
			}
		}

		if ((pio = zio_unique_parent(zio)) != NULL) {
			/*
			 * We're not a root i/o, so there's nothing to do
			 * but notify our parent.  Don't propagate errors
			 * upward since we haven't permanently failed yet.
			 */
			ASSERT(!(zio->io_flags & ZIO_FLAG_GODFATHER));
			zio->io_flags |= ZIO_FLAG_DONT_PROPAGATE;
			zio_notify_parent(pio, zio, ZIO_WAIT_DONE);
		} else if (zio->io_reexecute & ZIO_REEXECUTE_SUSPEND) {
			/*
			 * We'd fail again if we reexecuted now, so suspend
			 * until conditions improve (e.g. device comes online).
			 */
			zio_suspend(spa, zio);
		} else {
			/*
			 * Reexecution is potentially a huge amount of work.
			 * Hand it off to the otherwise-unused claim taskq.
			 */
#ifdef _KERNEL
			(void) taskq_dispatch_safe(
			    spa->spa_zio_taskq[ZIO_TYPE_CLAIM][ZIO_TASKQ_ISSUE],
			    (task_func_t *)zio_reexecute, zio, TQ_SLEEP,
			    &zio->io_task_issue);
#else
			(void) taskq_dispatch(
			    spa->spa_zio_taskq[ZIO_TYPE_CLAIM][ZIO_TASKQ_ISSUE],
			    (task_func_t *)zio_reexecute, zio, TQ_SLEEP);
#endif
		}
		return (ZIO_PIPELINE_STOP);
	}

	ASSERT(zio->io_child_count == 0);
	ASSERT(zio->io_reexecute == 0);
	ASSERT(zio->io_error == 0 || (zio->io_flags & ZIO_FLAG_CANFAIL));

	/*
	 * Report any checksum errors, since the I/O is complete.
	 */
	while (zio->io_cksum_report != NULL) {
		zio_cksum_report_t *zcr = zio->io_cksum_report;
		zio->io_cksum_report = zcr->zcr_next;
		zcr->zcr_next = NULL;
		zcr->zcr_finish(zcr, NULL);
		zfs_ereport_free_checksum(zcr);
	}

	/*
	 * It is the responsibility of the done callback to ensure that this
	 * particular zio is no longer discoverable for adoption, and as
	 * such, cannot acquire any new parents.
	 */
	if (zio->io_done)
		zio->io_done(zio);

	mutex_enter(&zio->io_lock);
	zio->io_state[ZIO_WAIT_DONE] = 1;
	mutex_exit(&zio->io_lock);

	for (pio = zio_walk_parents(zio); pio != NULL; pio = pio_next) {
		zio_link_t *zl = zio->io_walk_link;
		pio_next = zio_walk_parents(zio);
		zio_remove_child(pio, zio, zl);
		zio_notify_parent(pio, zio, ZIO_WAIT_DONE);
	}

	if (zio->io_waiter != NULL) {
		mutex_enter(&zio->io_lock);
		zio->io_executor = NULL;
		cv_broadcast(&zio->io_cv);
		mutex_exit(&zio->io_lock);
	} else {
		zio_destroy(zio);
	}

	return (ZIO_PIPELINE_STOP);
}

/*
 * ==========================================================================
 * I/O pipeline definition
 * ==========================================================================
 */
static zio_pipe_stage_t *zio_pipeline[] = {
	NULL,
	zio_read_bp_init,
	zio_free_bp_init,
	zio_issue_async,
	zio_write_bp_init,
	zio_checksum_generate,
	zio_ddt_read_start,
	zio_ddt_read_done,
	zio_ddt_write,
	zio_ddt_free,
	zio_gang_assemble,
	zio_gang_issue,
	zio_dva_allocate,
	zio_dva_free,
	zio_dva_claim,
	zio_ready,
	zio_vdev_io_start,
	zio_vdev_io_done,
	zio_vdev_io_assess,
	zio_checksum_verify,
	zio_done
};
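/*
 * Note (informal): the order of entries in zio_pipeline[] must match the
 * order of the zio_stage enum, since zio_execute() maps each stage bit to
 * its handler by bit position; adding or reordering a stage requires
 * updating both in lockstep.
 */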