zio.c revision 230647
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012 by Delphix. All rights reserved.
 */

#include <sys/zfs_context.h>
#include <sys/fm/fs/zfs.h>
#include <sys/spa.h>
#include <sys/txg.h>
#include <sys/spa_impl.h>
#include <sys/vdev_impl.h>
#include <sys/zio_impl.h>
#include <sys/zio_compress.h>
#include <sys/zio_checksum.h>
#include <sys/dmu_objset.h>
#include <sys/arc.h>
#include <sys/ddt.h>

SYSCTL_DECL(_vfs_zfs);
SYSCTL_NODE(_vfs_zfs, OID_AUTO, zio, CTLFLAG_RW, 0, "ZFS ZIO");
static int zio_use_uma = 0;
TUNABLE_INT("vfs.zfs.zio.use_uma", &zio_use_uma);
SYSCTL_INT(_vfs_zfs_zio, OID_AUTO, use_uma, CTLFLAG_RDTUN, &zio_use_uma, 0,
    "Use uma(9) for ZIO allocations");
static int zio_exclude_metadata = 0;
TUNABLE_INT("vfs.zfs.zio.exclude_metadata", &zio_exclude_metadata);
SYSCTL_INT(_vfs_zfs_zio, OID_AUTO, exclude_metadata, CTLFLAG_RDTUN,
    &zio_exclude_metadata, 0,
    "Exclude metadata buffers from dumps as well");

/*
 * ==========================================================================
 * I/O priority table
 * ==========================================================================
 */
uint8_t zio_priority_table[ZIO_PRIORITY_TABLE_SIZE] = {
	0,	/* ZIO_PRIORITY_NOW */
	0,	/* ZIO_PRIORITY_SYNC_READ */
	0,	/* ZIO_PRIORITY_SYNC_WRITE */
	0,	/* ZIO_PRIORITY_LOG_WRITE */
	1,	/* ZIO_PRIORITY_CACHE_FILL */
	1,	/* ZIO_PRIORITY_AGG */
	4,	/* ZIO_PRIORITY_FREE */
	4,	/* ZIO_PRIORITY_ASYNC_WRITE */
	6,	/* ZIO_PRIORITY_ASYNC_READ */
	10,	/* ZIO_PRIORITY_RESILVER */
	20,	/* ZIO_PRIORITY_SCRUB */
	2,	/* ZIO_PRIORITY_DDT_PREFETCH */
};
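/*
 * Roughly speaking, these values are relative scheduling offsets consumed
 * by the vdev queue: smaller numbers are serviced sooner.  "Now" I/O,
 * sync reads/writes, and log writes go to the head of the line, while
 * resilver and scrub I/O tolerate the longest delays.  (The precise
 * interpretation is up to the vdev queue scheduler, not this table.)
 */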
/*
 * ==========================================================================
 * I/O type descriptions
 * ==========================================================================
 */
char *zio_type_name[ZIO_TYPES] = {
	"zio_null", "zio_read", "zio_write", "zio_free", "zio_claim",
	"zio_ioctl"
};

/*
 * ==========================================================================
 * I/O kmem caches
 * ==========================================================================
 */
kmem_cache_t *zio_cache;
kmem_cache_t *zio_link_cache;
kmem_cache_t *zio_buf_cache[SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT];
kmem_cache_t *zio_data_buf_cache[SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT];

#ifdef _KERNEL
extern vmem_t *zio_alloc_arena;
#endif
extern int zfs_mg_alloc_failures;

/*
 * An allocating zio is one that either currently has the DVA allocate
 * stage set or will have it later in its lifetime.
 */
#define	IO_IS_ALLOCATING(zio) ((zio)->io_orig_pipeline & ZIO_STAGE_DVA_ALLOCATE)

boolean_t	zio_requeue_io_start_cut_in_line = B_TRUE;

#ifdef ZFS_DEBUG
int zio_buf_debug_limit = 16384;
#else
int zio_buf_debug_limit = 0;
#endif

void
zio_init(void)
{
	size_t c;
	zio_cache = kmem_cache_create("zio_cache",
	    sizeof (zio_t), 0, NULL, NULL, NULL, NULL, NULL, 0);
	zio_link_cache = kmem_cache_create("zio_link_cache",
	    sizeof (zio_link_t), 0, NULL, NULL, NULL, NULL, NULL, 0);

	/*
	 * For small buffers, we want a cache for each multiple of
	 * SPA_MINBLOCKSIZE.  For medium-size buffers, we want a cache
	 * for each quarter-power of 2.  For large buffers, we want
	 * a cache for each multiple of PAGESIZE.
	 */
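	/*
	 * For example, size = 12K has highest set bit p2 = 8K below, and
	 * p2 >> 2 = 2K; since 12K is a multiple of 2K, it gets its own
	 * 2K-aligned "zio_buf_12288" cache.  A size that matches none of
	 * the three cases gets no dedicated cache and is served by the
	 * next larger one (see the backfill loop further down).
	 */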
	for (c = 0; c < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT; c++) {
		size_t size = (c + 1) << SPA_MINBLOCKSHIFT;
		size_t p2 = size;
		size_t align = 0;
		size_t cflags = (size > zio_buf_debug_limit) ? KMC_NODEBUG : 0;

		while (p2 & (p2 - 1))
			p2 &= p2 - 1;

		if (size <= 4 * SPA_MINBLOCKSIZE) {
			align = SPA_MINBLOCKSIZE;
		} else if (P2PHASE(size, PAGESIZE) == 0) {
			align = PAGESIZE;
		} else if (P2PHASE(size, p2 >> 2) == 0) {
			align = p2 >> 2;
		}

		if (align != 0) {
			char name[36];
			(void) sprintf(name, "zio_buf_%lu", (ulong_t)size);
			zio_buf_cache[c] = kmem_cache_create(name, size,
			    align, NULL, NULL, NULL, NULL, NULL, cflags);

			/*
			 * Since zio_data bufs do not appear in crash dumps, we
			 * pass KMC_NOTOUCH so that no allocator metadata is
			 * stored with the buffers.
			 */
			(void) sprintf(name, "zio_data_buf_%lu", (ulong_t)size);
			zio_data_buf_cache[c] = kmem_cache_create(name, size,
			    align, NULL, NULL, NULL, NULL, NULL,
			    cflags | KMC_NOTOUCH);
		}
	}
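	/*
	 * Backfill: any slot that did not get a dedicated cache above
	 * borrows the cache of the next larger size, so every possible
	 * buffer size maps to some cache.
	 */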
	while (--c != 0) {
		ASSERT(zio_buf_cache[c] != NULL);
		if (zio_buf_cache[c - 1] == NULL)
			zio_buf_cache[c - 1] = zio_buf_cache[c];

		ASSERT(zio_data_buf_cache[c] != NULL);
		if (zio_data_buf_cache[c - 1] == NULL)
			zio_data_buf_cache[c - 1] = zio_data_buf_cache[c];
	}

	/*
	 * The zio write taskqs have 1 thread per cpu, allow 1/2 of the taskqs
	 * to fail 3 times per txg or 8 failures, whichever is greater.
	 */
	if (zfs_mg_alloc_failures == 0)
		zfs_mg_alloc_failures = MAX((3 * max_ncpus / 2), 8);
	else if (zfs_mg_alloc_failures < 8)
		zfs_mg_alloc_failures = 8;

	zio_inject_init();
}

void
zio_fini(void)
{
	size_t c;
	kmem_cache_t *last_cache = NULL;
	kmem_cache_t *last_data_cache = NULL;

	for (c = 0; c < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT; c++) {
		if (zio_buf_cache[c] != last_cache) {
			last_cache = zio_buf_cache[c];
			kmem_cache_destroy(zio_buf_cache[c]);
		}
		zio_buf_cache[c] = NULL;

		if (zio_data_buf_cache[c] != last_data_cache) {
			last_data_cache = zio_data_buf_cache[c];
			kmem_cache_destroy(zio_data_buf_cache[c]);
		}
		zio_data_buf_cache[c] = NULL;
	}

	kmem_cache_destroy(zio_link_cache);
	kmem_cache_destroy(zio_cache);

	zio_inject_fini();
}

/*
 * ==========================================================================
 * Allocate and free I/O buffers
 * ==========================================================================
 */

/*
 * Use zio_buf_alloc to allocate ZFS metadata.  This data will appear in a
 * crashdump if the kernel panics, so use it judiciously.  Obviously, it's
 * useful to inspect ZFS metadata, but if possible, we should avoid keeping
 * excess / transient data in-core during a crashdump.
 */
void *
zio_buf_alloc(size_t size)
{
	size_t c = (size - 1) >> SPA_MINBLOCKSHIFT;
	int flags = zio_exclude_metadata ? KM_NODEBUG : 0;

	ASSERT(c < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);

	if (zio_use_uma)
		return (kmem_cache_alloc(zio_buf_cache[c], KM_PUSHPAGE));
	else
		return (kmem_alloc(size, KM_SLEEP|flags));
}

/*
 * Use zio_data_buf_alloc to allocate data.  The data will not appear in a
 * crashdump if the kernel panics.  This exists so that we will limit the
 * amount of ZFS data that shows up in a kernel crashdump.  (Thus reducing
 * the amount of kernel heap dumped to disk when the kernel panics.)
 */
void *
zio_data_buf_alloc(size_t size)
{
	size_t c = (size - 1) >> SPA_MINBLOCKSHIFT;

	ASSERT(c < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);

	if (zio_use_uma)
		return (kmem_cache_alloc(zio_data_buf_cache[c], KM_PUSHPAGE));
	else
		return (kmem_alloc(size, KM_SLEEP | KM_NODEBUG));
}

void
zio_buf_free(void *buf, size_t size)
{
	size_t c = (size - 1) >> SPA_MINBLOCKSHIFT;

	ASSERT(c < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);

	if (zio_use_uma)
		kmem_cache_free(zio_buf_cache[c], buf);
	else
		kmem_free(buf, size);
}

void
zio_data_buf_free(void *buf, size_t size)
{
	size_t c = (size - 1) >> SPA_MINBLOCKSHIFT;

	ASSERT(c < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);

	if (zio_use_uma)
		kmem_cache_free(zio_data_buf_cache[c], buf);
	else
		kmem_free(buf, size);
}

/*
 * ==========================================================================
 * Push and pop I/O transform buffers
 * ==========================================================================
 */
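/*
 * Transforms stack LIFO-fashion on io_transform_stack: a push swaps a new
 * (data, size) pair into the zio while remembering the original, and a pop
 * at I/O completion runs the optional callback -- e.g. zio_decompress() on
 * the read path -- and restores the original buffer.  A nonzero zt_bufsize
 * marks a buffer the pipeline allocated itself and therefore must free.
 */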
static void
zio_push_transform(zio_t *zio, void *data, uint64_t size, uint64_t bufsize,
    zio_transform_func_t *transform)
{
	zio_transform_t *zt = kmem_alloc(sizeof (zio_transform_t), KM_SLEEP);

	zt->zt_orig_data = zio->io_data;
	zt->zt_orig_size = zio->io_size;
	zt->zt_bufsize = bufsize;
	zt->zt_transform = transform;

	zt->zt_next = zio->io_transform_stack;
	zio->io_transform_stack = zt;

	zio->io_data = data;
	zio->io_size = size;
}

static void
zio_pop_transforms(zio_t *zio)
{
	zio_transform_t *zt;

	while ((zt = zio->io_transform_stack) != NULL) {
		if (zt->zt_transform != NULL)
			zt->zt_transform(zio,
			    zt->zt_orig_data, zt->zt_orig_size);

		if (zt->zt_bufsize != 0)
			zio_buf_free(zio->io_data, zt->zt_bufsize);

		zio->io_data = zt->zt_orig_data;
		zio->io_size = zt->zt_orig_size;
		zio->io_transform_stack = zt->zt_next;

		kmem_free(zt, sizeof (zio_transform_t));
	}
}

/*
 * ==========================================================================
 * I/O transform callbacks for subblocks and decompression
 * ==========================================================================
 */
static void
zio_subblock(zio_t *zio, void *data, uint64_t size)
{
	ASSERT(zio->io_size > size);

	if (zio->io_type == ZIO_TYPE_READ)
		bcopy(zio->io_data, data, size);
}

static void
zio_decompress(zio_t *zio, void *data, uint64_t size)
{
	if (zio->io_error == 0 &&
	    zio_decompress_data(BP_GET_COMPRESS(zio->io_bp),
	    zio->io_data, data, zio->io_size, size) != 0)
		zio->io_error = EIO;
}

/*
 * ==========================================================================
 * I/O parent/child relationships and pipeline interlocks
 * ==========================================================================
 */
/*
 * NOTE - Callers to zio_walk_parents() and zio_walk_children() must
 *        continue calling these functions until they return NULL.
 *        Otherwise, the next caller will pick up the list walk in
 *        some indeterminate state.  (Otherwise every caller would
 *        have to pass in a cookie to keep the state represented by
 *        io_walk_link, which gets annoying.)
 */
zio_t *
zio_walk_parents(zio_t *cio)
{
	zio_link_t *zl = cio->io_walk_link;
	list_t *pl = &cio->io_parent_list;

	zl = (zl == NULL) ? list_head(pl) : list_next(pl, zl);
	cio->io_walk_link = zl;

	if (zl == NULL)
		return (NULL);

	ASSERT(zl->zl_child == cio);
	return (zl->zl_parent);
}

zio_t *
zio_walk_children(zio_t *pio)
{
	zio_link_t *zl = pio->io_walk_link;
	list_t *cl = &pio->io_child_list;

	zl = (zl == NULL) ? list_head(cl) : list_next(cl, zl);
	pio->io_walk_link = zl;

	if (zl == NULL)
		return (NULL);

	ASSERT(zl->zl_parent == pio);
	return (zl->zl_child);
}

zio_t *
zio_unique_parent(zio_t *cio)
{
	zio_t *pio = zio_walk_parents(cio);

	VERIFY(zio_walk_parents(cio) == NULL);
	return (pio);
}

void
zio_add_child(zio_t *pio, zio_t *cio)
{
	zio_link_t *zl = kmem_cache_alloc(zio_link_cache, KM_SLEEP);

	/*
	 * Logical I/Os can have logical, gang, or vdev children.
	 * Gang I/Os can have gang or vdev children.
	 * Vdev I/Os can only have vdev children.
	 * The following ASSERT captures all of these constraints.
	 */
	ASSERT(cio->io_child_type <= pio->io_child_type);

	zl->zl_parent = pio;
	zl->zl_child = cio;

	mutex_enter(&cio->io_lock);
	mutex_enter(&pio->io_lock);

	ASSERT(pio->io_state[ZIO_WAIT_DONE] == 0);

	for (int w = 0; w < ZIO_WAIT_TYPES; w++)
		pio->io_children[cio->io_child_type][w] += !cio->io_state[w];

	list_insert_head(&pio->io_child_list, zl);
	list_insert_head(&cio->io_parent_list, zl);

	pio->io_child_count++;
	cio->io_parent_count++;

	mutex_exit(&pio->io_lock);
	mutex_exit(&cio->io_lock);
}

static void
zio_remove_child(zio_t *pio, zio_t *cio, zio_link_t *zl)
{
	ASSERT(zl->zl_parent == pio);
	ASSERT(zl->zl_child == cio);

	mutex_enter(&cio->io_lock);
	mutex_enter(&pio->io_lock);

	list_remove(&pio->io_child_list, zl);
	list_remove(&cio->io_parent_list, zl);

	pio->io_child_count--;
	cio->io_parent_count--;

	mutex_exit(&pio->io_lock);
	mutex_exit(&cio->io_lock);

	kmem_cache_free(zio_link_cache, zl);
}

static boolean_t
zio_wait_for_children(zio_t *zio, enum zio_child child, enum zio_wait_type wait)
{
	uint64_t *countp = &zio->io_children[child][wait];
	boolean_t waiting = B_FALSE;

	mutex_enter(&zio->io_lock);
	ASSERT(zio->io_stall == NULL);
	if (*countp != 0) {
		zio->io_stage >>= 1;
		zio->io_stall = countp;
		waiting = B_TRUE;
	}
	mutex_exit(&zio->io_lock);

	return (waiting);
}
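/*
 * A note on the stall protocol above: backing io_stage up one bit
 * (io_stage >>= 1) means that when the last outstanding child drains the
 * counter and zio_notify_parent() below restarts the parent via
 * zio_execute(), the stage-advance loop lands on the same stage again,
 * so the stalled stage simply reruns.
 */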
static void
zio_notify_parent(zio_t *pio, zio_t *zio, enum zio_wait_type wait)
{
	uint64_t *countp = &pio->io_children[zio->io_child_type][wait];
	int *errorp = &pio->io_child_error[zio->io_child_type];

	mutex_enter(&pio->io_lock);
	if (zio->io_error && !(zio->io_flags & ZIO_FLAG_DONT_PROPAGATE))
		*errorp = zio_worst_error(*errorp, zio->io_error);
	pio->io_reexecute |= zio->io_reexecute;
	ASSERT3U(*countp, >, 0);
	if (--*countp == 0 && pio->io_stall == countp) {
		pio->io_stall = NULL;
		mutex_exit(&pio->io_lock);
		zio_execute(pio);
	} else {
		mutex_exit(&pio->io_lock);
	}
}

static void
zio_inherit_child_errors(zio_t *zio, enum zio_child c)
{
	if (zio->io_child_error[c] != 0 && zio->io_error == 0)
		zio->io_error = zio->io_child_error[c];
}

/*
 * ==========================================================================
 * Create the various types of I/O (read, write, free, etc)
 * ==========================================================================
 */
static zio_t *
zio_create(zio_t *pio, spa_t *spa, uint64_t txg, const blkptr_t *bp,
    void *data, uint64_t size, zio_done_func_t *done, void *private,
    zio_type_t type, int priority, enum zio_flag flags,
    vdev_t *vd, uint64_t offset, const zbookmark_t *zb,
    enum zio_stage stage, enum zio_stage pipeline)
{
	zio_t *zio;

	ASSERT3U(size, <=, SPA_MAXBLOCKSIZE);
	ASSERT(P2PHASE(size, SPA_MINBLOCKSIZE) == 0);
	ASSERT(P2PHASE(offset, SPA_MINBLOCKSIZE) == 0);

	ASSERT(!vd || spa_config_held(spa, SCL_STATE_ALL, RW_READER));
	ASSERT(!bp || !(flags & ZIO_FLAG_CONFIG_WRITER));
	ASSERT(vd || stage == ZIO_STAGE_OPEN);

	zio = kmem_cache_alloc(zio_cache, KM_SLEEP);
	bzero(zio, sizeof (zio_t));

	mutex_init(&zio->io_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&zio->io_cv, NULL, CV_DEFAULT, NULL);

	list_create(&zio->io_parent_list, sizeof (zio_link_t),
	    offsetof(zio_link_t, zl_parent_node));
	list_create(&zio->io_child_list, sizeof (zio_link_t),
	    offsetof(zio_link_t, zl_child_node));

	if (vd != NULL)
		zio->io_child_type = ZIO_CHILD_VDEV;
	else if (flags & ZIO_FLAG_GANG_CHILD)
		zio->io_child_type = ZIO_CHILD_GANG;
	else if (flags & ZIO_FLAG_DDT_CHILD)
		zio->io_child_type = ZIO_CHILD_DDT;
	else
		zio->io_child_type = ZIO_CHILD_LOGICAL;

	if (bp != NULL) {
		zio->io_bp = (blkptr_t *)bp;
		zio->io_bp_copy = *bp;
		zio->io_bp_orig = *bp;
		if (type != ZIO_TYPE_WRITE ||
		    zio->io_child_type == ZIO_CHILD_DDT)
			zio->io_bp = &zio->io_bp_copy;	/* so caller can free */
		if (zio->io_child_type == ZIO_CHILD_LOGICAL)
			zio->io_logical = zio;
		if (zio->io_child_type > ZIO_CHILD_GANG && BP_IS_GANG(bp))
			pipeline |= ZIO_GANG_STAGES;
	}

	zio->io_spa = spa;
	zio->io_txg = txg;
	zio->io_done = done;
	zio->io_private = private;
	zio->io_type = type;
	zio->io_priority = priority;
	zio->io_vd = vd;
	zio->io_offset = offset;
	zio->io_orig_data = zio->io_data = data;
	zio->io_orig_size = zio->io_size = size;
	zio->io_orig_flags = zio->io_flags = flags;
	zio->io_orig_stage = zio->io_stage = stage;
	zio->io_orig_pipeline = zio->io_pipeline = pipeline;

	zio->io_state[ZIO_WAIT_READY] = (stage >= ZIO_STAGE_READY);
	zio->io_state[ZIO_WAIT_DONE] = (stage >= ZIO_STAGE_DONE);

	if (zb != NULL)
		zio->io_bookmark = *zb;

	if (pio != NULL) {
		if (zio->io_logical == NULL)
			zio->io_logical = pio->io_logical;
		if (zio->io_child_type == ZIO_CHILD_GANG)
			zio->io_gang_leader = pio->io_gang_leader;
		zio_add_child(pio, zio);
	}

	return (zio);
}

static void
zio_destroy(zio_t *zio)
{
	list_destroy(&zio->io_parent_list);
	list_destroy(&zio->io_child_list);
	mutex_destroy(&zio->io_lock);
	cv_destroy(&zio->io_cv);
	kmem_cache_free(zio_cache, zio);
}
zio_t *
zio_null(zio_t *pio, spa_t *spa, vdev_t *vd, zio_done_func_t *done,
    void *private, enum zio_flag flags)
{
	zio_t *zio;

	zio = zio_create(pio, spa, 0, NULL, NULL, 0, done, private,
	    ZIO_TYPE_NULL, ZIO_PRIORITY_NOW, flags, vd, 0, NULL,
	    ZIO_STAGE_OPEN, ZIO_INTERLOCK_PIPELINE);

	return (zio);
}

zio_t *
zio_root(spa_t *spa, zio_done_func_t *done, void *private, enum zio_flag flags)
{
	return (zio_null(NULL, spa, NULL, done, private, flags));
}

zio_t *
zio_read(zio_t *pio, spa_t *spa, const blkptr_t *bp,
    void *data, uint64_t size, zio_done_func_t *done, void *private,
    int priority, enum zio_flag flags, const zbookmark_t *zb)
{
	zio_t *zio;

	zio = zio_create(pio, spa, BP_PHYSICAL_BIRTH(bp), bp,
	    data, size, done, private,
	    ZIO_TYPE_READ, priority, flags, NULL, 0, zb,
	    ZIO_STAGE_OPEN, (flags & ZIO_FLAG_DDT_CHILD) ?
	    ZIO_DDT_CHILD_READ_PIPELINE : ZIO_READ_PIPELINE);

	return (zio);
}

zio_t *
zio_write(zio_t *pio, spa_t *spa, uint64_t txg, blkptr_t *bp,
    void *data, uint64_t size, const zio_prop_t *zp,
    zio_done_func_t *ready, zio_done_func_t *done, void *private,
    int priority, enum zio_flag flags, const zbookmark_t *zb)
{
	zio_t *zio;

	ASSERT(zp->zp_checksum >= ZIO_CHECKSUM_OFF &&
	    zp->zp_checksum < ZIO_CHECKSUM_FUNCTIONS &&
	    zp->zp_compress >= ZIO_COMPRESS_OFF &&
	    zp->zp_compress < ZIO_COMPRESS_FUNCTIONS &&
	    zp->zp_type < DMU_OT_NUMTYPES &&
	    zp->zp_level < 32 &&
	    zp->zp_copies > 0 &&
	    zp->zp_copies <= spa_max_replication(spa) &&
	    zp->zp_dedup <= 1 &&
	    zp->zp_dedup_verify <= 1);

	zio = zio_create(pio, spa, txg, bp, data, size, done, private,
	    ZIO_TYPE_WRITE, priority, flags, NULL, 0, zb,
	    ZIO_STAGE_OPEN, (flags & ZIO_FLAG_DDT_CHILD) ?
	    ZIO_DDT_CHILD_WRITE_PIPELINE : ZIO_WRITE_PIPELINE);

	zio->io_ready = ready;
	zio->io_prop = *zp;

	return (zio);
}
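/*
 * Illustrative sketch of the synchronous logical-read pattern (the names
 * spa/bp/zb here are stand-ins, not a call site in this file):
 *
 *	void *buf = zio_buf_alloc(BP_GET_LSIZE(bp));
 *	error = zio_wait(zio_read(NULL, spa, bp, buf, BP_GET_LSIZE(bp),
 *	    NULL, NULL, ZIO_PRIORITY_SYNC_READ, ZIO_FLAG_CANFAIL, &zb));
 *
 * zio_wait() both executes the pipeline and reaps the zio, so the caller
 * only sees the resulting error code.
 */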
zio_t *
zio_rewrite(zio_t *pio, spa_t *spa, uint64_t txg, blkptr_t *bp, void *data,
    uint64_t size, zio_done_func_t *done, void *private, int priority,
    enum zio_flag flags, zbookmark_t *zb)
{
	zio_t *zio;

	zio = zio_create(pio, spa, txg, bp, data, size, done, private,
	    ZIO_TYPE_WRITE, priority, flags, NULL, 0, zb,
	    ZIO_STAGE_OPEN, ZIO_REWRITE_PIPELINE);

	return (zio);
}

void
zio_write_override(zio_t *zio, blkptr_t *bp, int copies)
{
	ASSERT(zio->io_type == ZIO_TYPE_WRITE);
	ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
	ASSERT(zio->io_stage == ZIO_STAGE_OPEN);
	ASSERT(zio->io_txg == spa_syncing_txg(zio->io_spa));

	zio->io_prop.zp_copies = copies;
	zio->io_bp_override = bp;
}

void
zio_free(spa_t *spa, uint64_t txg, const blkptr_t *bp)
{
	bplist_append(&spa->spa_free_bplist[txg & TXG_MASK], bp);
}

zio_t *
zio_free_sync(zio_t *pio, spa_t *spa, uint64_t txg, const blkptr_t *bp,
    enum zio_flag flags)
{
	zio_t *zio;

	dprintf_bp(bp, "freeing in txg %llu, pass %u",
	    (longlong_t)txg, spa->spa_sync_pass);

	ASSERT(!BP_IS_HOLE(bp));
	ASSERT(spa_syncing_txg(spa) == txg);
	ASSERT(spa_sync_pass(spa) <= SYNC_PASS_DEFERRED_FREE);

	zio = zio_create(pio, spa, txg, bp, NULL, BP_GET_PSIZE(bp),
	    NULL, NULL, ZIO_TYPE_FREE, ZIO_PRIORITY_FREE, flags,
	    NULL, 0, NULL, ZIO_STAGE_OPEN, ZIO_FREE_PIPELINE);

	return (zio);
}

zio_t *
zio_claim(zio_t *pio, spa_t *spa, uint64_t txg, const blkptr_t *bp,
    zio_done_func_t *done, void *private, enum zio_flag flags)
{
	zio_t *zio;

	/*
	 * A claim is an allocation of a specific block.  Claims are needed
	 * to support immediate writes in the intent log.  The issue is that
	 * immediate writes contain committed data, but in a txg that was
	 * *not* committed.  Upon opening the pool after an unclean shutdown,
	 * the intent log claims all blocks that contain immediate write data
	 * so that the SPA knows they're in use.
	 *
	 * All claims *must* be resolved in the first txg -- before the SPA
	 * starts allocating blocks -- so that nothing is allocated twice.
	 * If txg == 0 we just verify that the block is claimable.
	 */
	ASSERT3U(spa->spa_uberblock.ub_rootbp.blk_birth, <, spa_first_txg(spa));
	ASSERT(txg == spa_first_txg(spa) || txg == 0);
	ASSERT(!BP_GET_DEDUP(bp) || !spa_writeable(spa));	/* zdb(1M) */

	zio = zio_create(pio, spa, txg, bp, NULL, BP_GET_PSIZE(bp),
	    done, private, ZIO_TYPE_CLAIM, ZIO_PRIORITY_NOW, flags,
	    NULL, 0, NULL, ZIO_STAGE_OPEN, ZIO_CLAIM_PIPELINE);

	return (zio);
}
zio_t *
zio_ioctl(zio_t *pio, spa_t *spa, vdev_t *vd, int cmd,
    zio_done_func_t *done, void *private, int priority, enum zio_flag flags)
{
	zio_t *zio;
	int c;

	if (vd->vdev_children == 0) {
		zio = zio_create(pio, spa, 0, NULL, NULL, 0, done, private,
		    ZIO_TYPE_IOCTL, priority, flags, vd, 0, NULL,
		    ZIO_STAGE_OPEN, ZIO_IOCTL_PIPELINE);

		zio->io_cmd = cmd;
	} else {
		zio = zio_null(pio, spa, NULL, NULL, NULL, flags);

		for (c = 0; c < vd->vdev_children; c++)
			zio_nowait(zio_ioctl(zio, spa, vd->vdev_child[c], cmd,
			    done, private, priority, flags));
	}

	return (zio);
}

zio_t *
zio_read_phys(zio_t *pio, vdev_t *vd, uint64_t offset, uint64_t size,
    void *data, int checksum, zio_done_func_t *done, void *private,
    int priority, enum zio_flag flags, boolean_t labels)
{
	zio_t *zio;

	ASSERT(vd->vdev_children == 0);
	ASSERT(!labels || offset + size <= VDEV_LABEL_START_SIZE ||
	    offset >= vd->vdev_psize - VDEV_LABEL_END_SIZE);
	ASSERT3U(offset + size, <=, vd->vdev_psize);

	zio = zio_create(pio, vd->vdev_spa, 0, NULL, data, size, done, private,
	    ZIO_TYPE_READ, priority, flags, vd, offset, NULL,
	    ZIO_STAGE_OPEN, ZIO_READ_PHYS_PIPELINE);

	zio->io_prop.zp_checksum = checksum;

	return (zio);
}

zio_t *
zio_write_phys(zio_t *pio, vdev_t *vd, uint64_t offset, uint64_t size,
    void *data, int checksum, zio_done_func_t *done, void *private,
    int priority, enum zio_flag flags, boolean_t labels)
{
	zio_t *zio;

	ASSERT(vd->vdev_children == 0);
	ASSERT(!labels || offset + size <= VDEV_LABEL_START_SIZE ||
	    offset >= vd->vdev_psize - VDEV_LABEL_END_SIZE);
	ASSERT3U(offset + size, <=, vd->vdev_psize);

	zio = zio_create(pio, vd->vdev_spa, 0, NULL, data, size, done, private,
	    ZIO_TYPE_WRITE, priority, flags, vd, offset, NULL,
	    ZIO_STAGE_OPEN, ZIO_WRITE_PHYS_PIPELINE);

	zio->io_prop.zp_checksum = checksum;

	if (zio_checksum_table[checksum].ci_eck) {
		/*
		 * zec checksums are necessarily destructive -- they modify
		 * the end of the write buffer to hold the verifier/checksum.
		 * Therefore, we must make a local copy in case the data is
		 * being written to multiple places in parallel.
		 */
		void *wbuf = zio_buf_alloc(size);
		bcopy(data, wbuf, size);
		zio_push_transform(zio, wbuf, size, size, NULL);
	}

	return (zio);
}
/*
 * Create a child I/O to do some work for us.
 */
zio_t *
zio_vdev_child_io(zio_t *pio, blkptr_t *bp, vdev_t *vd, uint64_t offset,
    void *data, uint64_t size, int type, int priority, enum zio_flag flags,
    zio_done_func_t *done, void *private)
{
	enum zio_stage pipeline = ZIO_VDEV_CHILD_PIPELINE;
	zio_t *zio;

	ASSERT(vd->vdev_parent ==
	    (pio->io_vd ? pio->io_vd : pio->io_spa->spa_root_vdev));

	if (type == ZIO_TYPE_READ && bp != NULL) {
		/*
		 * If we have the bp, then the child should perform the
		 * checksum and the parent need not.  This pushes error
		 * detection as close to the leaves as possible and
		 * eliminates redundant checksums in the interior nodes.
		 */
		pipeline |= ZIO_STAGE_CHECKSUM_VERIFY;
		pio->io_pipeline &= ~ZIO_STAGE_CHECKSUM_VERIFY;
	}

	if (vd->vdev_children == 0)
		offset += VDEV_LABEL_START_SIZE;

	flags |= ZIO_VDEV_CHILD_FLAGS(pio) | ZIO_FLAG_DONT_PROPAGATE;

	/*
	 * If we've decided to do a repair, the write is not speculative --
	 * even if the original read was.
	 */
	if (flags & ZIO_FLAG_IO_REPAIR)
		flags &= ~ZIO_FLAG_SPECULATIVE;

	zio = zio_create(pio, pio->io_spa, pio->io_txg, bp, data, size,
	    done, private, type, priority, flags, vd, offset, &pio->io_bookmark,
	    ZIO_STAGE_VDEV_IO_START >> 1, pipeline);

	return (zio);
}

zio_t *
zio_vdev_delegated_io(vdev_t *vd, uint64_t offset, void *data, uint64_t size,
    int type, int priority, enum zio_flag flags,
    zio_done_func_t *done, void *private)
{
	zio_t *zio;

	ASSERT(vd->vdev_ops->vdev_op_leaf);

	zio = zio_create(NULL, vd->vdev_spa, 0, NULL,
	    data, size, done, private, type, priority,
	    flags | ZIO_FLAG_CANFAIL | ZIO_FLAG_DONT_RETRY,
	    vd, offset, NULL,
	    ZIO_STAGE_VDEV_IO_START >> 1, ZIO_VDEV_CHILD_PIPELINE);

	return (zio);
}
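/*
 * Both functions above start the child at ZIO_STAGE_VDEV_IO_START >> 1:
 * zio_execute() advances with stage <<= 1 before dispatching a stage, so
 * a starting value one bit to the right makes VDEV_IO_START itself the
 * first stage the child runs.
 */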
void
zio_flush(zio_t *zio, vdev_t *vd)
{
	zio_nowait(zio_ioctl(zio, zio->io_spa, vd, DKIOCFLUSHWRITECACHE,
	    NULL, NULL, ZIO_PRIORITY_NOW,
	    ZIO_FLAG_CANFAIL | ZIO_FLAG_DONT_PROPAGATE | ZIO_FLAG_DONT_RETRY));
}

void
zio_shrink(zio_t *zio, uint64_t size)
{
	ASSERT(zio->io_executor == NULL);
	ASSERT(zio->io_orig_size == zio->io_size);
	ASSERT(size <= zio->io_size);

	/*
	 * We don't shrink for raidz because of problems with the
	 * reconstruction when reading back less than the block size.
	 * Note, BP_IS_RAIDZ() assumes no compression.
	 */
	ASSERT(BP_GET_COMPRESS(zio->io_bp) == ZIO_COMPRESS_OFF);
	if (!BP_IS_RAIDZ(zio->io_bp))
		zio->io_orig_size = zio->io_size = size;
}

/*
 * ==========================================================================
 * Prepare to read and write logical blocks
 * ==========================================================================
 */

static int
zio_read_bp_init(zio_t *zio)
{
	blkptr_t *bp = zio->io_bp;

	if (BP_GET_COMPRESS(bp) != ZIO_COMPRESS_OFF &&
	    zio->io_child_type == ZIO_CHILD_LOGICAL &&
	    !(zio->io_flags & ZIO_FLAG_RAW)) {
		uint64_t psize = BP_GET_PSIZE(bp);
		void *cbuf = zio_buf_alloc(psize);

		zio_push_transform(zio, cbuf, psize, psize, zio_decompress);
	}

	if (!dmu_ot[BP_GET_TYPE(bp)].ot_metadata && BP_GET_LEVEL(bp) == 0)
		zio->io_flags |= ZIO_FLAG_DONT_CACHE;

	if (BP_GET_TYPE(bp) == DMU_OT_DDT_ZAP)
		zio->io_flags |= ZIO_FLAG_DONT_CACHE;

	if (BP_GET_DEDUP(bp) && zio->io_child_type == ZIO_CHILD_LOGICAL)
		zio->io_pipeline = ZIO_DDT_READ_PIPELINE;

	return (ZIO_PIPELINE_CONTINUE);
}

static int
zio_write_bp_init(zio_t *zio)
{
	spa_t *spa = zio->io_spa;
	zio_prop_t *zp = &zio->io_prop;
	enum zio_compress compress = zp->zp_compress;
	blkptr_t *bp = zio->io_bp;
	uint64_t lsize = zio->io_size;
	uint64_t psize = lsize;
	int pass = 1;

	/*
	 * If our children haven't all reached the ready stage,
	 * wait for them and then repeat this pipeline stage.
	 */
	if (zio_wait_for_children(zio, ZIO_CHILD_GANG, ZIO_WAIT_READY) ||
	    zio_wait_for_children(zio, ZIO_CHILD_LOGICAL, ZIO_WAIT_READY))
		return (ZIO_PIPELINE_STOP);

	if (!IO_IS_ALLOCATING(zio))
		return (ZIO_PIPELINE_CONTINUE);

	ASSERT(zio->io_child_type != ZIO_CHILD_DDT);

	if (zio->io_bp_override) {
		ASSERT(bp->blk_birth != zio->io_txg);
		ASSERT(BP_GET_DEDUP(zio->io_bp_override) == 0);

		*bp = *zio->io_bp_override;
		zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;

		if (BP_IS_HOLE(bp) || !zp->zp_dedup)
			return (ZIO_PIPELINE_CONTINUE);

		ASSERT(zio_checksum_table[zp->zp_checksum].ci_dedup ||
		    zp->zp_dedup_verify);

		if (BP_GET_CHECKSUM(bp) == zp->zp_checksum) {
			BP_SET_DEDUP(bp, 1);
			zio->io_pipeline |= ZIO_STAGE_DDT_WRITE;
			return (ZIO_PIPELINE_CONTINUE);
		}
		zio->io_bp_override = NULL;
		BP_ZERO(bp);
	}

	if (bp->blk_birth == zio->io_txg) {
		/*
		 * We're rewriting an existing block, which means we're
		 * working on behalf of spa_sync().  For spa_sync() to
		 * converge, it must eventually be the case that we don't
		 * have to allocate new blocks.  But compression changes
		 * the blocksize, which forces a reallocate, and makes
		 * convergence take longer.  Therefore, after the first
		 * few passes, stop compressing to ensure convergence.
		 */
		pass = spa_sync_pass(spa);

		ASSERT(zio->io_txg == spa_syncing_txg(spa));
		ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
		ASSERT(!BP_GET_DEDUP(bp));

		if (pass > SYNC_PASS_DONT_COMPRESS)
			compress = ZIO_COMPRESS_OFF;

		/* Make sure someone doesn't change their mind on overwrites */
		ASSERT(MIN(zp->zp_copies + BP_IS_GANG(bp),
		    spa_max_replication(spa)) == BP_GET_NDVAS(bp));
	}

	if (compress != ZIO_COMPRESS_OFF) {
		void *cbuf = zio_buf_alloc(lsize);
		psize = zio_compress_data(compress, zio->io_data, cbuf, lsize);
		if (psize == 0 || psize == lsize) {
			compress = ZIO_COMPRESS_OFF;
			zio_buf_free(cbuf, lsize);
		} else {
			ASSERT(psize < lsize);
			zio_push_transform(zio, cbuf, psize, lsize, NULL);
		}
	}

	/*
	 * The final pass of spa_sync() must be all rewrites, but the first
	 * few passes offer a trade-off: allocating blocks defers convergence,
	 * but newly allocated blocks are sequential, so they can be written
	 * to disk faster.  Therefore, we allow the first few passes of
	 * spa_sync() to allocate new blocks, but force rewrites after that.
	 * There should only be a handful of blocks after pass 1 in any case.
	 */
	if (bp->blk_birth == zio->io_txg && BP_GET_PSIZE(bp) == psize &&
	    pass > SYNC_PASS_REWRITE) {
		ASSERT(psize != 0);
		enum zio_stage gang_stages = zio->io_pipeline & ZIO_GANG_STAGES;
		zio->io_pipeline = ZIO_REWRITE_PIPELINE | gang_stages;
		zio->io_flags |= ZIO_FLAG_IO_REWRITE;
	} else {
		BP_ZERO(bp);
		zio->io_pipeline = ZIO_WRITE_PIPELINE;
	}

	if (psize == 0) {
		zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
	} else {
		ASSERT(zp->zp_checksum != ZIO_CHECKSUM_GANG_HEADER);
		BP_SET_LSIZE(bp, lsize);
		BP_SET_PSIZE(bp, psize);
		BP_SET_COMPRESS(bp, compress);
		BP_SET_CHECKSUM(bp, zp->zp_checksum);
		BP_SET_TYPE(bp, zp->zp_type);
		BP_SET_LEVEL(bp, zp->zp_level);
		BP_SET_DEDUP(bp, zp->zp_dedup);
		BP_SET_BYTEORDER(bp, ZFS_HOST_BYTEORDER);
		if (zp->zp_dedup) {
			ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
			ASSERT(!(zio->io_flags & ZIO_FLAG_IO_REWRITE));
			zio->io_pipeline = ZIO_DDT_WRITE_PIPELINE;
		}
	}

	return (ZIO_PIPELINE_CONTINUE);
}
static int
zio_free_bp_init(zio_t *zio)
{
	blkptr_t *bp = zio->io_bp;

	if (zio->io_child_type == ZIO_CHILD_LOGICAL) {
		if (BP_GET_DEDUP(bp))
			zio->io_pipeline = ZIO_DDT_FREE_PIPELINE;
	}

	return (ZIO_PIPELINE_CONTINUE);
}

/*
 * ==========================================================================
 * Execute the I/O pipeline
 * ==========================================================================
 */

static void
zio_taskq_dispatch(zio_t *zio, enum zio_taskq_type q, boolean_t cutinline)
{
	spa_t *spa = zio->io_spa;
	zio_type_t t = zio->io_type;
	int flags = TQ_SLEEP | (cutinline ? TQ_FRONT : 0);

	ASSERT(q == ZIO_TASKQ_ISSUE || q == ZIO_TASKQ_INTERRUPT);

	/*
	 * If we're a config writer or a probe, the normal issue and
	 * interrupt threads may all be blocked waiting for the config lock.
	 * In this case, select the otherwise-unused taskq for ZIO_TYPE_NULL.
	 */
	if (zio->io_flags & (ZIO_FLAG_CONFIG_WRITER | ZIO_FLAG_PROBE))
		t = ZIO_TYPE_NULL;

	/*
	 * A similar issue exists for the L2ARC write thread until L2ARC 2.0.
	 */
	if (t == ZIO_TYPE_WRITE && zio->io_vd && zio->io_vd->vdev_aux)
		t = ZIO_TYPE_NULL;

	/*
	 * If this is a high priority I/O, then use the high priority taskq.
	 */
	if (zio->io_priority == ZIO_PRIORITY_NOW &&
	    spa->spa_zio_taskq[t][q + 1] != NULL)
		q++;

	ASSERT3U(q, <, ZIO_TASKQ_TYPES);
#ifdef _KERNEL
	(void) taskq_dispatch_safe(spa->spa_zio_taskq[t][q],
	    (task_func_t *)zio_execute, zio, flags, &zio->io_task);
#else
	(void) taskq_dispatch(spa->spa_zio_taskq[t][q],
	    (task_func_t *)zio_execute, zio, flags);
#endif
}

static boolean_t
zio_taskq_member(zio_t *zio, enum zio_taskq_type q)
{
	kthread_t *executor = zio->io_executor;
	spa_t *spa = zio->io_spa;

	for (zio_type_t t = 0; t < ZIO_TYPES; t++)
		if (taskq_member(spa->spa_zio_taskq[t][q], executor))
			return (B_TRUE);

	return (B_FALSE);
}

static int
zio_issue_async(zio_t *zio)
{
	zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, B_FALSE);

	return (ZIO_PIPELINE_STOP);
}

void
zio_interrupt(zio_t *zio)
{
	zio_taskq_dispatch(zio, ZIO_TASKQ_INTERRUPT, B_FALSE);
}

/*
 * Execute the I/O pipeline until one of the following occurs:
 * (1) the I/O completes; (2) the pipeline stalls waiting for
 * dependent child I/Os; (3) the I/O issues, so we're waiting
 * for an I/O completion interrupt; (4) the I/O is delegated by
 * vdev-level caching or aggregation; (5) the I/O is deferred
 * due to vdev-level queueing; (6) the I/O is handed off to
 * another thread.  In all cases, the pipeline stops whenever
 * there's no CPU work; it never burns a thread in cv_wait().
 *
 * There's no locking on io_stage because there's no legitimate way
 * for multiple threads to be attempting to process the same I/O.
 */
static zio_pipe_stage_t *zio_pipeline[];
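/*
 * Each pipeline stage is a distinct bit, so zio_execute() below finds the
 * next stage by shifting io_stage left until it hits a bit that is set in
 * io_pipeline, then indexes zio_pipeline[] by bit position
 * (highbit(stage) - 1).  A stage returns ZIO_PIPELINE_CONTINUE to fall
 * through to the next stage, or ZIO_PIPELINE_STOP once the zio has been
 * handed off (to a taskq, a child, or the vdev queue).
 */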
void
zio_execute(zio_t *zio)
{
	zio->io_executor = curthread;

	while (zio->io_stage < ZIO_STAGE_DONE) {
		enum zio_stage pipeline = zio->io_pipeline;
		enum zio_stage stage = zio->io_stage;
		int rv;

		ASSERT(!MUTEX_HELD(&zio->io_lock));
		ASSERT(ISP2(stage));
		ASSERT(zio->io_stall == NULL);

		do {
			stage <<= 1;
		} while ((stage & pipeline) == 0);

		ASSERT(stage <= ZIO_STAGE_DONE);

		/*
		 * If we are in interrupt context and this pipeline stage
		 * will grab a config lock that is held across I/O,
		 * or may wait for an I/O that needs an interrupt thread
		 * to complete, issue async to avoid deadlock.
		 *
		 * For VDEV_IO_START, we cut in line so that the io will
		 * be sent to disk promptly.
		 */
		if ((stage & ZIO_BLOCKING_STAGES) && zio->io_vd == NULL &&
		    zio_taskq_member(zio, ZIO_TASKQ_INTERRUPT)) {
			boolean_t cut = (stage == ZIO_STAGE_VDEV_IO_START) ?
			    zio_requeue_io_start_cut_in_line : B_FALSE;
			zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, cut);
			return;
		}

		zio->io_stage = stage;
		rv = zio_pipeline[highbit(stage) - 1](zio);

		if (rv == ZIO_PIPELINE_STOP)
			return;

		ASSERT(rv == ZIO_PIPELINE_CONTINUE);
	}
}

/*
 * ==========================================================================
 * Initiate I/O, either sync or async
 * ==========================================================================
 */
int
zio_wait(zio_t *zio)
{
	int error;

	ASSERT(zio->io_stage == ZIO_STAGE_OPEN);
	ASSERT(zio->io_executor == NULL);

	zio->io_waiter = curthread;

	zio_execute(zio);

	mutex_enter(&zio->io_lock);
	while (zio->io_executor != NULL)
		cv_wait(&zio->io_cv, &zio->io_lock);
	mutex_exit(&zio->io_lock);

	error = zio->io_error;
	zio_destroy(zio);

	return (error);
}

void
zio_nowait(zio_t *zio)
{
	ASSERT(zio->io_executor == NULL);

	if (zio->io_child_type == ZIO_CHILD_LOGICAL &&
	    zio_unique_parent(zio) == NULL) {
		/*
		 * This is a logical async I/O with no parent to wait for it.
		 * We add it to the spa_async_zio_root "Godfather" I/O, which
		 * will ensure it completes prior to unloading the pool.
		 */
		spa_t *spa = zio->io_spa;

		zio_add_child(spa->spa_async_zio_root, zio);
	}

	zio_execute(zio);
}

/*
 * ==========================================================================
 * Reexecute or suspend/resume failed I/O
 * ==========================================================================
 */

static void
zio_reexecute(zio_t *pio)
{
	zio_t *cio, *cio_next;

	ASSERT(pio->io_child_type == ZIO_CHILD_LOGICAL);
	ASSERT(pio->io_orig_stage == ZIO_STAGE_OPEN);
	ASSERT(pio->io_gang_leader == NULL);
	ASSERT(pio->io_gang_tree == NULL);

	pio->io_flags = pio->io_orig_flags;
	pio->io_stage = pio->io_orig_stage;
	pio->io_pipeline = pio->io_orig_pipeline;
	pio->io_reexecute = 0;
	pio->io_error = 0;
	for (int w = 0; w < ZIO_WAIT_TYPES; w++)
		pio->io_state[w] = 0;
	for (int c = 0; c < ZIO_CHILD_TYPES; c++)
		pio->io_child_error[c] = 0;

	if (IO_IS_ALLOCATING(pio))
		BP_ZERO(pio->io_bp);

	/*
	 * As we reexecute pio's children, new children could be created.
	 * New children go to the head of pio's io_child_list, however,
	 * so we will (correctly) not reexecute them.  The key is that
	 * the remainder of pio's io_child_list, from 'cio_next' onward,
	 * cannot be affected by any side effects of reexecuting 'cio'.
	 */
	for (cio = zio_walk_children(pio); cio != NULL; cio = cio_next) {
		cio_next = zio_walk_children(pio);
		mutex_enter(&pio->io_lock);
		for (int w = 0; w < ZIO_WAIT_TYPES; w++)
			pio->io_children[cio->io_child_type][w]++;
		mutex_exit(&pio->io_lock);
		zio_reexecute(cio);
	}

	/*
	 * Now that all children have been reexecuted, execute the parent.
	 * We don't reexecute "The Godfather" I/O here as it's the
	 * responsibility of the caller to wait on him.
	 */
	if (!(pio->io_flags & ZIO_FLAG_GODFATHER))
		zio_execute(pio);
}

void
zio_suspend(spa_t *spa, zio_t *zio)
{
	if (spa_get_failmode(spa) == ZIO_FAILURE_MODE_PANIC)
		fm_panic("Pool '%s' has encountered an uncorrectable I/O "
		    "failure and the failure mode property for this pool "
		    "is set to panic.", spa_name(spa));

	zfs_ereport_post(FM_EREPORT_ZFS_IO_FAILURE, spa, NULL, NULL, 0, 0);

	mutex_enter(&spa->spa_suspend_lock);

	if (spa->spa_suspend_zio_root == NULL)
		spa->spa_suspend_zio_root = zio_root(spa, NULL, NULL,
		    ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE |
		    ZIO_FLAG_GODFATHER);

	spa->spa_suspended = B_TRUE;

	if (zio != NULL) {
		ASSERT(!(zio->io_flags & ZIO_FLAG_GODFATHER));
		ASSERT(zio != spa->spa_suspend_zio_root);
		ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
		ASSERT(zio_unique_parent(zio) == NULL);
		ASSERT(zio->io_stage == ZIO_STAGE_DONE);
		zio_add_child(spa->spa_suspend_zio_root, zio);
	}

	mutex_exit(&spa->spa_suspend_lock);
}

int
zio_resume(spa_t *spa)
{
	zio_t *pio;

	/*
	 * Reexecute all previously suspended i/o.
	 */
	mutex_enter(&spa->spa_suspend_lock);
	spa->spa_suspended = B_FALSE;
	cv_broadcast(&spa->spa_suspend_cv);
	pio = spa->spa_suspend_zio_root;
	spa->spa_suspend_zio_root = NULL;
	mutex_exit(&spa->spa_suspend_lock);

	if (pio == NULL)
		return (0);

	zio_reexecute(pio);
	return (zio_wait(pio));
}

void
zio_resume_wait(spa_t *spa)
{
	mutex_enter(&spa->spa_suspend_lock);
	while (spa_suspended(spa))
		cv_wait(&spa->spa_suspend_cv, &spa->spa_suspend_lock);
	mutex_exit(&spa->spa_suspend_lock);
}
/*
 * ==========================================================================
 * Gang blocks.
 *
 * A gang block is a collection of small blocks that looks to the DMU
 * like one large block.  When zio_dva_allocate() cannot find a block
 * of the requested size, due to either severe fragmentation or the pool
 * being nearly full, it calls zio_write_gang_block() to construct the
 * block from smaller fragments.
 *
 * A gang block consists of a gang header (zio_gbh_phys_t) and up to
 * three (SPA_GBH_NBLKPTRS) gang members.  The gang header is just like
 * an indirect block: it's an array of block pointers.  It consumes
 * only one sector and hence is allocatable regardless of fragmentation.
 * The gang header's bps point to its gang members, which hold the data.
 *
 * Gang blocks are self-checksumming, using the bp's <vdev, offset, txg>
 * as the verifier to ensure uniqueness of the SHA256 checksum.
 * Critically, the gang block bp's blk_cksum is the checksum of the data,
 * not the gang header.  This ensures that data block signatures (needed for
 * deduplication) are independent of how the block is physically stored.
 *
 * Gang blocks can be nested: a gang member may itself be a gang block.
 * Thus every gang block is a tree in which root and all interior nodes are
 * gang headers, and the leaves are normal blocks that contain user data.
 * The root of the gang tree is called the gang leader.
 *
 * To perform any operation (read, rewrite, free, claim) on a gang block,
 * zio_gang_assemble() first assembles the gang tree (minus data leaves)
 * in the io_gang_tree field of the original logical i/o by recursively
 * reading the gang leader and all gang headers below it.  This yields
 * an in-core tree containing the contents of every gang header and the
 * bps for every constituent of the gang block.
 *
 * With the gang tree now assembled, zio_gang_issue() just walks the gang tree
 * and invokes a callback on each bp.  To free a gang block, zio_gang_issue()
 * calls zio_free_gang() -- a trivial wrapper around zio_free() -- for each bp.
 * zio_claim_gang() provides a similarly trivial wrapper for zio_claim().
 * zio_read_gang() is a wrapper around zio_read() that omits reading gang
 * headers, since we already have those in io_gang_tree.  zio_rewrite_gang()
 * performs a zio_rewrite() of the data or, for gang headers, a zio_rewrite()
 * of the gang header plus zio_checksum_compute() of the data to update the
 * gang header's blk_cksum as described above.
 *
 * The two-phase assemble/issue model solves the problem of partial failure --
 * what if you'd freed part of a gang block but then couldn't read the
 * gang header for another part?  Assembling the entire gang tree first
 * ensures that all the necessary gang header I/O has succeeded before
 * starting the actual work of free, claim, or write.  Once the gang tree
 * is assembled, free and claim are in-memory operations that cannot fail.
 *
 * In the event that a gang write fails, zio_dva_unallocate() walks the
 * gang tree to immediately free (i.e. insert back into the space map)
 * everything we've allocated.  This ensures that we don't get ENOSPC
 * errors during repeated suspend/resume cycles due to a flaky device.
 *
 * Gang rewrites only happen during sync-to-convergence.  If we can't assemble
 * the gang tree, we won't modify the block, so we can safely defer the free
 * (knowing that the block is still intact).  If we *can* assemble the gang
 * tree, then even if some of the rewrites fail, zio_dva_unallocate() will free
 * each constituent bp and we can allocate a new block on the next sync pass.
 *
 * In all cases, the gang tree allows complete recovery from partial failure.
 * ==========================================================================
 */
static zio_t *
zio_read_gang(zio_t *pio, blkptr_t *bp, zio_gang_node_t *gn, void *data)
{
	if (gn != NULL)
		return (pio);

	return (zio_read(pio, pio->io_spa, bp, data, BP_GET_PSIZE(bp),
	    NULL, NULL, pio->io_priority, ZIO_GANG_CHILD_FLAGS(pio),
	    &pio->io_bookmark));
}

zio_t *
zio_rewrite_gang(zio_t *pio, blkptr_t *bp, zio_gang_node_t *gn, void *data)
{
	zio_t *zio;

	if (gn != NULL) {
		zio = zio_rewrite(pio, pio->io_spa, pio->io_txg, bp,
		    gn->gn_gbh, SPA_GANGBLOCKSIZE, NULL, NULL, pio->io_priority,
		    ZIO_GANG_CHILD_FLAGS(pio), &pio->io_bookmark);
		/*
		 * As we rewrite each gang header, the pipeline will compute
		 * a new gang block header checksum for it; but no one will
		 * compute a new data checksum, so we do that here.  The one
		 * exception is the gang leader: the pipeline already computed
		 * its data checksum because that stage precedes gang assembly.
		 * (Presently, nothing actually uses interior data checksums;
		 * this is just good hygiene.)
		 */
		if (gn != pio->io_gang_leader->io_gang_tree) {
			zio_checksum_compute(zio, BP_GET_CHECKSUM(bp),
			    data, BP_GET_PSIZE(bp));
		}
		/*
		 * If we are here to damage data for testing purposes,
		 * leave the GBH alone so that we can detect the damage.
		 */
		if (pio->io_gang_leader->io_flags & ZIO_FLAG_INDUCE_DAMAGE)
			zio->io_pipeline &= ~ZIO_VDEV_IO_STAGES;
	} else {
		zio = zio_rewrite(pio, pio->io_spa, pio->io_txg, bp,
		    data, BP_GET_PSIZE(bp), NULL, NULL, pio->io_priority,
		    ZIO_GANG_CHILD_FLAGS(pio), &pio->io_bookmark);
	}

	return (zio);
}
/* ARGSUSED */
zio_t *
zio_free_gang(zio_t *pio, blkptr_t *bp, zio_gang_node_t *gn, void *data)
{
	return (zio_free_sync(pio, pio->io_spa, pio->io_txg, bp,
	    ZIO_GANG_CHILD_FLAGS(pio)));
}

/* ARGSUSED */
zio_t *
zio_claim_gang(zio_t *pio, blkptr_t *bp, zio_gang_node_t *gn, void *data)
{
	return (zio_claim(pio, pio->io_spa, pio->io_txg, bp,
	    NULL, NULL, ZIO_GANG_CHILD_FLAGS(pio)));
}

static zio_gang_issue_func_t *zio_gang_issue_func[ZIO_TYPES] = {
	NULL,
	zio_read_gang,
	zio_rewrite_gang,
	zio_free_gang,
	zio_claim_gang,
	NULL
};

static void zio_gang_tree_assemble_done(zio_t *zio);

static zio_gang_node_t *
zio_gang_node_alloc(zio_gang_node_t **gnpp)
{
	zio_gang_node_t *gn;

	ASSERT(*gnpp == NULL);

	gn = kmem_zalloc(sizeof (*gn), KM_SLEEP);
	gn->gn_gbh = zio_buf_alloc(SPA_GANGBLOCKSIZE);
	*gnpp = gn;

	return (gn);
}

static void
zio_gang_node_free(zio_gang_node_t **gnpp)
{
	zio_gang_node_t *gn = *gnpp;

	for (int g = 0; g < SPA_GBH_NBLKPTRS; g++)
		ASSERT(gn->gn_child[g] == NULL);

	zio_buf_free(gn->gn_gbh, SPA_GANGBLOCKSIZE);
	kmem_free(gn, sizeof (*gn));
	*gnpp = NULL;
}

static void
zio_gang_tree_free(zio_gang_node_t **gnpp)
{
	zio_gang_node_t *gn = *gnpp;

	if (gn == NULL)
		return;

	for (int g = 0; g < SPA_GBH_NBLKPTRS; g++)
		zio_gang_tree_free(&gn->gn_child[g]);

	zio_gang_node_free(gnpp);
}

static void
zio_gang_tree_assemble(zio_t *gio, blkptr_t *bp, zio_gang_node_t **gnpp)
{
	zio_gang_node_t *gn = zio_gang_node_alloc(gnpp);

	ASSERT(gio->io_gang_leader == gio);
	ASSERT(BP_IS_GANG(bp));

	zio_nowait(zio_read(gio, gio->io_spa, bp, gn->gn_gbh,
	    SPA_GANGBLOCKSIZE, zio_gang_tree_assemble_done, gn,
	    gio->io_priority, ZIO_GANG_CHILD_FLAGS(gio), &gio->io_bookmark));
}

static void
zio_gang_tree_assemble_done(zio_t *zio)
{
	zio_t *gio = zio->io_gang_leader;
	zio_gang_node_t *gn = zio->io_private;
	blkptr_t *bp = zio->io_bp;

	ASSERT(gio == zio_unique_parent(zio));
	ASSERT(zio->io_child_count == 0);

	if (zio->io_error)
		return;

	if (BP_SHOULD_BYTESWAP(bp))
		byteswap_uint64_array(zio->io_data, zio->io_size);

	ASSERT(zio->io_data == gn->gn_gbh);
	ASSERT(zio->io_size == SPA_GANGBLOCKSIZE);
	ASSERT(gn->gn_gbh->zg_tail.zec_magic == ZEC_MAGIC);

	for (int g = 0; g < SPA_GBH_NBLKPTRS; g++) {
		blkptr_t *gbp = &gn->gn_gbh->zg_blkptr[g];
		if (!BP_IS_GANG(gbp))
			continue;
		zio_gang_tree_assemble(gio, gbp, &gn->gn_child[g]);
	}
}
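/*
 * If reading a gang header fails, zio_gang_tree_assemble_done() above
 * returns without descending further; the error surfaces later in
 * zio_gang_issue(), which frees the partially assembled tree instead of
 * issuing work against it.
 */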
static void
zio_gang_tree_issue(zio_t *pio, zio_gang_node_t *gn, blkptr_t *bp, void *data)
{
	zio_t *gio = pio->io_gang_leader;
	zio_t *zio;

	ASSERT(BP_IS_GANG(bp) == !!gn);
	ASSERT(BP_GET_CHECKSUM(bp) == BP_GET_CHECKSUM(gio->io_bp));
	ASSERT(BP_GET_LSIZE(bp) == BP_GET_PSIZE(bp) || gn == gio->io_gang_tree);

	/*
	 * If you're a gang header, your data is in gn->gn_gbh.
	 * If you're a gang member, your data is in 'data' and gn == NULL.
	 */
	zio = zio_gang_issue_func[gio->io_type](pio, bp, gn, data);

	if (gn != NULL) {
		ASSERT(gn->gn_gbh->zg_tail.zec_magic == ZEC_MAGIC);

		for (int g = 0; g < SPA_GBH_NBLKPTRS; g++) {
			blkptr_t *gbp = &gn->gn_gbh->zg_blkptr[g];
			if (BP_IS_HOLE(gbp))
				continue;
			zio_gang_tree_issue(zio, gn->gn_child[g], gbp, data);
			data = (char *)data + BP_GET_PSIZE(gbp);
		}
	}

	if (gn == gio->io_gang_tree)
		ASSERT3P((char *)gio->io_data + gio->io_size, ==, data);

	if (zio != pio)
		zio_nowait(zio);
}

static int
zio_gang_assemble(zio_t *zio)
{
	blkptr_t *bp = zio->io_bp;

	ASSERT(BP_IS_GANG(bp) && zio->io_gang_leader == NULL);
	ASSERT(zio->io_child_type > ZIO_CHILD_GANG);

	zio->io_gang_leader = zio;

	zio_gang_tree_assemble(zio, bp, &zio->io_gang_tree);

	return (ZIO_PIPELINE_CONTINUE);
}

static int
zio_gang_issue(zio_t *zio)
{
	blkptr_t *bp = zio->io_bp;

	if (zio_wait_for_children(zio, ZIO_CHILD_GANG, ZIO_WAIT_DONE))
		return (ZIO_PIPELINE_STOP);

	ASSERT(BP_IS_GANG(bp) && zio->io_gang_leader == zio);
	ASSERT(zio->io_child_type > ZIO_CHILD_GANG);

	if (zio->io_child_error[ZIO_CHILD_GANG] == 0)
		zio_gang_tree_issue(zio, zio->io_gang_tree, bp, zio->io_data);
	else
		zio_gang_tree_free(&zio->io_gang_tree);

	zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;

	return (ZIO_PIPELINE_CONTINUE);
}

static void
zio_write_gang_member_ready(zio_t *zio)
{
	zio_t *pio = zio_unique_parent(zio);
	zio_t *gio = zio->io_gang_leader;
	dva_t *cdva = zio->io_bp->blk_dva;
	dva_t *pdva = pio->io_bp->blk_dva;
	uint64_t asize;

	if (BP_IS_HOLE(zio->io_bp))
		return;

	ASSERT(BP_IS_HOLE(&zio->io_bp_orig));

	ASSERT(zio->io_child_type == ZIO_CHILD_GANG);
	ASSERT3U(zio->io_prop.zp_copies, ==, gio->io_prop.zp_copies);
	ASSERT3U(zio->io_prop.zp_copies, <=, BP_GET_NDVAS(zio->io_bp));
	ASSERT3U(pio->io_prop.zp_copies, <=, BP_GET_NDVAS(pio->io_bp));
	ASSERT3U(BP_GET_NDVAS(zio->io_bp), <=, BP_GET_NDVAS(pio->io_bp));

	mutex_enter(&pio->io_lock);
	for (int d = 0; d < BP_GET_NDVAS(zio->io_bp); d++) {
		ASSERT(DVA_GET_GANG(&pdva[d]));
		asize = DVA_GET_ASIZE(&pdva[d]);
		asize += DVA_GET_ASIZE(&cdva[d]);
		DVA_SET_ASIZE(&pdva[d], asize);
	}
	mutex_exit(&pio->io_lock);
}
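/*
 * zio_write_gang_block() below splits the remaining data evenly across the
 * gang header's block pointers, rounding each piece up to a sector.  For
 * example, 10 sectors across 3 bps yields 4 + 3 + 3: P2ROUNDUP(10/3)
 * rounds the first piece up, and the remainder is resplit among the bps
 * that are left.
 */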
static int
zio_write_gang_block(zio_t *pio)
{
	spa_t *spa = pio->io_spa;
	blkptr_t *bp = pio->io_bp;
	zio_t *gio = pio->io_gang_leader;
	zio_t *zio;
	zio_gang_node_t *gn, **gnpp;
	zio_gbh_phys_t *gbh;
	uint64_t txg = pio->io_txg;
	uint64_t resid = pio->io_size;
	uint64_t lsize;
	int copies = gio->io_prop.zp_copies;
	int gbh_copies = MIN(copies + 1, spa_max_replication(spa));
	zio_prop_t zp;
	int error;

	error = metaslab_alloc(spa, spa_normal_class(spa), SPA_GANGBLOCKSIZE,
	    bp, gbh_copies, txg, pio == gio ? NULL : gio->io_bp,
	    METASLAB_HINTBP_FAVOR | METASLAB_GANG_HEADER);
	if (error) {
		pio->io_error = error;
		return (ZIO_PIPELINE_CONTINUE);
	}

	if (pio == gio) {
		gnpp = &gio->io_gang_tree;
	} else {
		gnpp = pio->io_private;
		ASSERT(pio->io_ready == zio_write_gang_member_ready);
	}

	gn = zio_gang_node_alloc(gnpp);
	gbh = gn->gn_gbh;
	bzero(gbh, SPA_GANGBLOCKSIZE);

	/*
	 * Create the gang header.
	 */
	zio = zio_rewrite(pio, spa, txg, bp, gbh, SPA_GANGBLOCKSIZE, NULL, NULL,
	    pio->io_priority, ZIO_GANG_CHILD_FLAGS(pio), &pio->io_bookmark);

	/*
	 * Create and nowait the gang children.
	 */
	for (int g = 0; resid != 0; resid -= lsize, g++) {
		lsize = P2ROUNDUP(resid / (SPA_GBH_NBLKPTRS - g),
		    SPA_MINBLOCKSIZE);
		ASSERT(lsize >= SPA_MINBLOCKSIZE && lsize <= resid);

		zp.zp_checksum = gio->io_prop.zp_checksum;
		zp.zp_compress = ZIO_COMPRESS_OFF;
		zp.zp_type = DMU_OT_NONE;
		zp.zp_level = 0;
		zp.zp_copies = gio->io_prop.zp_copies;
		zp.zp_dedup = 0;
		zp.zp_dedup_verify = 0;

		zio_nowait(zio_write(zio, spa, txg, &gbh->zg_blkptr[g],
		    (char *)pio->io_data + (pio->io_size - resid), lsize, &zp,
		    zio_write_gang_member_ready, NULL, &gn->gn_child[g],
		    pio->io_priority, ZIO_GANG_CHILD_FLAGS(pio),
		    &pio->io_bookmark));
	}

	/*
	 * Set pio's pipeline to just wait for zio to finish.
	 */
	pio->io_pipeline = ZIO_INTERLOCK_PIPELINE;

	zio_nowait(zio);

	return (ZIO_PIPELINE_CONTINUE);
}

/*
 * ==========================================================================
 * Dedup
 * ==========================================================================
 */
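/*
 * Overview: dedup reads normally just read the block, but when a previous
 * attempt failed (io_child_error[ZIO_CHILD_DDT] is set),
 * zio_ddt_read_start() fans out reads to the other DDT copies of the same
 * data so zio_ddt_read_done() can repair from whichever copy is intact.
 * On the write side, zio_ddt_collision() decides whether two blocks that
 * hash alike really differ, comparing actual data rather than trusting
 * the checksum alone.
 */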
static int
zio_ddt_read_start(zio_t *zio)
{
	blkptr_t *bp = zio->io_bp;

	ASSERT(BP_GET_DEDUP(bp));
	ASSERT(BP_GET_PSIZE(bp) == zio->io_size);
	ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);

	if (zio->io_child_error[ZIO_CHILD_DDT]) {
		ddt_t *ddt = ddt_select(zio->io_spa, bp);
		ddt_entry_t *dde = ddt_repair_start(ddt, bp);
		ddt_phys_t *ddp = dde->dde_phys;
		ddt_phys_t *ddp_self = ddt_phys_select(dde, bp);
		blkptr_t blk;

		ASSERT(zio->io_vsd == NULL);
		zio->io_vsd = dde;

		if (ddp_self == NULL)
			return (ZIO_PIPELINE_CONTINUE);

		for (int p = 0; p < DDT_PHYS_TYPES; p++, ddp++) {
			if (ddp->ddp_phys_birth == 0 || ddp == ddp_self)
				continue;
			ddt_bp_create(ddt->ddt_checksum, &dde->dde_key, ddp,
			    &blk);
			zio_nowait(zio_read(zio, zio->io_spa, &blk,
			    zio_buf_alloc(zio->io_size), zio->io_size,
			    zio_ddt_child_read_done, dde, zio->io_priority,
			    ZIO_DDT_CHILD_FLAGS(zio) | ZIO_FLAG_DONT_PROPAGATE,
			    &zio->io_bookmark));
		}
		return (ZIO_PIPELINE_CONTINUE);
	}

	zio_nowait(zio_read(zio, zio->io_spa, bp,
	    zio->io_data, zio->io_size, NULL, NULL, zio->io_priority,
	    ZIO_DDT_CHILD_FLAGS(zio), &zio->io_bookmark));

	return (ZIO_PIPELINE_CONTINUE);
}

static int
zio_ddt_read_done(zio_t *zio)
{
	blkptr_t *bp = zio->io_bp;

	if (zio_wait_for_children(zio, ZIO_CHILD_DDT, ZIO_WAIT_DONE))
		return (ZIO_PIPELINE_STOP);

	ASSERT(BP_GET_DEDUP(bp));
	ASSERT(BP_GET_PSIZE(bp) == zio->io_size);
	ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);

	if (zio->io_child_error[ZIO_CHILD_DDT]) {
		ddt_t *ddt = ddt_select(zio->io_spa, bp);
		ddt_entry_t *dde = zio->io_vsd;
		if (ddt == NULL) {
			ASSERT(spa_load_state(zio->io_spa) != SPA_LOAD_NONE);
			return (ZIO_PIPELINE_CONTINUE);
		}
		if (dde == NULL) {
			zio->io_stage = ZIO_STAGE_DDT_READ_START >> 1;
			zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, B_FALSE);
			return (ZIO_PIPELINE_STOP);
		}
		if (dde->dde_repair_data != NULL) {
			bcopy(dde->dde_repair_data, zio->io_data, zio->io_size);
			zio->io_child_error[ZIO_CHILD_DDT] = 0;
		}
		ddt_repair_done(ddt, dde);
		zio->io_vsd = NULL;
	}

	ASSERT(zio->io_vsd == NULL);

	return (ZIO_PIPELINE_CONTINUE);
}

static boolean_t
zio_ddt_collision(zio_t *zio, ddt_t *ddt, ddt_entry_t *dde)
{
	spa_t *spa = zio->io_spa;

	/*
	 * Note: we compare the original data, not the transformed data,
	 * because when zio->io_bp is an override bp, we will not have
	 * pushed the I/O transforms.  That's an important optimization
	 * because otherwise we'd compress/encrypt all dmu_sync() data twice.
	 */
	for (int p = DDT_PHYS_SINGLE; p <= DDT_PHYS_TRIPLE; p++) {
		zio_t *lio = dde->dde_lead_zio[p];

		if (lio != NULL) {
			return (lio->io_orig_size != zio->io_orig_size ||
			    bcmp(zio->io_orig_data, lio->io_orig_data,
			    zio->io_orig_size) != 0);
		}
	}

	for (int p = DDT_PHYS_SINGLE; p <= DDT_PHYS_TRIPLE; p++) {
		ddt_phys_t *ddp = &dde->dde_phys[p];

		if (ddp->ddp_phys_birth != 0) {
			arc_buf_t *abuf = NULL;
			uint32_t aflags = ARC_WAIT;
			blkptr_t blk = *zio->io_bp;
			int error;

			ddt_bp_fill(ddp, &blk, ddp->ddp_phys_birth);

			ddt_exit(ddt);

			error = arc_read_nolock(NULL, spa, &blk,
			    arc_getbuf_func, &abuf, ZIO_PRIORITY_SYNC_READ,
			    ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE,
			    &aflags, &zio->io_bookmark);

			if (error == 0) {
				if (arc_buf_size(abuf) != zio->io_orig_size ||
				    bcmp(abuf->b_data, zio->io_orig_data,
				    zio->io_orig_size) != 0)
					error = EEXIST;
				VERIFY(arc_buf_remove_ref(abuf, &abuf) == 1);
			}

			ddt_enter(ddt);
			return (error != 0);
		}
	}

	return (B_FALSE);
}
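
/*
 * The ready and done callbacks below maintain the DDT entry for a
 * leading dedup write: at ready time the child's freshly allocated bp
 * is copied into the entry's ddt_phys_t and into every parent bp; at
 * done time the lead zio steps aside and either takes one reference per
 * parent (success) or clears the phys (failure).
 */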
static void
zio_ddt_child_write_ready(zio_t *zio)
{
	int p = zio->io_prop.zp_copies;
	ddt_t *ddt = ddt_select(zio->io_spa, zio->io_bp);
	ddt_entry_t *dde = zio->io_private;
	ddt_phys_t *ddp = &dde->dde_phys[p];
	zio_t *pio;

	if (zio->io_error)
		return;

	ddt_enter(ddt);

	ASSERT(dde->dde_lead_zio[p] == zio);

	ddt_phys_fill(ddp, zio->io_bp);

	while ((pio = zio_walk_parents(zio)) != NULL)
		ddt_bp_fill(ddp, pio->io_bp, zio->io_txg);

	ddt_exit(ddt);
}

static void
zio_ddt_child_write_done(zio_t *zio)
{
	int p = zio->io_prop.zp_copies;
	ddt_t *ddt = ddt_select(zio->io_spa, zio->io_bp);
	ddt_entry_t *dde = zio->io_private;
	ddt_phys_t *ddp = &dde->dde_phys[p];

	ddt_enter(ddt);

	ASSERT(ddp->ddp_refcnt == 0);
	ASSERT(dde->dde_lead_zio[p] == zio);
	dde->dde_lead_zio[p] = NULL;

	if (zio->io_error == 0) {
		while (zio_walk_parents(zio) != NULL)
			ddt_phys_addref(ddp);
	} else {
		ddt_phys_clear(ddp);
	}

	ddt_exit(ddt);
}

static void
zio_ddt_ditto_write_done(zio_t *zio)
{
	int p = DDT_PHYS_DITTO;
	zio_prop_t *zp = &zio->io_prop;
	blkptr_t *bp = zio->io_bp;
	ddt_t *ddt = ddt_select(zio->io_spa, bp);
	ddt_entry_t *dde = zio->io_private;
	ddt_phys_t *ddp = &dde->dde_phys[p];
	ddt_key_t *ddk = &dde->dde_key;

	ddt_enter(ddt);

	ASSERT(ddp->ddp_refcnt == 0);
	ASSERT(dde->dde_lead_zio[p] == zio);
	dde->dde_lead_zio[p] = NULL;

	if (zio->io_error == 0) {
		ASSERT(ZIO_CHECKSUM_EQUAL(bp->blk_cksum, ddk->ddk_cksum));
		ASSERT(zp->zp_copies < SPA_DVAS_PER_BP);
		ASSERT(zp->zp_copies == BP_GET_NDVAS(bp) - BP_IS_GANG(bp));
		if (ddp->ddp_phys_birth != 0)
			ddt_phys_free(ddt, ddk, ddp, zio->io_txg);
		ddt_phys_fill(ddp, bp);
	}

	ddt_exit(ddt);
}
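
/*
 * Deduplicated writes resolve to one of three outcomes: the data is
 * already on disk (or a lead write for it is in flight), in which case
 * we just fill our bp from the DDT entry and take a reference; we have
 * an override bp from dmu_sync() that already matches the table, in
 * which case we fill the phys and take a reference; or this zio becomes
 * the lead write that actually allocates and writes the block.  A ditto
 * child may also be issued when the entry needs additional copies.
 */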
static int
zio_ddt_write(zio_t *zio)
{
	spa_t *spa = zio->io_spa;
	blkptr_t *bp = zio->io_bp;
	uint64_t txg = zio->io_txg;
	zio_prop_t *zp = &zio->io_prop;
	int p = zp->zp_copies;
	int ditto_copies;
	zio_t *cio = NULL;
	zio_t *dio = NULL;
	ddt_t *ddt = ddt_select(spa, bp);
	ddt_entry_t *dde;
	ddt_phys_t *ddp;

	ASSERT(BP_GET_DEDUP(bp));
	ASSERT(BP_GET_CHECKSUM(bp) == zp->zp_checksum);
	ASSERT(BP_IS_HOLE(bp) || zio->io_bp_override);

	ddt_enter(ddt);
	dde = ddt_lookup(ddt, bp, B_TRUE);
	ddp = &dde->dde_phys[p];

	if (zp->zp_dedup_verify && zio_ddt_collision(zio, ddt, dde)) {
		/*
		 * If we're using a weak checksum, upgrade to a strong checksum
		 * and try again.  If we're already using a strong checksum,
		 * we can't resolve it, so just convert to an ordinary write.
		 * (And automatically e-mail a paper to Nature?)
		 */
		if (!zio_checksum_table[zp->zp_checksum].ci_dedup) {
			zp->zp_checksum = spa_dedup_checksum(spa);
			zio_pop_transforms(zio);
			zio->io_stage = ZIO_STAGE_OPEN;
			BP_ZERO(bp);
		} else {
			zp->zp_dedup = 0;
		}
		zio->io_pipeline = ZIO_WRITE_PIPELINE;
		ddt_exit(ddt);
		return (ZIO_PIPELINE_CONTINUE);
	}

	ditto_copies = ddt_ditto_copies_needed(ddt, dde, ddp);
	ASSERT(ditto_copies < SPA_DVAS_PER_BP);

	if (ditto_copies > ddt_ditto_copies_present(dde) &&
	    dde->dde_lead_zio[DDT_PHYS_DITTO] == NULL) {
		zio_prop_t czp = *zp;

		czp.zp_copies = ditto_copies;

		/*
		 * If we arrived here with an override bp, we won't have run
		 * the transform stack, so we won't have the data we need to
		 * generate a child i/o.  So, toss the override bp and restart.
		 * This is safe, because using the override bp is just an
		 * optimization; and it's rare, so the cost doesn't matter.
		 */
		if (zio->io_bp_override) {
			zio_pop_transforms(zio);
			zio->io_stage = ZIO_STAGE_OPEN;
			zio->io_pipeline = ZIO_WRITE_PIPELINE;
			zio->io_bp_override = NULL;
			BP_ZERO(bp);
			ddt_exit(ddt);
			return (ZIO_PIPELINE_CONTINUE);
		}

		dio = zio_write(zio, spa, txg, bp, zio->io_orig_data,
		    zio->io_orig_size, &czp, NULL,
		    zio_ddt_ditto_write_done, dde, zio->io_priority,
		    ZIO_DDT_CHILD_FLAGS(zio), &zio->io_bookmark);

		zio_push_transform(dio, zio->io_data, zio->io_size, 0, NULL);
		dde->dde_lead_zio[DDT_PHYS_DITTO] = dio;
	}

	if (ddp->ddp_phys_birth != 0 || dde->dde_lead_zio[p] != NULL) {
		if (ddp->ddp_phys_birth != 0)
			ddt_bp_fill(ddp, bp, txg);
		if (dde->dde_lead_zio[p] != NULL)
			zio_add_child(zio, dde->dde_lead_zio[p]);
		else
			ddt_phys_addref(ddp);
	} else if (zio->io_bp_override) {
		ASSERT(bp->blk_birth == txg);
		ASSERT(BP_EQUAL(bp, zio->io_bp_override));
		ddt_phys_fill(ddp, bp);
		ddt_phys_addref(ddp);
	} else {
		cio = zio_write(zio, spa, txg, bp, zio->io_orig_data,
		    zio->io_orig_size, zp, zio_ddt_child_write_ready,
		    zio_ddt_child_write_done, dde, zio->io_priority,
		    ZIO_DDT_CHILD_FLAGS(zio), &zio->io_bookmark);

		zio_push_transform(cio, zio->io_data, zio->io_size, 0, NULL);
		dde->dde_lead_zio[p] = cio;
	}

	ddt_exit(ddt);

	if (cio)
		zio_nowait(cio);
	if (dio)
		zio_nowait(dio);

	return (ZIO_PIPELINE_CONTINUE);
}

ddt_entry_t *freedde;	/* for debugging */
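
/*
 * "Freeing" a dedup block just drops one reference on its DDT entry;
 * the block itself is freed later, during DDT sync, once its refcount
 * reaches zero.
 */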
static int
zio_ddt_free(zio_t *zio)
{
	spa_t *spa = zio->io_spa;
	blkptr_t *bp = zio->io_bp;
	ddt_t *ddt = ddt_select(spa, bp);
	ddt_entry_t *dde;
	ddt_phys_t *ddp;

	ASSERT(BP_GET_DEDUP(bp));
	ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);

	ddt_enter(ddt);
	freedde = dde = ddt_lookup(ddt, bp, B_TRUE);
	ddp = ddt_phys_select(dde, bp);
	ddt_phys_decref(ddp);
	ddt_exit(ddt);

	return (ZIO_PIPELINE_CONTINUE);
}

/*
 * ==========================================================================
 * Allocate and free blocks
 * ==========================================================================
 */
static int
zio_dva_allocate(zio_t *zio)
{
	spa_t *spa = zio->io_spa;
	metaslab_class_t *mc = spa_normal_class(spa);
	blkptr_t *bp = zio->io_bp;
	int error;
	int flags = 0;

	if (zio->io_gang_leader == NULL) {
		ASSERT(zio->io_child_type > ZIO_CHILD_GANG);
		zio->io_gang_leader = zio;
	}

	ASSERT(BP_IS_HOLE(bp));
	ASSERT3U(BP_GET_NDVAS(bp), ==, 0);
	ASSERT3U(zio->io_prop.zp_copies, >, 0);
	ASSERT3U(zio->io_prop.zp_copies, <=, spa_max_replication(spa));
	ASSERT3U(zio->io_size, ==, BP_GET_PSIZE(bp));

	/*
	 * The dump device does not support gang blocks, so allocation on
	 * behalf of the dump device (i.e. ZIO_FLAG_NODATA) must avoid
	 * the "fast" gang feature.
	 */
	flags |= (zio->io_flags & ZIO_FLAG_NODATA) ? METASLAB_GANG_AVOID : 0;
	flags |= (zio->io_flags & ZIO_FLAG_GANG_CHILD) ?
	    METASLAB_GANG_CHILD : 0;
	error = metaslab_alloc(spa, mc, zio->io_size, bp,
	    zio->io_prop.zp_copies, zio->io_txg, NULL, flags);

	if (error) {
		spa_dbgmsg(spa, "%s: metaslab allocation failure: zio %p, "
		    "size %llu, error %d", spa_name(spa), zio, zio->io_size,
		    error);
		if (error == ENOSPC && zio->io_size > SPA_MINBLOCKSIZE)
			return (zio_write_gang_block(zio));
		zio->io_error = error;
	}

	return (ZIO_PIPELINE_CONTINUE);
}

static int
zio_dva_free(zio_t *zio)
{
	metaslab_free(zio->io_spa, zio->io_bp, zio->io_txg, B_FALSE);

	return (ZIO_PIPELINE_CONTINUE);
}

static int
zio_dva_claim(zio_t *zio)
{
	int error;

	error = metaslab_claim(zio->io_spa, zio->io_bp, zio->io_txg);
	if (error)
		zio->io_error = error;

	return (ZIO_PIPELINE_CONTINUE);
}

/*
 * Undo an allocation.  This is used by zio_done() when an I/O fails
 * and we want to give back the block we just allocated.
 * This handles both normal blocks and gang blocks.
 */
static void
zio_dva_unallocate(zio_t *zio, zio_gang_node_t *gn, blkptr_t *bp)
{
	ASSERT(bp->blk_birth == zio->io_txg || BP_IS_HOLE(bp));
	ASSERT(zio->io_bp_override == NULL);

	if (!BP_IS_HOLE(bp))
		metaslab_free(zio->io_spa, bp, bp->blk_birth, B_TRUE);

	if (gn != NULL) {
		for (int g = 0; g < SPA_GBH_NBLKPTRS; g++) {
			zio_dva_unallocate(zio, gn->gn_child[g],
			    &gn->gn_gbh->zg_blkptr[g]);
		}
	}
}

/*
 * Try to allocate an intent log block.  Return 0 on success, errno on failure.
 */
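/*
 * Allocation policy, as implemented below: try the intent-log (slog)
 * class first, then fall back to the normal class.  'error' starts
 * nonzero so that skipping the slog attempt falls through to the
 * normal-class allocation.
 */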
int
zio_alloc_zil(spa_t *spa, uint64_t txg, blkptr_t *new_bp, blkptr_t *old_bp,
    uint64_t size, boolean_t use_slog)
{
	int error = 1;

	ASSERT(txg > spa_syncing_txg(spa));

	/*
	 * ZIL blocks are always contiguous (i.e. not gang blocks), so we
	 * set the METASLAB_GANG_AVOID flag so that they don't "fast gang"
	 * when allocating them.
	 */
	if (use_slog) {
		error = metaslab_alloc(spa, spa_log_class(spa), size,
		    new_bp, 1, txg, old_bp,
		    METASLAB_HINTBP_AVOID | METASLAB_GANG_AVOID);
	}

	if (error) {
		error = metaslab_alloc(spa, spa_normal_class(spa), size,
		    new_bp, 1, txg, old_bp,
		    METASLAB_HINTBP_AVOID | METASLAB_GANG_AVOID);
	}

	if (error == 0) {
		BP_SET_LSIZE(new_bp, size);
		BP_SET_PSIZE(new_bp, size);
		BP_SET_COMPRESS(new_bp, ZIO_COMPRESS_OFF);
		BP_SET_CHECKSUM(new_bp,
		    spa_version(spa) >= SPA_VERSION_SLIM_ZIL
		    ? ZIO_CHECKSUM_ZILOG2 : ZIO_CHECKSUM_ZILOG);
		BP_SET_TYPE(new_bp, DMU_OT_INTENT_LOG);
		BP_SET_LEVEL(new_bp, 0);
		BP_SET_DEDUP(new_bp, 0);
		BP_SET_BYTEORDER(new_bp, ZFS_HOST_BYTEORDER);
	}

	return (error);
}

/*
 * Free an intent log block.
 */
void
zio_free_zil(spa_t *spa, uint64_t txg, blkptr_t *bp)
{
	ASSERT(BP_GET_TYPE(bp) == DMU_OT_INTENT_LOG);
	ASSERT(!BP_IS_GANG(bp));

	zio_free(spa, txg, bp);
}

/*
 * ==========================================================================
 * Read and write to physical devices
 * ==========================================================================
 */
static int
zio_vdev_io_start(zio_t *zio)
{
	vdev_t *vd = zio->io_vd;
	uint64_t align;
	spa_t *spa = zio->io_spa;

	ASSERT(zio->io_error == 0);
	ASSERT(zio->io_child_error[ZIO_CHILD_VDEV] == 0);

	if (vd == NULL) {
		if (!(zio->io_flags & ZIO_FLAG_CONFIG_WRITER))
			spa_config_enter(spa, SCL_ZIO, zio, RW_READER);

		/*
		 * The mirror_ops handle multiple DVAs in a single BP.
		 */
		return (vdev_mirror_ops.vdev_op_io_start(zio));
	}

	/*
	 * We keep track of time-sensitive I/Os so that the scan thread
	 * can quickly react to certain workloads.  In particular, we care
	 * about non-scrubbing, top-level reads and writes with the following
	 * characteristics:
	 *	- synchronous writes of user data to non-slog devices
	 *	- any reads of user data
	 * When these conditions are met, adjust the timestamp of spa_last_io
	 * which allows the scan thread to adjust its workload accordingly.
	 */
	if (!(zio->io_flags & ZIO_FLAG_SCAN_THREAD) && zio->io_bp != NULL &&
	    vd == vd->vdev_top && !vd->vdev_islog &&
	    zio->io_bookmark.zb_objset != DMU_META_OBJSET &&
	    zio->io_txg != spa_syncing_txg(spa)) {
		uint64_t old = spa->spa_last_io;
		uint64_t new = ddi_get_lbolt64();
		if (old != new)
			(void) atomic_cas_64(&spa->spa_last_io, old, new);
	}
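
	/*
	 * Pad the I/O out to the device's minimum block size.  For
	 * example (illustrative values), a 3.5K write to a top-level
	 * vdev with ashift 12 is copied into a 4K buffer with the tail
	 * zeroed; a 3.5K read is issued as a 4K read, and zio_subblock()
	 * copies just the leading 3.5K back on completion.
	 */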
	align = 1ULL << vd->vdev_top->vdev_ashift;

	if (P2PHASE(zio->io_size, align) != 0) {
		uint64_t asize = P2ROUNDUP(zio->io_size, align);
		char *abuf = zio_buf_alloc(asize);
		ASSERT(vd == vd->vdev_top);
		if (zio->io_type == ZIO_TYPE_WRITE) {
			bcopy(zio->io_data, abuf, zio->io_size);
			bzero(abuf + zio->io_size, asize - zio->io_size);
		}
		zio_push_transform(zio, abuf, asize, asize, zio_subblock);
	}

	ASSERT(P2PHASE(zio->io_offset, align) == 0);
	ASSERT(P2PHASE(zio->io_size, align) == 0);
	VERIFY(zio->io_type != ZIO_TYPE_WRITE || spa_writeable(spa));

	/*
	 * If this is a repair I/O, and there's no self-healing involved --
	 * that is, we're just resilvering what we expect to resilver --
	 * then don't do the I/O unless zio's txg is actually in vd's DTL.
	 * This prevents spurious resilvering with nested replication.
	 * For example, given a mirror of mirrors, (A+B)+(C+D), if only
	 * A is out of date, we'll read from C+D, then use the data to
	 * resilver A+B -- but we don't actually want to resilver B, just A.
	 * The top-level mirror has no way to know this, so instead we just
	 * discard unnecessary repairs as we work our way down the vdev tree.
	 * The same logic applies to any form of nested replication:
	 * ditto + mirror, RAID-Z + replacing, etc.  This covers them all.
	 */
	if ((zio->io_flags & ZIO_FLAG_IO_REPAIR) &&
	    !(zio->io_flags & ZIO_FLAG_SELF_HEAL) &&
	    zio->io_txg != 0 &&	/* not a delegated i/o */
	    !vdev_dtl_contains(vd, DTL_PARTIAL, zio->io_txg, 1)) {
		ASSERT(zio->io_type == ZIO_TYPE_WRITE);
		zio_vdev_io_bypass(zio);
		return (ZIO_PIPELINE_CONTINUE);
	}

	if (vd->vdev_ops->vdev_op_leaf &&
	    (zio->io_type == ZIO_TYPE_READ || zio->io_type == ZIO_TYPE_WRITE)) {

		if (zio->io_type == ZIO_TYPE_READ && vdev_cache_read(zio) == 0)
			return (ZIO_PIPELINE_CONTINUE);

		if ((zio = vdev_queue_io(zio)) == NULL)
			return (ZIO_PIPELINE_STOP);

		if (!vdev_accessible(vd, zio)) {
			zio->io_error = ENXIO;
			zio_interrupt(zio);
			return (ZIO_PIPELINE_STOP);
		}
	}

	return (vd->vdev_ops->vdev_op_io_start(zio));
}

static int
zio_vdev_io_done(zio_t *zio)
{
	vdev_t *vd = zio->io_vd;
	vdev_ops_t *ops = vd ? vd->vdev_ops : &vdev_mirror_ops;
	boolean_t unexpected_error = B_FALSE;

	if (zio_wait_for_children(zio, ZIO_CHILD_VDEV, ZIO_WAIT_DONE))
		return (ZIO_PIPELINE_STOP);

	ASSERT(zio->io_type == ZIO_TYPE_READ || zio->io_type == ZIO_TYPE_WRITE);

	if (vd != NULL && vd->vdev_ops->vdev_op_leaf) {

		vdev_queue_io_done(zio);

		if (zio->io_type == ZIO_TYPE_WRITE)
			vdev_cache_write(zio);

		if (zio_injection_enabled && zio->io_error == 0)
			zio->io_error = zio_handle_device_injection(vd,
			    zio, EIO);

		if (zio_injection_enabled && zio->io_error == 0)
			zio->io_error = zio_handle_label_injection(zio, EIO);

		if (zio->io_error) {
			if (!vdev_accessible(vd, zio)) {
				zio->io_error = ENXIO;
			} else {
				unexpected_error = B_TRUE;
			}
		}
	}

	ops->vdev_op_io_done(zio);

	if (unexpected_error)
		VERIFY(vdev_probe(vd, zio) == NULL);

	return (ZIO_PIPELINE_CONTINUE);
}

/*
 * For non-raidz ZIOs, we can just copy aside the bad data read from the
 * disk, and use that to finish the checksum ereport later.
 */
static void
zio_vsd_default_cksum_finish(zio_cksum_report_t *zcr,
    const void *good_buf)
{
	/* no processing needed */
	zfs_ereport_finish_checksum(zcr, good_buf, zcr->zcr_cbdata, B_FALSE);
}

/*ARGSUSED*/
void
zio_vsd_default_cksum_report(zio_t *zio, zio_cksum_report_t *zcr, void *ignored)
{
	void *buf = zio_buf_alloc(zio->io_size);

	bcopy(zio->io_data, buf, zio->io_size);

	zcr->zcr_cbinfo = zio->io_size;
	zcr->zcr_cbdata = buf;
	zcr->zcr_finish = zio_vsd_default_cksum_finish;
	zcr->zcr_free = zio_buf_free;
}

static int
zio_vdev_io_assess(zio_t *zio)
{
	vdev_t *vd = zio->io_vd;

	if (zio_wait_for_children(zio, ZIO_CHILD_VDEV, ZIO_WAIT_DONE))
		return (ZIO_PIPELINE_STOP);

	if (vd == NULL && !(zio->io_flags & ZIO_FLAG_CONFIG_WRITER))
		spa_config_exit(zio->io_spa, SCL_ZIO, zio);

	if (zio->io_vsd != NULL) {
		zio->io_vsd_ops->vsd_free(zio);
		zio->io_vsd = NULL;
	}

	if (zio_injection_enabled && zio->io_error == 0)
		zio->io_error = zio_handle_fault_injection(zio, EIO);

	/*
	 * If the I/O failed, determine whether we should attempt to retry it.
	 *
	 * On retry, we cut in line in the issue queue, since we don't want
	 * compression/checksumming/etc. work to prevent our (cheap) IO reissue.
	 */
	if (zio->io_error && vd == NULL &&
	    !(zio->io_flags & (ZIO_FLAG_DONT_RETRY | ZIO_FLAG_IO_RETRY))) {
		ASSERT(!(zio->io_flags & ZIO_FLAG_DONT_QUEUE));	/* not a leaf */
		ASSERT(!(zio->io_flags & ZIO_FLAG_IO_BYPASS));	/* not a leaf */
		zio->io_error = 0;
		zio->io_flags |= ZIO_FLAG_IO_RETRY |
		    ZIO_FLAG_DONT_CACHE | ZIO_FLAG_DONT_AGGREGATE;
		zio->io_stage = ZIO_STAGE_VDEV_IO_START >> 1;
		zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE,
		    zio_requeue_io_start_cut_in_line);
		return (ZIO_PIPELINE_STOP);
	}

	/*
	 * If we got an error on a leaf device, convert it to ENXIO
	 * if the device is not accessible at all.
	 */
	if (zio->io_error && vd != NULL && vd->vdev_ops->vdev_op_leaf &&
	    !vdev_accessible(vd, zio))
		zio->io_error = ENXIO;

	/*
	 * If we can't write to an interior vdev (mirror or RAID-Z),
	 * set vdev_cant_write so that we stop trying to allocate from it.
	 */
	if (zio->io_error == ENXIO && zio->io_type == ZIO_TYPE_WRITE &&
	    vd != NULL && !vd->vdev_ops->vdev_op_leaf)
		vd->vdev_cant_write = B_TRUE;

	if (zio->io_error)
		zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;

	return (ZIO_PIPELINE_CONTINUE);
}
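
/*
 * Pipeline stages are one-hot bits, and zio_execute() always advances
 * to the next stage bit set in io_pipeline above io_stage.  Setting
 * io_stage to a stage's bit shifted right by one therefore makes that
 * stage the next one to run, which is how the helpers below rewind the
 * pipeline to reissue or bypass vdev I/O.
 */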
void
zio_vdev_io_reissue(zio_t *zio)
{
	ASSERT(zio->io_stage == ZIO_STAGE_VDEV_IO_START);
	ASSERT(zio->io_error == 0);

	zio->io_stage >>= 1;
}

void
zio_vdev_io_redone(zio_t *zio)
{
	ASSERT(zio->io_stage == ZIO_STAGE_VDEV_IO_DONE);

	zio->io_stage >>= 1;
}

void
zio_vdev_io_bypass(zio_t *zio)
{
	ASSERT(zio->io_stage == ZIO_STAGE_VDEV_IO_START);
	ASSERT(zio->io_error == 0);

	zio->io_flags |= ZIO_FLAG_IO_BYPASS;
	zio->io_stage = ZIO_STAGE_VDEV_IO_ASSESS >> 1;
}

/*
 * ==========================================================================
 * Generate and verify checksums
 * ==========================================================================
 */
static int
zio_checksum_generate(zio_t *zio)
{
	blkptr_t *bp = zio->io_bp;
	enum zio_checksum checksum;

	if (bp == NULL) {
		/*
		 * This is zio_write_phys().
		 * We're either generating a label checksum, or none at all.
		 */
		checksum = zio->io_prop.zp_checksum;

		if (checksum == ZIO_CHECKSUM_OFF)
			return (ZIO_PIPELINE_CONTINUE);

		ASSERT(checksum == ZIO_CHECKSUM_LABEL);
	} else {
		if (BP_IS_GANG(bp) && zio->io_child_type == ZIO_CHILD_GANG) {
			ASSERT(!IO_IS_ALLOCATING(zio));
			checksum = ZIO_CHECKSUM_GANG_HEADER;
		} else {
			checksum = BP_GET_CHECKSUM(bp);
		}
	}

	zio_checksum_compute(zio, checksum, zio->io_data, zio->io_size);

	return (ZIO_PIPELINE_CONTINUE);
}

static int
zio_checksum_verify(zio_t *zio)
{
	zio_bad_cksum_t info;
	blkptr_t *bp = zio->io_bp;
	int error;

	ASSERT(zio->io_vd != NULL);

	if (bp == NULL) {
		/*
		 * This is zio_read_phys().
		 * We're either verifying a label checksum, or nothing at all.
		 */
		if (zio->io_prop.zp_checksum == ZIO_CHECKSUM_OFF)
			return (ZIO_PIPELINE_CONTINUE);

		ASSERT(zio->io_prop.zp_checksum == ZIO_CHECKSUM_LABEL);
	}

	if ((error = zio_checksum_error(zio, &info)) != 0) {
		zio->io_error = error;
		if (!(zio->io_flags & ZIO_FLAG_SPECULATIVE)) {
			zfs_ereport_start_checksum(zio->io_spa,
			    zio->io_vd, zio, zio->io_offset,
			    zio->io_size, NULL, &info);
		}
	}

	return (ZIO_PIPELINE_CONTINUE);
}

/*
 * Called by RAID-Z to ensure we don't compute the checksum twice.
 */
void
zio_checksum_verified(zio_t *zio)
{
	zio->io_pipeline &= ~ZIO_STAGE_CHECKSUM_VERIFY;
}

/*
 * ==========================================================================
 * Error rank.  Errors are ranked in the order 0, ENXIO, ECKSUM, EIO, other.
 * An error of 0 indicates success.  ENXIO indicates whole-device failure,
 * which may be transient (e.g. unplugged) or permanent.  ECKSUM and EIO
 * indicate errors that are specific to one I/O, and most likely permanent.
 * Any other error is presumed to be worse because we weren't expecting it.
 * ==========================================================================
 */
int
zio_worst_error(int e1, int e2)
{
	static int zio_error_rank[] = { 0, ENXIO, ECKSUM, EIO };
	int r1, r2;

	for (r1 = 0; r1 < sizeof (zio_error_rank) / sizeof (int); r1++)
		if (e1 == zio_error_rank[r1])
			break;

	for (r2 = 0; r2 < sizeof (zio_error_rank) / sizeof (int); r2++)
		if (e2 == zio_error_rank[r2])
			break;

	return (r1 > r2 ? e1 : e2);
}
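
/*
 * For example, zio_worst_error(ENXIO, ECKSUM) returns ECKSUM, and an
 * errno missing from the table, such as EINVAL, falls off the end of
 * the rank array and so outranks everything in it.
 */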

/*
 * ==========================================================================
 * I/O completion
 * ==========================================================================
 */
static int
zio_ready(zio_t *zio)
{
	blkptr_t *bp = zio->io_bp;
	zio_t *pio, *pio_next;

	if (zio_wait_for_children(zio, ZIO_CHILD_GANG, ZIO_WAIT_READY) ||
	    zio_wait_for_children(zio, ZIO_CHILD_DDT, ZIO_WAIT_READY))
		return (ZIO_PIPELINE_STOP);

	if (zio->io_ready) {
		ASSERT(IO_IS_ALLOCATING(zio));
		ASSERT(bp->blk_birth == zio->io_txg || BP_IS_HOLE(bp));
		ASSERT(zio->io_children[ZIO_CHILD_GANG][ZIO_WAIT_READY] == 0);

		zio->io_ready(zio);
	}

	if (bp != NULL && bp != &zio->io_bp_copy)
		zio->io_bp_copy = *bp;

	if (zio->io_error)
		zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;

	mutex_enter(&zio->io_lock);
	zio->io_state[ZIO_WAIT_READY] = 1;
	pio = zio_walk_parents(zio);
	mutex_exit(&zio->io_lock);

	/*
	 * As we notify zio's parents, new parents could be added.
	 * New parents go to the head of zio's io_parent_list, however,
	 * so we will (correctly) not notify them.  The remainder of zio's
	 * io_parent_list, from 'pio_next' onward, cannot change because
	 * all parents must wait for us to be done before they can be done.
	 */
	for (; pio != NULL; pio = pio_next) {
		pio_next = zio_walk_parents(zio);
		zio_notify_parent(pio, zio, ZIO_WAIT_READY);
	}

	if (zio->io_flags & ZIO_FLAG_NODATA) {
		if (BP_IS_GANG(bp)) {
			zio->io_flags &= ~ZIO_FLAG_NODATA;
		} else {
			ASSERT((uintptr_t)zio->io_data < SPA_MAXBLOCKSIZE);
			zio->io_pipeline &= ~ZIO_VDEV_IO_STAGES;
		}
	}

	if (zio_injection_enabled &&
	    zio->io_spa->spa_syncing_txg == zio->io_txg)
		zio_handle_ignored_writes(zio);

	return (ZIO_PIPELINE_CONTINUE);
}
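
/*
 * Final pipeline stage.  In order: wait for all children, sanity-check
 * the bp, inherit vdev/gang/ddt child errors, finish any checksum
 * ereports while the transformed data is still around, update vdev
 * stats, post FMA events, decide whether to reexecute or suspend on
 * error, and finally notify parents and destroy the zio.
 */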
static int
zio_done(zio_t *zio)
{
	spa_t *spa = zio->io_spa;
	zio_t *lio = zio->io_logical;
	blkptr_t *bp = zio->io_bp;
	vdev_t *vd = zio->io_vd;
	uint64_t psize = zio->io_size;
	zio_t *pio, *pio_next;

	/*
	 * If our children haven't all completed,
	 * wait for them and then repeat this pipeline stage.
	 */
	if (zio_wait_for_children(zio, ZIO_CHILD_VDEV, ZIO_WAIT_DONE) ||
	    zio_wait_for_children(zio, ZIO_CHILD_GANG, ZIO_WAIT_DONE) ||
	    zio_wait_for_children(zio, ZIO_CHILD_DDT, ZIO_WAIT_DONE) ||
	    zio_wait_for_children(zio, ZIO_CHILD_LOGICAL, ZIO_WAIT_DONE))
		return (ZIO_PIPELINE_STOP);

	for (int c = 0; c < ZIO_CHILD_TYPES; c++)
		for (int w = 0; w < ZIO_WAIT_TYPES; w++)
			ASSERT(zio->io_children[c][w] == 0);

	if (bp != NULL) {
		ASSERT(bp->blk_pad[0] == 0);
		ASSERT(bp->blk_pad[1] == 0);
		ASSERT(bcmp(bp, &zio->io_bp_copy, sizeof (blkptr_t)) == 0 ||
		    (bp == zio_unique_parent(zio)->io_bp));
		if (zio->io_type == ZIO_TYPE_WRITE && !BP_IS_HOLE(bp) &&
		    zio->io_bp_override == NULL &&
		    !(zio->io_flags & ZIO_FLAG_IO_REPAIR)) {
			ASSERT(!BP_SHOULD_BYTESWAP(bp));
			ASSERT3U(zio->io_prop.zp_copies, <=, BP_GET_NDVAS(bp));
			ASSERT(BP_COUNT_GANG(bp) == 0 ||
			    (BP_COUNT_GANG(bp) == BP_GET_NDVAS(bp)));
		}
	}

	/*
	 * If there were child vdev/gang/ddt errors, they apply to us now.
	 */
	zio_inherit_child_errors(zio, ZIO_CHILD_VDEV);
	zio_inherit_child_errors(zio, ZIO_CHILD_GANG);
	zio_inherit_child_errors(zio, ZIO_CHILD_DDT);

	/*
	 * If the I/O on the transformed data was successful, generate any
	 * checksum reports now while we still have the transformed data.
	 */
	if (zio->io_error == 0) {
		while (zio->io_cksum_report != NULL) {
			zio_cksum_report_t *zcr = zio->io_cksum_report;
			uint64_t align = zcr->zcr_align;
			uint64_t asize = P2ROUNDUP(psize, align);
			char *abuf = zio->io_data;

			if (asize != psize) {
				abuf = zio_buf_alloc(asize);
				bcopy(zio->io_data, abuf, psize);
				bzero(abuf + psize, asize - psize);
			}

			zio->io_cksum_report = zcr->zcr_next;
			zcr->zcr_next = NULL;
			zcr->zcr_finish(zcr, abuf);
			zfs_ereport_free_checksum(zcr);

			if (asize != psize)
				zio_buf_free(abuf, asize);
		}
	}

	zio_pop_transforms(zio);	/* note: may set zio->io_error */

	vdev_stat_update(zio, psize);

	if (zio->io_error) {
		/*
		 * If this I/O is attached to a particular vdev,
		 * generate an error message describing the I/O failure
		 * at the block level.  We ignore these errors if the
		 * device is currently unavailable.
		 */
		if (zio->io_error != ECKSUM && vd != NULL && !vdev_is_dead(vd))
			zfs_ereport_post(FM_EREPORT_ZFS_IO, spa, vd, zio, 0, 0);

		if ((zio->io_error == EIO || !(zio->io_flags &
		    (ZIO_FLAG_SPECULATIVE | ZIO_FLAG_DONT_PROPAGATE))) &&
		    zio == lio) {
			/*
			 * For logical I/O requests, tell the SPA to log the
			 * error and generate a logical data ereport.
			 */
			spa_log_error(spa, zio);
			zfs_ereport_post(FM_EREPORT_ZFS_DATA, spa, NULL, zio,
			    0, 0);
		}
	}

	if (zio->io_error && zio == lio) {
		/*
		 * Determine whether zio should be reexecuted.  This will
		 * propagate all the way to the root via zio_notify_parent().
		 */
		ASSERT(vd == NULL && bp != NULL);
		ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);

		if (IO_IS_ALLOCATING(zio) &&
		    !(zio->io_flags & ZIO_FLAG_CANFAIL)) {
			if (zio->io_error != ENOSPC)
				zio->io_reexecute |= ZIO_REEXECUTE_NOW;
			else
				zio->io_reexecute |= ZIO_REEXECUTE_SUSPEND;
		}

		if ((zio->io_type == ZIO_TYPE_READ ||
		    zio->io_type == ZIO_TYPE_FREE) &&
		    !(zio->io_flags & ZIO_FLAG_SCAN_THREAD) &&
		    zio->io_error == ENXIO &&
		    spa_load_state(spa) == SPA_LOAD_NONE &&
		    spa_get_failmode(spa) != ZIO_FAILURE_MODE_CONTINUE)
			zio->io_reexecute |= ZIO_REEXECUTE_SUSPEND;

		if (!(zio->io_flags & ZIO_FLAG_CANFAIL) && !zio->io_reexecute)
			zio->io_reexecute |= ZIO_REEXECUTE_SUSPEND;

		/*
		 * Here is a possibly good place to attempt to do
		 * either combinatorial reconstruction or error correction
		 * based on checksums.  It also might be a good place
		 * to send out preliminary ereports before we suspend
		 * processing.
		 */
	}

	/*
	 * If there were logical child errors, they apply to us now.
	 * We defer this until now to avoid conflating logical child
	 * errors with errors that happened to the zio itself when
	 * updating vdev stats and reporting FMA events above.
	 */
	zio_inherit_child_errors(zio, ZIO_CHILD_LOGICAL);

	if ((zio->io_error || zio->io_reexecute) &&
	    IO_IS_ALLOCATING(zio) && zio->io_gang_leader == zio &&
	    !(zio->io_flags & ZIO_FLAG_IO_REWRITE))
		zio_dva_unallocate(zio, zio->io_gang_tree, bp);

	zio_gang_tree_free(&zio->io_gang_tree);

	/*
	 * Godfather I/Os should never suspend.
	 */
	if ((zio->io_flags & ZIO_FLAG_GODFATHER) &&
	    (zio->io_reexecute & ZIO_REEXECUTE_SUSPEND))
		zio->io_reexecute = 0;

	if (zio->io_reexecute) {
		/*
		 * This is a logical I/O that wants to reexecute.
		 *
		 * Reexecute is top-down.  When an i/o fails, if it's not
		 * the root, it simply notifies its parent and sticks around.
		 * The parent, seeing that it still has children in zio_done(),
		 * does the same.  This percolates all the way up to the root.
		 * The root i/o will reexecute or suspend the entire tree.
		 *
		 * This approach ensures that zio_reexecute() honors
		 * all the original i/o dependency relationships, e.g.
		 * parents not executing until children are ready.
		 */
		ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);

		zio->io_gang_leader = NULL;

		mutex_enter(&zio->io_lock);
		zio->io_state[ZIO_WAIT_DONE] = 1;
		mutex_exit(&zio->io_lock);

		/*
		 * "The Godfather" I/O monitors its children but is
		 * not a true parent to them.  It will track them through
		 * the pipeline but severs its ties whenever they get into
		 * trouble (e.g. suspended).  This allows "The Godfather"
		 * I/O to return status without blocking.
		 */
		for (pio = zio_walk_parents(zio); pio != NULL; pio = pio_next) {
			zio_link_t *zl = zio->io_walk_link;
			pio_next = zio_walk_parents(zio);

			if ((pio->io_flags & ZIO_FLAG_GODFATHER) &&
			    (zio->io_reexecute & ZIO_REEXECUTE_SUSPEND)) {
				zio_remove_child(pio, zio, zl);
				zio_notify_parent(pio, zio, ZIO_WAIT_DONE);
			}
		}

		if ((pio = zio_unique_parent(zio)) != NULL) {
			/*
			 * We're not a root i/o, so there's nothing to do
			 * but notify our parent.  Don't propagate errors
			 * upward since we haven't permanently failed yet.
			 */
			ASSERT(!(zio->io_flags & ZIO_FLAG_GODFATHER));
			zio->io_flags |= ZIO_FLAG_DONT_PROPAGATE;
			zio_notify_parent(pio, zio, ZIO_WAIT_DONE);
		} else if (zio->io_reexecute & ZIO_REEXECUTE_SUSPEND) {
			/*
			 * We'd fail again if we reexecuted now, so suspend
			 * until conditions improve (e.g. device comes online).
			 */
			zio_suspend(spa, zio);
		} else {
			/*
			 * Reexecution is potentially a huge amount of work.
			 * Hand it off to the otherwise-unused claim taskq.
			 */
#ifdef _KERNEL
			(void) taskq_dispatch_safe(
			    spa->spa_zio_taskq[ZIO_TYPE_CLAIM][ZIO_TASKQ_ISSUE],
			    (task_func_t *)zio_reexecute, zio, TQ_SLEEP,
			    &zio->io_task);
#else
			(void) taskq_dispatch(
			    spa->spa_zio_taskq[ZIO_TYPE_CLAIM][ZIO_TASKQ_ISSUE],
			    (task_func_t *)zio_reexecute, zio, TQ_SLEEP);
#endif
		}
		return (ZIO_PIPELINE_STOP);
	}

	ASSERT(zio->io_child_count == 0);
	ASSERT(zio->io_reexecute == 0);
	ASSERT(zio->io_error == 0 || (zio->io_flags & ZIO_FLAG_CANFAIL));

	/*
	 * Report any checksum errors, since the I/O is complete.
	 */
	while (zio->io_cksum_report != NULL) {
		zio_cksum_report_t *zcr = zio->io_cksum_report;
		zio->io_cksum_report = zcr->zcr_next;
		zcr->zcr_next = NULL;
		zcr->zcr_finish(zcr, NULL);
		zfs_ereport_free_checksum(zcr);
	}

	/*
	 * It is the responsibility of the done callback to ensure that this
	 * particular zio is no longer discoverable for adoption, and as
	 * such, cannot acquire any new parents.
	 */
	if (zio->io_done)
		zio->io_done(zio);

	mutex_enter(&zio->io_lock);
	zio->io_state[ZIO_WAIT_DONE] = 1;
	mutex_exit(&zio->io_lock);

	for (pio = zio_walk_parents(zio); pio != NULL; pio = pio_next) {
		zio_link_t *zl = zio->io_walk_link;
		pio_next = zio_walk_parents(zio);
		zio_remove_child(pio, zio, zl);
		zio_notify_parent(pio, zio, ZIO_WAIT_DONE);
	}

	if (zio->io_waiter != NULL) {
		mutex_enter(&zio->io_lock);
		zio->io_executor = NULL;
		cv_broadcast(&zio->io_cv);
		mutex_exit(&zio->io_lock);
	} else {
		zio_destroy(zio);
	}

	return (ZIO_PIPELINE_STOP);
}

/*
 * ==========================================================================
 * I/O pipeline definition
 * ==========================================================================
 */
static zio_pipe_stage_t *zio_pipeline[] = {
	NULL,
	zio_read_bp_init,
	zio_free_bp_init,
	zio_issue_async,
	zio_write_bp_init,
	zio_checksum_generate,
	zio_ddt_read_start,
	zio_ddt_read_done,
	zio_ddt_write,
	zio_ddt_free,
	zio_gang_assemble,
	zio_gang_issue,
	zio_dva_allocate,
	zio_dva_free,
	zio_dva_claim,
	zio_ready,
	zio_vdev_io_start,
	zio_vdev_io_done,
	zio_vdev_io_assess,
	zio_checksum_verify,
	zio_done
};
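
/*
 * Note: the entries above must remain in ZIO_STAGE_* bit order (see
 * zio_impl.h); zio_execute() indexes this table by the position of a
 * stage's one-hot bit.  The leading NULL slot corresponds to
 * ZIO_STAGE_OPEN, which requires no processing.
 */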