zio.c revision 248571
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012 by Delphix. All rights reserved.
 */

#include <sys/zfs_context.h>
#include <sys/fm/fs/zfs.h>
#include <sys/spa.h>
#include <sys/txg.h>
#include <sys/spa_impl.h>
#include <sys/vdev_impl.h>
#include <sys/zio_impl.h>
#include <sys/zio_compress.h>
#include <sys/zio_checksum.h>
#include <sys/dmu_objset.h>
#include <sys/arc.h>
#include <sys/ddt.h>
#include <sys/trim_map.h>

SYSCTL_DECL(_vfs_zfs);
SYSCTL_NODE(_vfs_zfs, OID_AUTO, zio, CTLFLAG_RW, 0, "ZFS ZIO");
static int zio_use_uma = 0;
TUNABLE_INT("vfs.zfs.zio.use_uma", &zio_use_uma);
SYSCTL_INT(_vfs_zfs_zio, OID_AUTO, use_uma, CTLFLAG_RDTUN, &zio_use_uma, 0,
    "Use uma(9) for ZIO allocations");
static int zio_exclude_metadata = 0;
TUNABLE_INT("vfs.zfs.zio.exclude_metadata", &zio_exclude_metadata);
SYSCTL_INT(_vfs_zfs_zio, OID_AUTO, exclude_metadata, CTLFLAG_RDTUN,
    &zio_exclude_metadata, 0,
    "Exclude metadata buffers from dumps as well");

zio_trim_stats_t zio_trim_stats = {
	{ "bytes",		KSTAT_DATA_UINT64,
	    "Number of bytes successfully TRIMmed" },
	{ "success",		KSTAT_DATA_UINT64,
	    "Number of successful TRIM requests" },
	{ "unsupported",	KSTAT_DATA_UINT64,
	    "Number of TRIM requests that failed because TRIM is not supported" },
	{ "failed",		KSTAT_DATA_UINT64,
	    "Number of TRIM requests that failed for reasons other than not supported" },
};

static kstat_t *zio_trim_ksp;

/*
 * ==========================================================================
 * I/O priority table
 * ==========================================================================
 */
uint8_t zio_priority_table[ZIO_PRIORITY_TABLE_SIZE] = {
	0,	/* ZIO_PRIORITY_NOW */
	0,	/* ZIO_PRIORITY_SYNC_READ */
	0,	/* ZIO_PRIORITY_SYNC_WRITE */
	0,	/* ZIO_PRIORITY_LOG_WRITE */
	1,	/* ZIO_PRIORITY_CACHE_FILL */
	1,	/* ZIO_PRIORITY_AGG */
	4,	/* ZIO_PRIORITY_FREE */
	4,	/* ZIO_PRIORITY_ASYNC_WRITE */
	6,	/* ZIO_PRIORITY_ASYNC_READ */
	10,	/* ZIO_PRIORITY_RESILVER */
	20,	/* ZIO_PRIORITY_SCRUB */
	2,	/* ZIO_PRIORITY_DDT_PREFETCH */
	30,	/* ZIO_PRIORITY_TRIM */
};

/*
 * ==========================================================================
 * I/O type descriptions
 * ==========================================================================
 */
char *zio_type_name[ZIO_TYPES] = {
	"zio_null", "zio_read", "zio_write", "zio_free", "zio_claim",
	"zio_ioctl"
};

/*
 * ==========================================================================
 * I/O kmem caches
 * ==========================================================================
 */
kmem_cache_t *zio_cache;
kmem_cache_t *zio_link_cache;
kmem_cache_t *zio_buf_cache[SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT];
kmem_cache_t *zio_data_buf_cache[SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT];

#ifdef _KERNEL
extern vmem_t *zio_alloc_arena;
#endif
extern int zfs_mg_alloc_failures;

/*
 * The following actions directly affect the spa's sync-to-convergence logic.
 * The values below define the sync pass when we start performing the action.
 * Care should be taken when changing these values as they directly impact
 * spa_sync() performance. Tuning these values may introduce subtle performance
 * pathologies and should only be done in the context of performance analysis.
 * These tunables will eventually be removed and replaced with #defines once
 * enough analysis has been done to determine optimal values.
 *
 * The 'zfs_sync_pass_deferred_free' pass must be greater than 1 to ensure that
 * regular blocks are not deferred.
 */
int zfs_sync_pass_deferred_free = 2; /* defer frees starting in this pass */
TUNABLE_INT("vfs.zfs.sync_pass_deferred_free", &zfs_sync_pass_deferred_free);
SYSCTL_INT(_vfs_zfs, OID_AUTO, sync_pass_deferred_free, CTLFLAG_RDTUN,
    &zfs_sync_pass_deferred_free, 0, "defer frees starting in this pass");
int zfs_sync_pass_dont_compress = 5; /* don't compress starting in this pass */
TUNABLE_INT("vfs.zfs.sync_pass_dont_compress", &zfs_sync_pass_dont_compress);
SYSCTL_INT(_vfs_zfs, OID_AUTO, sync_pass_dont_compress, CTLFLAG_RDTUN,
    &zfs_sync_pass_dont_compress, 0, "don't compress starting in this pass");
int zfs_sync_pass_rewrite = 2; /* rewrite new bps starting in this pass */
TUNABLE_INT("vfs.zfs.sync_pass_rewrite", &zfs_sync_pass_rewrite);
SYSCTL_INT(_vfs_zfs, OID_AUTO, sync_pass_rewrite, CTLFLAG_RDTUN,
    &zfs_sync_pass_rewrite, 0, "rewrite new bps starting in this pass");

/*
 * An allocating zio is one that either currently has the DVA allocate
 * stage set or will have it later in its lifetime.
 */
#define	IO_IS_ALLOCATING(zio) ((zio)->io_orig_pipeline & ZIO_STAGE_DVA_ALLOCATE)

boolean_t zio_requeue_io_start_cut_in_line = B_TRUE;

#ifdef ZFS_DEBUG
int zio_buf_debug_limit = 16384;
#else
int zio_buf_debug_limit = 0;
#endif

void
zio_init(void)
{
	size_t c;
	zio_cache = kmem_cache_create("zio_cache",
	    sizeof (zio_t), 0, NULL, NULL, NULL, NULL, NULL, 0);
	zio_link_cache = kmem_cache_create("zio_link_cache",
	    sizeof (zio_link_t), 0, NULL, NULL, NULL, NULL, NULL, 0);

	/*
	 * For small buffers, we want a cache for each multiple of
	 * SPA_MINBLOCKSIZE.  For medium-size buffers, we want a cache
	 * for each quarter-power of 2.  For large buffers, we want
	 * a cache for each multiple of PAGESIZE.
	 */
	for (c = 0; c < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT; c++) {
		size_t size = (c + 1) << SPA_MINBLOCKSHIFT;
		size_t p2 = size;
		size_t align = 0;
		size_t cflags = (size > zio_buf_debug_limit) ? KMC_NODEBUG : 0;

		while (p2 & (p2 - 1))
			p2 &= p2 - 1;

#ifdef illumos
#ifndef _KERNEL
		/*
		 * If we are using watchpoints, put each buffer on its own page,
		 * to eliminate the performance overhead of trapping to the
		 * kernel when modifying a non-watched buffer that shares the
		 * page with a watched buffer.
		 */
		if (arc_watch && !IS_P2ALIGNED(size, PAGESIZE))
			continue;
#endif
#endif /* illumos */
		if (size <= 4 * SPA_MINBLOCKSIZE) {
			align = SPA_MINBLOCKSIZE;
		} else if (IS_P2ALIGNED(size, PAGESIZE)) {
			align = PAGESIZE;
		} else if (IS_P2ALIGNED(size, p2 >> 2)) {
			align = p2 >> 2;
		}

		if (align != 0) {
			char name[36];
			(void) sprintf(name, "zio_buf_%lu", (ulong_t)size);
			zio_buf_cache[c] = kmem_cache_create(name, size,
			    align, NULL, NULL, NULL, NULL, NULL, cflags);

			/*
			 * Since zio_data bufs do not appear in crash dumps, we
			 * pass KMC_NOTOUCH so that no allocator metadata is
			 * stored with the buffers.
			 */
			(void) sprintf(name, "zio_data_buf_%lu", (ulong_t)size);
			zio_data_buf_cache[c] = kmem_cache_create(name, size,
			    align, NULL, NULL, NULL, NULL, NULL,
			    cflags | KMC_NOTOUCH | KMC_NODEBUG);
		}
	}

	while (--c != 0) {
		ASSERT(zio_buf_cache[c] != NULL);
		if (zio_buf_cache[c - 1] == NULL)
			zio_buf_cache[c - 1] = zio_buf_cache[c];

		ASSERT(zio_data_buf_cache[c] != NULL);
		if (zio_data_buf_cache[c - 1] == NULL)
			zio_data_buf_cache[c - 1] = zio_data_buf_cache[c];
	}

	/*
	 * The zio write taskqs have 1 thread per cpu, allow 1/2 of the taskqs
	 * to fail 3 times per txg or 8 failures, whichever is greater.
	 */
	if (zfs_mg_alloc_failures == 0)
		zfs_mg_alloc_failures = MAX((3 * max_ncpus / 2), 8);
	else if (zfs_mg_alloc_failures < 8)
		zfs_mg_alloc_failures = 8;

	zio_inject_init();

	zio_trim_ksp = kstat_create("zfs", 0, "zio_trim", "misc",
	    KSTAT_TYPE_NAMED,
	    sizeof(zio_trim_stats) / sizeof(kstat_named_t),
	    KSTAT_FLAG_VIRTUAL);

	if (zio_trim_ksp != NULL) {
		zio_trim_ksp->ks_data = &zio_trim_stats;
		kstat_install(zio_trim_ksp);
	}
}
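
/*
 * For illustration (assuming SPA_MINBLOCKSIZE == 512 and PAGESIZE == 4096,
 * which are typical but platform-dependent): the loop above yields caches
 * for every 512-byte multiple up to 4K (zio_buf_512 ... zio_buf_4096),
 * quarter-power-of-2 sizes in the middle range (e.g. zio_buf_5120, aligned
 * to p2 >> 2 == 1024), and page-multiple sizes beyond that.  A size class
 * with no cache of its own is served by the next larger cache via the
 * backfill loop above (e.g. a 4608-byte class falls through to
 * zio_buf_5120).
 */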

void
zio_fini(void)
{
	size_t c;
	kmem_cache_t *last_cache = NULL;
	kmem_cache_t *last_data_cache = NULL;

	for (c = 0; c < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT; c++) {
		if (zio_buf_cache[c] != last_cache) {
			last_cache = zio_buf_cache[c];
			kmem_cache_destroy(zio_buf_cache[c]);
		}
		zio_buf_cache[c] = NULL;

		if (zio_data_buf_cache[c] != last_data_cache) {
			last_data_cache = zio_data_buf_cache[c];
			kmem_cache_destroy(zio_data_buf_cache[c]);
		}
		zio_data_buf_cache[c] = NULL;
	}

	kmem_cache_destroy(zio_link_cache);
	kmem_cache_destroy(zio_cache);

	zio_inject_fini();

	if (zio_trim_ksp != NULL) {
		kstat_delete(zio_trim_ksp);
		zio_trim_ksp = NULL;
	}
}

/*
 * ==========================================================================
 * Allocate and free I/O buffers
 * ==========================================================================
 */

/*
 * Use zio_buf_alloc to allocate ZFS metadata.  This data will appear in a
 * crashdump if the kernel panics, so use it judiciously.  Obviously, it's
 * useful to inspect ZFS metadata, but if possible, we should avoid keeping
 * excess / transient data in-core during a crashdump.
 */
void *
zio_buf_alloc(size_t size)
{
	size_t c = (size - 1) >> SPA_MINBLOCKSHIFT;
	int flags = zio_exclude_metadata ? KM_NODEBUG : 0;

	ASSERT(c < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);

	if (zio_use_uma)
		return (kmem_cache_alloc(zio_buf_cache[c], KM_PUSHPAGE));
	else
		return (kmem_alloc(size, KM_SLEEP|flags));
}
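
/*
 * The cache index rounds the request up to the next SPA_MINBLOCKSIZE
 * multiple: e.g. with SPA_MINBLOCKSHIFT == 9, a 5000-byte request maps to
 * c = (5000 - 1) >> 9 == 9, i.e. the 5120-byte cache.  Callers must free
 * with the same size they allocated, since the size is what selects the
 * cache.  A hypothetical sketch (not part of this file):
 */
#if 0
	void *buf = zio_buf_alloc(5000);	/* served from zio_buf_5120 */
	/* ... use buf ... */
	zio_buf_free(buf, 5000);		/* must pass the original size */
#endif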

/*
 * Use zio_data_buf_alloc to allocate data.  The data will not appear in a
 * crashdump if the kernel panics.  This exists so that we will limit the amount
 * of ZFS data that shows up in a kernel crashdump.  (Thus reducing the amount
 * of kernel heap dumped to disk when the kernel panics)
 */
void *
zio_data_buf_alloc(size_t size)
{
	size_t c = (size - 1) >> SPA_MINBLOCKSHIFT;

	ASSERT(c < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);

	if (zio_use_uma)
		return (kmem_cache_alloc(zio_data_buf_cache[c], KM_PUSHPAGE));
	else
		return (kmem_alloc(size, KM_SLEEP | KM_NODEBUG));
}

void
zio_buf_free(void *buf, size_t size)
{
	size_t c = (size - 1) >> SPA_MINBLOCKSHIFT;

	ASSERT(c < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);

	if (zio_use_uma)
		kmem_cache_free(zio_buf_cache[c], buf);
	else
		kmem_free(buf, size);
}

void
zio_data_buf_free(void *buf, size_t size)
{
	size_t c = (size - 1) >> SPA_MINBLOCKSHIFT;

	ASSERT(c < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);

	if (zio_use_uma)
		kmem_cache_free(zio_data_buf_cache[c], buf);
	else
		kmem_free(buf, size);
}

/*
 * ==========================================================================
 * Push and pop I/O transform buffers
 * ==========================================================================
 */
static void
zio_push_transform(zio_t *zio, void *data, uint64_t size, uint64_t bufsize,
    zio_transform_func_t *transform)
{
	zio_transform_t *zt = kmem_alloc(sizeof (zio_transform_t), KM_SLEEP);

	zt->zt_orig_data = zio->io_data;
	zt->zt_orig_size = zio->io_size;
	zt->zt_bufsize = bufsize;
	zt->zt_transform = transform;

	zt->zt_next = zio->io_transform_stack;
	zio->io_transform_stack = zt;

	zio->io_data = data;
	zio->io_size = size;
}

static void
zio_pop_transforms(zio_t *zio)
{
	zio_transform_t *zt;

	while ((zt = zio->io_transform_stack) != NULL) {
		if (zt->zt_transform != NULL)
			zt->zt_transform(zio,
			    zt->zt_orig_data, zt->zt_orig_size);

		if (zt->zt_bufsize != 0)
			zio_buf_free(zio->io_data, zt->zt_bufsize);

		zio->io_data = zt->zt_orig_data;
		zio->io_size = zt->zt_orig_size;
		zio->io_transform_stack = zt->zt_next;

		kmem_free(zt, sizeof (zio_transform_t));
	}
}
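
/*
 * Transforms stack LIFO: each push swaps a staging buffer into
 * io_data/io_size and records the previous pair; each pop runs the
 * transform callback (if any) to translate the staged bytes back into
 * the original buffer, then frees the staging buffer.  For example,
 * zio_read_bp_init() below pushes a zio_decompress transform, so a
 * compressed read lands in a scratch buffer and is decompressed into
 * the caller's buffer when the transform stack is popped at the end of
 * the pipeline.
 */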

/*
 * ==========================================================================
 * I/O transform callbacks for subblocks and decompression
 * ==========================================================================
 */
static void
zio_subblock(zio_t *zio, void *data, uint64_t size)
{
	ASSERT(zio->io_size > size);

	if (zio->io_type == ZIO_TYPE_READ)
		bcopy(zio->io_data, data, size);
}

static void
zio_decompress(zio_t *zio, void *data, uint64_t size)
{
	if (zio->io_error == 0 &&
	    zio_decompress_data(BP_GET_COMPRESS(zio->io_bp),
	    zio->io_data, data, zio->io_size, size) != 0)
		zio->io_error = EIO;
}

/*
 * ==========================================================================
 * I/O parent/child relationships and pipeline interlocks
 * ==========================================================================
 */
/*
 * NOTE - Callers to zio_walk_parents() and zio_walk_children() must
 * continue calling these functions until they return NULL.
 * Otherwise, the next caller will pick up the list walk in
 * some indeterminate state.  (Otherwise every caller would
 * have to pass in a cookie to keep the state represented by
 * io_walk_link, which gets annoying.)
 */
zio_t *
zio_walk_parents(zio_t *cio)
{
	zio_link_t *zl = cio->io_walk_link;
	list_t *pl = &cio->io_parent_list;

	zl = (zl == NULL) ? list_head(pl) : list_next(pl, zl);
	cio->io_walk_link = zl;

	if (zl == NULL)
		return (NULL);

	ASSERT(zl->zl_child == cio);
	return (zl->zl_parent);
}

zio_t *
zio_walk_children(zio_t *pio)
{
	zio_link_t *zl = pio->io_walk_link;
	list_t *cl = &pio->io_child_list;

	zl = (zl == NULL) ? list_head(cl) : list_next(cl, zl);
	pio->io_walk_link = zl;

	if (zl == NULL)
		return (NULL);

	ASSERT(zl->zl_parent == pio);
	return (zl->zl_child);
}

zio_t *
zio_unique_parent(zio_t *cio)
{
	zio_t *pio = zio_walk_parents(cio);

	VERIFY(zio_walk_parents(cio) == NULL);
	return (pio);
}

void
zio_add_child(zio_t *pio, zio_t *cio)
{
	zio_link_t *zl = kmem_cache_alloc(zio_link_cache, KM_SLEEP);

	/*
	 * Logical I/Os can have logical, gang, or vdev children.
	 * Gang I/Os can have gang or vdev children.
	 * Vdev I/Os can only have vdev children.
	 * The following ASSERT captures all of these constraints.
	 */
	ASSERT(cio->io_child_type <= pio->io_child_type);

	zl->zl_parent = pio;
	zl->zl_child = cio;

	mutex_enter(&cio->io_lock);
	mutex_enter(&pio->io_lock);

	ASSERT(pio->io_state[ZIO_WAIT_DONE] == 0);

	for (int w = 0; w < ZIO_WAIT_TYPES; w++)
		pio->io_children[cio->io_child_type][w] += !cio->io_state[w];

	list_insert_head(&pio->io_child_list, zl);
	list_insert_head(&cio->io_parent_list, zl);

	pio->io_child_count++;
	cio->io_parent_count++;

	mutex_exit(&pio->io_lock);
	mutex_exit(&cio->io_lock);
}

static void
zio_remove_child(zio_t *pio, zio_t *cio, zio_link_t *zl)
{
	ASSERT(zl->zl_parent == pio);
	ASSERT(zl->zl_child == cio);

	mutex_enter(&cio->io_lock);
	mutex_enter(&pio->io_lock);

	list_remove(&pio->io_child_list, zl);
	list_remove(&cio->io_parent_list, zl);

	pio->io_child_count--;
	cio->io_parent_count--;

	mutex_exit(&pio->io_lock);
	mutex_exit(&cio->io_lock);

	kmem_cache_free(zio_link_cache, zl);
}

static boolean_t
zio_wait_for_children(zio_t *zio, enum zio_child child, enum zio_wait_type wait)
{
	uint64_t *countp = &zio->io_children[child][wait];
	boolean_t waiting = B_FALSE;

	mutex_enter(&zio->io_lock);
	ASSERT(zio->io_stall == NULL);
	if (*countp != 0) {
		zio->io_stage >>= 1;
		zio->io_stall = countp;
		waiting = B_TRUE;
	}
	mutex_exit(&zio->io_lock);

	return (waiting);
}

static void
zio_notify_parent(zio_t *pio, zio_t *zio, enum zio_wait_type wait)
{
	uint64_t *countp = &pio->io_children[zio->io_child_type][wait];
	int *errorp = &pio->io_child_error[zio->io_child_type];

	mutex_enter(&pio->io_lock);
	if (zio->io_error && !(zio->io_flags & ZIO_FLAG_DONT_PROPAGATE))
		*errorp = zio_worst_error(*errorp, zio->io_error);
	pio->io_reexecute |= zio->io_reexecute;
	ASSERT3U(*countp, >, 0);
	if (--*countp == 0 && pio->io_stall == countp) {
		pio->io_stall = NULL;
		mutex_exit(&pio->io_lock);
		zio_execute(pio);
	} else {
		mutex_exit(&pio->io_lock);
	}
}

static void
zio_inherit_child_errors(zio_t *zio, enum zio_child c)
{
	if (zio->io_child_error[c] != 0 && zio->io_error == 0)
		zio->io_error = zio->io_child_error[c];
}
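
/*
 * Together, zio_wait_for_children() and zio_notify_parent() implement
 * the pipeline interlock: a stalled parent backs io_stage up one bit
 * (io_stage >>= 1) and records, in io_stall, which child/wait counter
 * it is blocked on; the last completing child observes the count reach
 * zero, clears io_stall, and re-dispatches the parent with
 * zio_execute(), which re-enters the very stage that stalled.
 */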

/*
 * ==========================================================================
 * Create the various types of I/O (read, write, free, etc)
 * ==========================================================================
 */
static zio_t *
zio_create(zio_t *pio, spa_t *spa, uint64_t txg, const blkptr_t *bp,
    void *data, uint64_t size, zio_done_func_t *done, void *private,
    zio_type_t type, int priority, enum zio_flag flags,
    vdev_t *vd, uint64_t offset, const zbookmark_t *zb,
    enum zio_stage stage, enum zio_stage pipeline)
{
	zio_t *zio;

	ASSERT3U(type == ZIO_TYPE_FREE || size, <=, SPA_MAXBLOCKSIZE);
	ASSERT(P2PHASE(size, SPA_MINBLOCKSIZE) == 0);
	ASSERT(P2PHASE(offset, SPA_MINBLOCKSIZE) == 0);

	ASSERT(!vd || spa_config_held(spa, SCL_STATE_ALL, RW_READER));
	ASSERT(!bp || !(flags & ZIO_FLAG_CONFIG_WRITER));
	ASSERT(vd || stage == ZIO_STAGE_OPEN);

	zio = kmem_cache_alloc(zio_cache, KM_SLEEP);
	bzero(zio, sizeof (zio_t));

	mutex_init(&zio->io_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&zio->io_cv, NULL, CV_DEFAULT, NULL);

	list_create(&zio->io_parent_list, sizeof (zio_link_t),
	    offsetof(zio_link_t, zl_parent_node));
	list_create(&zio->io_child_list, sizeof (zio_link_t),
	    offsetof(zio_link_t, zl_child_node));

	if (vd != NULL)
		zio->io_child_type = ZIO_CHILD_VDEV;
	else if (flags & ZIO_FLAG_GANG_CHILD)
		zio->io_child_type = ZIO_CHILD_GANG;
	else if (flags & ZIO_FLAG_DDT_CHILD)
		zio->io_child_type = ZIO_CHILD_DDT;
	else
		zio->io_child_type = ZIO_CHILD_LOGICAL;

	if (bp != NULL) {
		zio->io_bp = (blkptr_t *)bp;
		zio->io_bp_copy = *bp;
		zio->io_bp_orig = *bp;
		if (type != ZIO_TYPE_WRITE ||
		    zio->io_child_type == ZIO_CHILD_DDT)
			zio->io_bp = &zio->io_bp_copy;	/* so caller can free */
		if (zio->io_child_type == ZIO_CHILD_LOGICAL)
			zio->io_logical = zio;
		if (zio->io_child_type > ZIO_CHILD_GANG && BP_IS_GANG(bp))
			pipeline |= ZIO_GANG_STAGES;
	}

	zio->io_spa = spa;
	zio->io_txg = txg;
	zio->io_done = done;
	zio->io_private = private;
	zio->io_type = type;
	zio->io_priority = priority;
	zio->io_vd = vd;
	zio->io_offset = offset;
	zio->io_orig_data = zio->io_data = data;
	zio->io_orig_size = zio->io_size = size;
	zio->io_orig_flags = zio->io_flags = flags;
	zio->io_orig_stage = zio->io_stage = stage;
	zio->io_orig_pipeline = zio->io_pipeline = pipeline;

	zio->io_state[ZIO_WAIT_READY] = (stage >= ZIO_STAGE_READY);
	zio->io_state[ZIO_WAIT_DONE] = (stage >= ZIO_STAGE_DONE);

	if (zb != NULL)
		zio->io_bookmark = *zb;

	if (pio != NULL) {
		if (zio->io_logical == NULL)
			zio->io_logical = pio->io_logical;
		if (zio->io_child_type == ZIO_CHILD_GANG)
			zio->io_gang_leader = pio->io_gang_leader;
		zio_add_child(pio, zio);
	}

	return (zio);
}

static void
zio_destroy(zio_t *zio)
{
	list_destroy(&zio->io_parent_list);
	list_destroy(&zio->io_child_list);
	mutex_destroy(&zio->io_lock);
	cv_destroy(&zio->io_cv);
	kmem_cache_free(zio_cache, zio);
}
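
/*
 * Note on the stage/pipeline encoding used throughout: each zio_stage
 * value is a single bit (zio_execute() asserts ISP2(stage)), and a
 * pipeline is simply the OR of the stage bits a zio must pass through.
 * That is why zio_create() can splice in extra work with
 * "pipeline |= ZIO_GANG_STAGES", and why advancing the pipeline is a
 * matter of shifting the current stage bit left until it reaches a bit
 * that is set in io_pipeline.
 */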

zio_t *
zio_null(zio_t *pio, spa_t *spa, vdev_t *vd, zio_done_func_t *done,
    void *private, enum zio_flag flags)
{
	zio_t *zio;

	zio = zio_create(pio, spa, 0, NULL, NULL, 0, done, private,
	    ZIO_TYPE_NULL, ZIO_PRIORITY_NOW, flags, vd, 0, NULL,
	    ZIO_STAGE_OPEN, ZIO_INTERLOCK_PIPELINE);

	return (zio);
}

zio_t *
zio_root(spa_t *spa, zio_done_func_t *done, void *private, enum zio_flag flags)
{
	return (zio_null(NULL, spa, NULL, done, private, flags));
}

zio_t *
zio_read(zio_t *pio, spa_t *spa, const blkptr_t *bp,
    void *data, uint64_t size, zio_done_func_t *done, void *private,
    int priority, enum zio_flag flags, const zbookmark_t *zb)
{
	zio_t *zio;

	zio = zio_create(pio, spa, BP_PHYSICAL_BIRTH(bp), bp,
	    data, size, done, private,
	    ZIO_TYPE_READ, priority, flags, NULL, 0, zb,
	    ZIO_STAGE_OPEN, (flags & ZIO_FLAG_DDT_CHILD) ?
	    ZIO_DDT_CHILD_READ_PIPELINE : ZIO_READ_PIPELINE);

	return (zio);
}
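
/*
 * A common usage pattern (sketch only, not part of this file; assumes
 * spa, bp, buf, zb, and error are in scope in the caller): several
 * dependent I/Os hang off a root zio and are waited on once.
 */
#if 0
	zio_t *rio = zio_root(spa, NULL, NULL, ZIO_FLAG_CANFAIL);
	zio_nowait(zio_read(rio, spa, bp, buf, BP_GET_PSIZE(bp), NULL, NULL,
	    ZIO_PRIORITY_ASYNC_READ, 0, &zb));
	error = zio_wait(rio);		/* completes after all children do */
#endif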

zio_t *
zio_write(zio_t *pio, spa_t *spa, uint64_t txg, blkptr_t *bp,
    void *data, uint64_t size, const zio_prop_t *zp,
    zio_done_func_t *ready, zio_done_func_t *done, void *private,
    int priority, enum zio_flag flags, const zbookmark_t *zb)
{
	zio_t *zio;

	ASSERT(zp->zp_checksum >= ZIO_CHECKSUM_OFF &&
	    zp->zp_checksum < ZIO_CHECKSUM_FUNCTIONS &&
	    zp->zp_compress >= ZIO_COMPRESS_OFF &&
	    zp->zp_compress < ZIO_COMPRESS_FUNCTIONS &&
	    DMU_OT_IS_VALID(zp->zp_type) &&
	    zp->zp_level < 32 &&
	    zp->zp_copies > 0 &&
	    zp->zp_copies <= spa_max_replication(spa));

	zio = zio_create(pio, spa, txg, bp, data, size, done, private,
	    ZIO_TYPE_WRITE, priority, flags, NULL, 0, zb,
	    ZIO_STAGE_OPEN, (flags & ZIO_FLAG_DDT_CHILD) ?
	    ZIO_DDT_CHILD_WRITE_PIPELINE : ZIO_WRITE_PIPELINE);

	zio->io_ready = ready;
	zio->io_prop = *zp;

	return (zio);
}

zio_t *
zio_rewrite(zio_t *pio, spa_t *spa, uint64_t txg, blkptr_t *bp, void *data,
    uint64_t size, zio_done_func_t *done, void *private, int priority,
    enum zio_flag flags, zbookmark_t *zb)
{
	zio_t *zio;

	zio = zio_create(pio, spa, txg, bp, data, size, done, private,
	    ZIO_TYPE_WRITE, priority, flags, NULL, 0, zb,
	    ZIO_STAGE_OPEN, ZIO_REWRITE_PIPELINE);

	return (zio);
}

void
zio_write_override(zio_t *zio, blkptr_t *bp, int copies, boolean_t nopwrite)
{
	ASSERT(zio->io_type == ZIO_TYPE_WRITE);
	ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
	ASSERT(zio->io_stage == ZIO_STAGE_OPEN);
	ASSERT(zio->io_txg == spa_syncing_txg(zio->io_spa));

	/*
	 * We must reset the io_prop to match the values that existed
	 * when the bp was first written by dmu_sync(), keeping in mind
	 * that nopwrite and dedup are mutually exclusive.
	 */
	zio->io_prop.zp_dedup = nopwrite ? B_FALSE : zio->io_prop.zp_dedup;
	zio->io_prop.zp_nopwrite = nopwrite;
	zio->io_prop.zp_copies = copies;
	zio->io_bp_override = bp;
}

void
zio_free(spa_t *spa, uint64_t txg, const blkptr_t *bp)
{
	metaslab_check_free(spa, bp);
	bplist_append(&spa->spa_free_bplist[txg & TXG_MASK], bp);
}

zio_t *
zio_free_sync(zio_t *pio, spa_t *spa, uint64_t txg, const blkptr_t *bp,
    uint64_t size, enum zio_flag flags)
{
	zio_t *zio;

	dprintf_bp(bp, "freeing in txg %llu, pass %u",
	    (longlong_t)txg, spa->spa_sync_pass);

	ASSERT(!BP_IS_HOLE(bp));
	ASSERT(spa_syncing_txg(spa) == txg);
	ASSERT(spa_sync_pass(spa) < zfs_sync_pass_deferred_free);

	metaslab_check_free(spa, bp);

	zio = zio_create(pio, spa, txg, bp, NULL, size,
	    NULL, NULL, ZIO_TYPE_FREE, ZIO_PRIORITY_FREE, flags,
	    NULL, 0, NULL, ZIO_STAGE_OPEN, ZIO_FREE_PIPELINE);

	return (zio);
}

zio_t *
zio_claim(zio_t *pio, spa_t *spa, uint64_t txg, const blkptr_t *bp,
    zio_done_func_t *done, void *private, enum zio_flag flags)
{
	zio_t *zio;

	/*
	 * A claim is an allocation of a specific block.  Claims are needed
	 * to support immediate writes in the intent log.  The issue is that
	 * immediate writes contain committed data, but in a txg that was
	 * *not* committed.  Upon opening the pool after an unclean shutdown,
	 * the intent log claims all blocks that contain immediate write data
	 * so that the SPA knows they're in use.
	 *
	 * All claims *must* be resolved in the first txg -- before the SPA
	 * starts allocating blocks -- so that nothing is allocated twice.
	 * If txg == 0 we just verify that the block is claimable.
	 */
	ASSERT3U(spa->spa_uberblock.ub_rootbp.blk_birth, <, spa_first_txg(spa));
	ASSERT(txg == spa_first_txg(spa) || txg == 0);
	ASSERT(!BP_GET_DEDUP(bp) || !spa_writeable(spa));	/* zdb(1M) */

	zio = zio_create(pio, spa, txg, bp, NULL, BP_GET_PSIZE(bp),
	    done, private, ZIO_TYPE_CLAIM, ZIO_PRIORITY_NOW, flags,
	    NULL, 0, NULL, ZIO_STAGE_OPEN, ZIO_CLAIM_PIPELINE);

	return (zio);
}

zio_t *
zio_ioctl(zio_t *pio, spa_t *spa, vdev_t *vd, int cmd, uint64_t offset,
    uint64_t size, zio_done_func_t *done, void *private, int priority,
    enum zio_flag flags)
{
	zio_t *zio;
	int c;

	if (vd->vdev_children == 0) {
		zio = zio_create(pio, spa, 0, NULL, NULL, size, done, private,
		    ZIO_TYPE_IOCTL, priority, flags, vd, offset, NULL,
		    ZIO_STAGE_OPEN, ZIO_IOCTL_PIPELINE);

		zio->io_cmd = cmd;
	} else {
		zio = zio_null(pio, spa, NULL, NULL, NULL, flags);

		for (c = 0; c < vd->vdev_children; c++)
			zio_nowait(zio_ioctl(zio, spa, vd->vdev_child[c], cmd,
			    offset, size, done, private, priority, flags));
	}

	return (zio);
}

zio_t *
zio_read_phys(zio_t *pio, vdev_t *vd, uint64_t offset, uint64_t size,
    void *data, int checksum, zio_done_func_t *done, void *private,
    int priority, enum zio_flag flags, boolean_t labels)
{
	zio_t *zio;

	ASSERT(vd->vdev_children == 0);
	ASSERT(!labels || offset + size <= VDEV_LABEL_START_SIZE ||
	    offset >= vd->vdev_psize - VDEV_LABEL_END_SIZE);
	ASSERT3U(offset + size, <=, vd->vdev_psize);

	zio = zio_create(pio, vd->vdev_spa, 0, NULL, data, size, done, private,
	    ZIO_TYPE_READ, priority, flags, vd, offset, NULL,
	    ZIO_STAGE_OPEN, ZIO_READ_PHYS_PIPELINE);

	zio->io_prop.zp_checksum = checksum;

	return (zio);
}

zio_t *
zio_write_phys(zio_t *pio, vdev_t *vd, uint64_t offset, uint64_t size,
    void *data, int checksum, zio_done_func_t *done, void *private,
    int priority, enum zio_flag flags, boolean_t labels)
{
	zio_t *zio;

	ASSERT(vd->vdev_children == 0);
	ASSERT(!labels || offset + size <= VDEV_LABEL_START_SIZE ||
	    offset >= vd->vdev_psize - VDEV_LABEL_END_SIZE);
	ASSERT3U(offset + size, <=, vd->vdev_psize);

	zio = zio_create(pio, vd->vdev_spa, 0, NULL, data, size, done, private,
	    ZIO_TYPE_WRITE, priority, flags, vd, offset, NULL,
	    ZIO_STAGE_OPEN, ZIO_WRITE_PHYS_PIPELINE);

	zio->io_prop.zp_checksum = checksum;

	if (zio_checksum_table[checksum].ci_eck) {
		/*
		 * zec checksums are necessarily destructive -- they modify
		 * the end of the write buffer to hold the verifier/checksum.
		 * Therefore, we must make a local copy in case the data is
		 * being written to multiple places in parallel.
		 */
		void *wbuf = zio_buf_alloc(size);
		bcopy(data, wbuf, size);
		zio_push_transform(zio, wbuf, size, size, NULL);
	}

	return (zio);
}

/*
 * Create a child I/O to do some work for us.
 */
zio_t *
zio_vdev_child_io(zio_t *pio, blkptr_t *bp, vdev_t *vd, uint64_t offset,
    void *data, uint64_t size, int type, int priority, enum zio_flag flags,
    zio_done_func_t *done, void *private)
{
	enum zio_stage pipeline = ZIO_VDEV_CHILD_PIPELINE;
	zio_t *zio;

	ASSERT(vd->vdev_parent ==
	    (pio->io_vd ? pio->io_vd : pio->io_spa->spa_root_vdev));

	if (type == ZIO_TYPE_READ && bp != NULL) {
		/*
		 * If we have the bp, then the child should perform the
		 * checksum and the parent need not.  This pushes error
		 * detection as close to the leaves as possible and
		 * eliminates redundant checksums in the interior nodes.
		 */
		pipeline |= ZIO_STAGE_CHECKSUM_VERIFY;
		pio->io_pipeline &= ~ZIO_STAGE_CHECKSUM_VERIFY;
	}

	if (vd->vdev_children == 0)
		offset += VDEV_LABEL_START_SIZE;

	flags |= ZIO_VDEV_CHILD_FLAGS(pio) | ZIO_FLAG_DONT_PROPAGATE;

	/*
	 * If we've decided to do a repair, the write is not speculative --
	 * even if the original read was.
	 */
	if (flags & ZIO_FLAG_IO_REPAIR)
		flags &= ~ZIO_FLAG_SPECULATIVE;

	zio = zio_create(pio, pio->io_spa, pio->io_txg, bp, data, size,
	    done, private, type, priority, flags, vd, offset, &pio->io_bookmark,
	    ZIO_STAGE_VDEV_IO_START >> 1, pipeline);

	return (zio);
}
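
/*
 * Note that vdev child I/Os start at ZIO_STAGE_VDEV_IO_START >> 1, the
 * stage bit just below VDEV_IO_START: zio_execute() always shifts the
 * stage bit left before running a stage, so this makes VDEV_IO_START
 * itself the first stage executed, skipping the front of the pipeline.
 */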

zio_t *
zio_vdev_delegated_io(vdev_t *vd, uint64_t offset, void *data, uint64_t size,
    int type, int priority, enum zio_flag flags,
    zio_done_func_t *done, void *private)
{
	zio_t *zio;

	ASSERT(vd->vdev_ops->vdev_op_leaf);

	zio = zio_create(NULL, vd->vdev_spa, 0, NULL,
	    data, size, done, private, type, priority,
	    flags | ZIO_FLAG_CANFAIL | ZIO_FLAG_DONT_RETRY,
	    vd, offset, NULL,
	    ZIO_STAGE_VDEV_IO_START >> 1, ZIO_VDEV_CHILD_PIPELINE);

	return (zio);
}

void
zio_flush(zio_t *zio, vdev_t *vd)
{
	zio_nowait(zio_ioctl(zio, zio->io_spa, vd, DKIOCFLUSHWRITECACHE, 0, 0,
	    NULL, NULL, ZIO_PRIORITY_NOW,
	    ZIO_FLAG_CANFAIL | ZIO_FLAG_DONT_PROPAGATE | ZIO_FLAG_DONT_RETRY));
}

zio_t *
zio_trim(zio_t *zio, spa_t *spa, vdev_t *vd, uint64_t offset, uint64_t size)
{

	ASSERT(vd->vdev_ops->vdev_op_leaf);

	return (zio_ioctl(zio, spa, vd, DKIOCTRIM, offset, size,
	    NULL, NULL, ZIO_PRIORITY_TRIM,
	    ZIO_FLAG_CANFAIL | ZIO_FLAG_DONT_PROPAGATE | ZIO_FLAG_DONT_RETRY));
}

void
zio_shrink(zio_t *zio, uint64_t size)
{
	ASSERT(zio->io_executor == NULL);
	ASSERT(zio->io_orig_size == zio->io_size);
	ASSERT(size <= zio->io_size);

	/*
	 * We don't shrink for raidz because of problems with the
	 * reconstruction when reading back less than the block size.
	 * Note, BP_IS_RAIDZ() assumes no compression.
	 */
	ASSERT(BP_GET_COMPRESS(zio->io_bp) == ZIO_COMPRESS_OFF);
	if (!BP_IS_RAIDZ(zio->io_bp))
		zio->io_orig_size = zio->io_size = size;
}

/*
 * ==========================================================================
 * Prepare to read and write logical blocks
 * ==========================================================================
 */

static int
zio_read_bp_init(zio_t *zio)
{
	blkptr_t *bp = zio->io_bp;

	if (BP_GET_COMPRESS(bp) != ZIO_COMPRESS_OFF &&
	    zio->io_child_type == ZIO_CHILD_LOGICAL &&
	    !(zio->io_flags & ZIO_FLAG_RAW)) {
		uint64_t psize = BP_GET_PSIZE(bp);
		void *cbuf = zio_buf_alloc(psize);

		zio_push_transform(zio, cbuf, psize, psize, zio_decompress);
	}

	if (!DMU_OT_IS_METADATA(BP_GET_TYPE(bp)) && BP_GET_LEVEL(bp) == 0)
		zio->io_flags |= ZIO_FLAG_DONT_CACHE;

	if (BP_GET_TYPE(bp) == DMU_OT_DDT_ZAP)
		zio->io_flags |= ZIO_FLAG_DONT_CACHE;

	if (BP_GET_DEDUP(bp) && zio->io_child_type == ZIO_CHILD_LOGICAL)
		zio->io_pipeline = ZIO_DDT_READ_PIPELINE;

	return (ZIO_PIPELINE_CONTINUE);
}

static int
zio_write_bp_init(zio_t *zio)
{
	spa_t *spa = zio->io_spa;
	zio_prop_t *zp = &zio->io_prop;
	enum zio_compress compress = zp->zp_compress;
	blkptr_t *bp = zio->io_bp;
	uint64_t lsize = zio->io_size;
	uint64_t psize = lsize;
	int pass = 1;

	/*
	 * If our children haven't all reached the ready stage,
	 * wait for them and then repeat this pipeline stage.
	 */
	if (zio_wait_for_children(zio, ZIO_CHILD_GANG, ZIO_WAIT_READY) ||
	    zio_wait_for_children(zio, ZIO_CHILD_LOGICAL, ZIO_WAIT_READY))
		return (ZIO_PIPELINE_STOP);

	if (!IO_IS_ALLOCATING(zio))
		return (ZIO_PIPELINE_CONTINUE);

	ASSERT(zio->io_child_type != ZIO_CHILD_DDT);

	if (zio->io_bp_override) {
		ASSERT(bp->blk_birth != zio->io_txg);
		ASSERT(BP_GET_DEDUP(zio->io_bp_override) == 0);

		*bp = *zio->io_bp_override;
		zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;

		/*
		 * If we've been overridden and nopwrite is set then
		 * set the flag accordingly to indicate that a nopwrite
		 * has already occurred.
		 */
		if (!BP_IS_HOLE(bp) && zp->zp_nopwrite) {
			ASSERT(!zp->zp_dedup);
			zio->io_flags |= ZIO_FLAG_NOPWRITE;
			return (ZIO_PIPELINE_CONTINUE);
		}

		ASSERT(!zp->zp_nopwrite);

		if (BP_IS_HOLE(bp) || !zp->zp_dedup)
			return (ZIO_PIPELINE_CONTINUE);

		ASSERT(zio_checksum_table[zp->zp_checksum].ci_dedup ||
		    zp->zp_dedup_verify);

		if (BP_GET_CHECKSUM(bp) == zp->zp_checksum) {
			BP_SET_DEDUP(bp, 1);
			zio->io_pipeline |= ZIO_STAGE_DDT_WRITE;
			return (ZIO_PIPELINE_CONTINUE);
		}
		zio->io_bp_override = NULL;
		BP_ZERO(bp);
	}

	if (bp->blk_birth == zio->io_txg) {
		/*
		 * We're rewriting an existing block, which means we're
		 * working on behalf of spa_sync().  For spa_sync() to
		 * converge, it must eventually be the case that we don't
		 * have to allocate new blocks.  But compression changes
		 * the blocksize, which forces a reallocate, and makes
		 * convergence take longer.  Therefore, after the first
		 * few passes, stop compressing to ensure convergence.
		 */
		pass = spa_sync_pass(spa);

		ASSERT(zio->io_txg == spa_syncing_txg(spa));
		ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
		ASSERT(!BP_GET_DEDUP(bp));

		if (pass >= zfs_sync_pass_dont_compress)
			compress = ZIO_COMPRESS_OFF;

		/* Make sure someone doesn't change their mind on overwrites */
		ASSERT(MIN(zp->zp_copies + BP_IS_GANG(bp),
		    spa_max_replication(spa)) == BP_GET_NDVAS(bp));
	}

	if (compress != ZIO_COMPRESS_OFF) {
		void *cbuf = zio_buf_alloc(lsize);
		psize = zio_compress_data(compress, zio->io_data, cbuf, lsize);
		if (psize == 0 || psize == lsize) {
			compress = ZIO_COMPRESS_OFF;
			zio_buf_free(cbuf, lsize);
		} else {
			ASSERT(psize < lsize);
			zio_push_transform(zio, cbuf, psize, lsize, NULL);
		}
	}

	/*
	 * The final pass of spa_sync() must be all rewrites, but the first
	 * few passes offer a trade-off: allocating blocks defers convergence,
	 * but newly allocated blocks are sequential, so they can be written
	 * to disk faster.  Therefore, we allow the first few passes of
	 * spa_sync() to allocate new blocks, but force rewrites after that.
	 * There should only be a handful of blocks after pass 1 in any case.
	 */
	if (bp->blk_birth == zio->io_txg && BP_GET_PSIZE(bp) == psize &&
	    pass >= zfs_sync_pass_rewrite) {
		ASSERT(psize != 0);
		enum zio_stage gang_stages = zio->io_pipeline & ZIO_GANG_STAGES;
		zio->io_pipeline = ZIO_REWRITE_PIPELINE | gang_stages;
		zio->io_flags |= ZIO_FLAG_IO_REWRITE;
	} else {
		BP_ZERO(bp);
		zio->io_pipeline = ZIO_WRITE_PIPELINE;
	}

	if (psize == 0) {
		zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
	} else {
		ASSERT(zp->zp_checksum != ZIO_CHECKSUM_GANG_HEADER);
		BP_SET_LSIZE(bp, lsize);
		BP_SET_PSIZE(bp, psize);
		BP_SET_COMPRESS(bp, compress);
		BP_SET_CHECKSUM(bp, zp->zp_checksum);
		BP_SET_TYPE(bp, zp->zp_type);
		BP_SET_LEVEL(bp, zp->zp_level);
		BP_SET_DEDUP(bp, zp->zp_dedup);
		BP_SET_BYTEORDER(bp, ZFS_HOST_BYTEORDER);
		if (zp->zp_dedup) {
			ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
			ASSERT(!(zio->io_flags & ZIO_FLAG_IO_REWRITE));
			zio->io_pipeline = ZIO_DDT_WRITE_PIPELINE;
		}
		if (zp->zp_nopwrite) {
			ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
			ASSERT(!(zio->io_flags & ZIO_FLAG_IO_REWRITE));
			zio->io_pipeline |= ZIO_STAGE_NOP_WRITE;
		}
	}

	return (ZIO_PIPELINE_CONTINUE);
}
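
/*
 * Putting the pass logic above together with the defaults declared at
 * the top of this file: compression is attempted on allocating writes
 * until sync pass 5 (zfs_sync_pass_dont_compress), and a block whose
 * compressed size comes back as 0 or equal to lsize is simply written
 * uncompressed.  From pass 2 onward (zfs_sync_pass_rewrite), a block
 * born in this txg whose physical size is unchanged is rewritten in
 * place rather than reallocated, which is what lets spa_sync()
 * converge.
 */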

static int
zio_free_bp_init(zio_t *zio)
{
	blkptr_t *bp = zio->io_bp;

	if (zio->io_child_type == ZIO_CHILD_LOGICAL) {
		if (BP_GET_DEDUP(bp))
			zio->io_pipeline = ZIO_DDT_FREE_PIPELINE;
	}

	return (ZIO_PIPELINE_CONTINUE);
}

/*
 * ==========================================================================
 * Execute the I/O pipeline
 * ==========================================================================
 */

static void
zio_taskq_dispatch(zio_t *zio, enum zio_taskq_type q, boolean_t cutinline)
{
	spa_t *spa = zio->io_spa;
	zio_type_t t = zio->io_type;
	int flags = TQ_SLEEP | (cutinline ? TQ_FRONT : 0);

	ASSERT(q == ZIO_TASKQ_ISSUE || q == ZIO_TASKQ_INTERRUPT);

	/*
	 * If we're a config writer or a probe, the normal issue and
	 * interrupt threads may all be blocked waiting for the config lock.
	 * In this case, select the otherwise-unused taskq for ZIO_TYPE_NULL.
	 */
	if (zio->io_flags & (ZIO_FLAG_CONFIG_WRITER | ZIO_FLAG_PROBE))
		t = ZIO_TYPE_NULL;

	/*
	 * A similar issue exists for the L2ARC write thread until L2ARC 2.0.
	 */
	if (t == ZIO_TYPE_WRITE && zio->io_vd && zio->io_vd->vdev_aux)
		t = ZIO_TYPE_NULL;

	/*
	 * If this is a high priority I/O, then use the high priority taskq.
	 */
	if (zio->io_priority == ZIO_PRIORITY_NOW &&
	    spa->spa_zio_taskq[t][q + 1] != NULL)
		q++;

	ASSERT3U(q, <, ZIO_TASKQ_TYPES);
#ifdef _KERNEL
	(void) taskq_dispatch_safe(spa->spa_zio_taskq[t][q],
	    (task_func_t *)zio_execute, zio, flags, &zio->io_task);
#else
	(void) taskq_dispatch(spa->spa_zio_taskq[t][q],
	    (task_func_t *)zio_execute, zio, flags);
#endif
}

static boolean_t
zio_taskq_member(zio_t *zio, enum zio_taskq_type q)
{
	kthread_t *executor = zio->io_executor;
	spa_t *spa = zio->io_spa;

	for (zio_type_t t = 0; t < ZIO_TYPES; t++)
		if (taskq_member(spa->spa_zio_taskq[t][q], executor))
			return (B_TRUE);

	return (B_FALSE);
}

static int
zio_issue_async(zio_t *zio)
{
	zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, B_FALSE);

	return (ZIO_PIPELINE_STOP);
}

void
zio_interrupt(zio_t *zio)
{
	zio_taskq_dispatch(zio, ZIO_TASKQ_INTERRUPT, B_FALSE);
}
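
/*
 * The heart of the engine is the stage-advance loop in zio_execute()
 * below: starting from the current one-hot io_stage bit, it shifts left
 * until it reaches the next bit set in io_pipeline and runs that
 * stage's function from the zio_pipeline[] table (indexed by
 * highbit(stage) - 1).  A stage that returns ZIO_PIPELINE_STOP has
 * handed the zio off (taskq, child I/O, or vdev queue) and the loop
 * simply unwinds; ZIO_PIPELINE_CONTINUE advances to the next stage.
 */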

/*
 * Execute the I/O pipeline until one of the following occurs:
 * (1) the I/O completes; (2) the pipeline stalls waiting for
 * dependent child I/Os; (3) the I/O issues, so we're waiting
 * for an I/O completion interrupt; (4) the I/O is delegated by
 * vdev-level caching or aggregation; (5) the I/O is deferred
 * due to vdev-level queueing; (6) the I/O is handed off to
 * another thread.  In all cases, the pipeline stops whenever
 * there's no CPU work; it never burns a thread in cv_wait().
 *
 * There's no locking on io_stage because there's no legitimate way
 * for multiple threads to be attempting to process the same I/O.
 */
static zio_pipe_stage_t *zio_pipeline[];

void
zio_execute(zio_t *zio)
{
	zio->io_executor = curthread;

	while (zio->io_stage < ZIO_STAGE_DONE) {
		enum zio_stage pipeline = zio->io_pipeline;
		enum zio_stage stage = zio->io_stage;
		int rv;

		ASSERT(!MUTEX_HELD(&zio->io_lock));
		ASSERT(ISP2(stage));
		ASSERT(zio->io_stall == NULL);

		do {
			stage <<= 1;
		} while ((stage & pipeline) == 0);

		ASSERT(stage <= ZIO_STAGE_DONE);

		/*
		 * If we are in interrupt context and this pipeline stage
		 * will grab a config lock that is held across I/O,
		 * or may wait for an I/O that needs an interrupt thread
		 * to complete, issue async to avoid deadlock.
		 *
		 * For VDEV_IO_START, we cut in line so that the io will
		 * be sent to disk promptly.
		 */
		if ((stage & ZIO_BLOCKING_STAGES) && zio->io_vd == NULL &&
		    zio_taskq_member(zio, ZIO_TASKQ_INTERRUPT)) {
			boolean_t cut = (stage == ZIO_STAGE_VDEV_IO_START) ?
			    zio_requeue_io_start_cut_in_line : B_FALSE;
			zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, cut);
			return;
		}

		zio->io_stage = stage;
		rv = zio_pipeline[highbit(stage) - 1](zio);

		if (rv == ZIO_PIPELINE_STOP)
			return;

		ASSERT(rv == ZIO_PIPELINE_CONTINUE);
	}
}

/*
 * ==========================================================================
 * Initiate I/O, either sync or async
 * ==========================================================================
 */
int
zio_wait(zio_t *zio)
{
	int error;

	ASSERT(zio->io_stage == ZIO_STAGE_OPEN);
	ASSERT(zio->io_executor == NULL);

	zio->io_waiter = curthread;

	zio_execute(zio);

	mutex_enter(&zio->io_lock);
	while (zio->io_executor != NULL)
		cv_wait(&zio->io_cv, &zio->io_lock);
	mutex_exit(&zio->io_lock);

	error = zio->io_error;
	zio_destroy(zio);

	return (error);
}

void
zio_nowait(zio_t *zio)
{
	ASSERT(zio->io_executor == NULL);

	if (zio->io_child_type == ZIO_CHILD_LOGICAL &&
	    zio_unique_parent(zio) == NULL) {
		/*
		 * This is a logical async I/O with no parent to wait for it.
		 * We add it to the spa_async_zio_root "Godfather" I/O, which
		 * will ensure it completes prior to unloading the pool.
		 */
		spa_t *spa = zio->io_spa;

		zio_add_child(spa->spa_async_zio_root, zio);
	}

	zio_execute(zio);
}

/*
 * ==========================================================================
 * Reexecute or suspend/resume failed I/O
 * ==========================================================================
 */

static void
zio_reexecute(zio_t *pio)
{
	zio_t *cio, *cio_next;

	ASSERT(pio->io_child_type == ZIO_CHILD_LOGICAL);
	ASSERT(pio->io_orig_stage == ZIO_STAGE_OPEN);
	ASSERT(pio->io_gang_leader == NULL);
	ASSERT(pio->io_gang_tree == NULL);

	pio->io_flags = pio->io_orig_flags;
	pio->io_stage = pio->io_orig_stage;
	pio->io_pipeline = pio->io_orig_pipeline;
	pio->io_reexecute = 0;
	pio->io_flags |= ZIO_FLAG_REEXECUTED;
	pio->io_error = 0;
	for (int w = 0; w < ZIO_WAIT_TYPES; w++)
		pio->io_state[w] = 0;
	for (int c = 0; c < ZIO_CHILD_TYPES; c++)
		pio->io_child_error[c] = 0;

	if (IO_IS_ALLOCATING(pio))
		BP_ZERO(pio->io_bp);

	/*
	 * As we reexecute pio's children, new children could be created.
	 * New children go to the head of pio's io_child_list, however,
	 * so we will (correctly) not reexecute them.  The key is that
	 * the remainder of pio's io_child_list, from 'cio_next' onward,
	 * cannot be affected by any side effects of reexecuting 'cio'.
	 */
	for (cio = zio_walk_children(pio); cio != NULL; cio = cio_next) {
		cio_next = zio_walk_children(pio);
		mutex_enter(&pio->io_lock);
		for (int w = 0; w < ZIO_WAIT_TYPES; w++)
			pio->io_children[cio->io_child_type][w]++;
		mutex_exit(&pio->io_lock);
		zio_reexecute(cio);
	}

	/*
	 * Now that all children have been reexecuted, execute the parent.
	 * We don't reexecute "The Godfather" I/O here as it's the
	 * responsibility of the caller to wait on him.
	 */
	if (!(pio->io_flags & ZIO_FLAG_GODFATHER))
		zio_execute(pio);
}
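
/*
 * zio_suspend() and zio_resume() below build on this machinery: each
 * failed logical I/O is parked as a child of a GODFATHER root zio, and
 * zio_resume() reexecutes that root's children (zio_reexecute() above
 * restores each child to its original stage and pipeline) and then
 * waits on the root itself.
 */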

void
zio_suspend(spa_t *spa, zio_t *zio)
{
	if (spa_get_failmode(spa) == ZIO_FAILURE_MODE_PANIC)
		fm_panic("Pool '%s' has encountered an uncorrectable I/O "
		    "failure and the failure mode property for this pool "
		    "is set to panic.", spa_name(spa));

	zfs_ereport_post(FM_EREPORT_ZFS_IO_FAILURE, spa, NULL, NULL, 0, 0);

	mutex_enter(&spa->spa_suspend_lock);

	if (spa->spa_suspend_zio_root == NULL)
		spa->spa_suspend_zio_root = zio_root(spa, NULL, NULL,
		    ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE |
		    ZIO_FLAG_GODFATHER);

	spa->spa_suspended = B_TRUE;

	if (zio != NULL) {
		ASSERT(!(zio->io_flags & ZIO_FLAG_GODFATHER));
		ASSERT(zio != spa->spa_suspend_zio_root);
		ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
		ASSERT(zio_unique_parent(zio) == NULL);
		ASSERT(zio->io_stage == ZIO_STAGE_DONE);
		zio_add_child(spa->spa_suspend_zio_root, zio);
	}

	mutex_exit(&spa->spa_suspend_lock);
}

int
zio_resume(spa_t *spa)
{
	zio_t *pio;

	/*
	 * Reexecute all previously suspended i/o.
	 */
	mutex_enter(&spa->spa_suspend_lock);
	spa->spa_suspended = B_FALSE;
	cv_broadcast(&spa->spa_suspend_cv);
	pio = spa->spa_suspend_zio_root;
	spa->spa_suspend_zio_root = NULL;
	mutex_exit(&spa->spa_suspend_lock);

	if (pio == NULL)
		return (0);

	zio_reexecute(pio);
	return (zio_wait(pio));
}

void
zio_resume_wait(spa_t *spa)
{
	mutex_enter(&spa->spa_suspend_lock);
	while (spa_suspended(spa))
		cv_wait(&spa->spa_suspend_cv, &spa->spa_suspend_lock);
	mutex_exit(&spa->spa_suspend_lock);
}

/*
 * ==========================================================================
 * Gang blocks.
 *
 * A gang block is a collection of small blocks that looks to the DMU
 * like one large block.  When zio_dva_allocate() cannot find a block
 * of the requested size, due to either severe fragmentation or the pool
 * being nearly full, it calls zio_write_gang_block() to construct the
 * block from smaller fragments.
 *
 * A gang block consists of a gang header (zio_gbh_phys_t) and up to
 * three (SPA_GBH_NBLKPTRS) gang members.  The gang header is just like
 * an indirect block: it's an array of block pointers.  It consumes
 * only one sector and hence is allocatable regardless of fragmentation.
 * The gang header's bps point to its gang members, which hold the data.
 *
 * Gang blocks are self-checksumming, using the bp's <vdev, offset, txg>
 * as the verifier to ensure uniqueness of the SHA256 checksum.
 * Critically, the gang block bp's blk_cksum is the checksum of the data,
 * not the gang header.  This ensures that data block signatures (needed for
 * deduplication) are independent of how the block is physically stored.
 *
 * Gang blocks can be nested: a gang member may itself be a gang block.
 * Thus every gang block is a tree in which root and all interior nodes are
 * gang headers, and the leaves are normal blocks that contain user data.
 * The root of the gang tree is called the gang leader.
 *
 * To perform any operation (read, rewrite, free, claim) on a gang block,
 * zio_gang_assemble() first assembles the gang tree (minus data leaves)
 * in the io_gang_tree field of the original logical i/o by recursively
 * reading the gang leader and all gang headers below it.  This yields
 * an in-core tree containing the contents of every gang header and the
 * bps for every constituent of the gang block.
 *
 * With the gang tree now assembled, zio_gang_issue() just walks the gang tree
 * and invokes a callback on each bp.  To free a gang block, zio_gang_issue()
 * calls zio_free_gang() -- a trivial wrapper around zio_free() -- for each bp.
 * zio_claim_gang() provides a similarly trivial wrapper for zio_claim().
 * zio_read_gang() is a wrapper around zio_read() that omits reading gang
 * headers, since we already have those in io_gang_tree.  zio_rewrite_gang()
 * performs a zio_rewrite() of the data or, for gang headers, a zio_rewrite()
 * of the gang header plus zio_checksum_compute() of the data to update the
 * gang header's blk_cksum as described above.
 *
 * The two-phase assemble/issue model solves the problem of partial failure --
 * what if you'd freed part of a gang block but then couldn't read the
 * gang header for another part?  Assembling the entire gang tree first
 * ensures that all the necessary gang header I/O has succeeded before
 * starting the actual work of free, claim, or write.  Once the gang tree
 * is assembled, free and claim are in-memory operations that cannot fail.
 *
 * In the event that a gang write fails, zio_dva_unallocate() walks the
 * gang tree to immediately free (i.e. insert back into the space map)
 * everything we've allocated.  This ensures that we don't get ENOSPC
 * errors during repeated suspend/resume cycles due to a flaky device.
 *
 * Gang rewrites only happen during sync-to-convergence.  If we can't assemble
 * the gang tree, we won't modify the block, so we can safely defer the free
 * (knowing that the block is still intact).  If we *can* assemble the gang
 * tree, then even if some of the rewrites fail, zio_dva_unallocate() will free
 * each constituent bp and we can allocate a new block on the next sync pass.
 *
 * In all cases, the gang tree allows complete recovery from partial failure.
 * ==========================================================================
 */
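
/*
 * For example (assuming 512-byte sectors, so SPA_GBH_NBLKPTRS == 3): a
 * 96K write that cannot be satisfied contiguously might become a gang
 * header whose three bps describe 32K members; if even 32K is not
 * available, a member can itself be a gang block, one level deeper.
 */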

static zio_t *
zio_read_gang(zio_t *pio, blkptr_t *bp, zio_gang_node_t *gn, void *data)
{
	if (gn != NULL)
		return (pio);

	return (zio_read(pio, pio->io_spa, bp, data, BP_GET_PSIZE(bp),
	    NULL, NULL, pio->io_priority, ZIO_GANG_CHILD_FLAGS(pio),
	    &pio->io_bookmark));
}

zio_t *
zio_rewrite_gang(zio_t *pio, blkptr_t *bp, zio_gang_node_t *gn, void *data)
{
	zio_t *zio;

	if (gn != NULL) {
		zio = zio_rewrite(pio, pio->io_spa, pio->io_txg, bp,
		    gn->gn_gbh, SPA_GANGBLOCKSIZE, NULL, NULL, pio->io_priority,
		    ZIO_GANG_CHILD_FLAGS(pio), &pio->io_bookmark);
		/*
		 * As we rewrite each gang header, the pipeline will compute
		 * a new gang block header checksum for it; but no one will
		 * compute a new data checksum, so we do that here.  The one
		 * exception is the gang leader: the pipeline already computed
		 * its data checksum because that stage precedes gang assembly.
		 * (Presently, nothing actually uses interior data checksums;
		 * this is just good hygiene.)
		 */
		if (gn != pio->io_gang_leader->io_gang_tree) {
			zio_checksum_compute(zio, BP_GET_CHECKSUM(bp),
			    data, BP_GET_PSIZE(bp));
		}
		/*
		 * If we are here to damage data for testing purposes,
		 * leave the GBH alone so that we can detect the damage.
		 */
		if (pio->io_gang_leader->io_flags & ZIO_FLAG_INDUCE_DAMAGE)
			zio->io_pipeline &= ~ZIO_VDEV_IO_STAGES;
	} else {
		zio = zio_rewrite(pio, pio->io_spa, pio->io_txg, bp,
		    data, BP_GET_PSIZE(bp), NULL, NULL, pio->io_priority,
		    ZIO_GANG_CHILD_FLAGS(pio), &pio->io_bookmark);
	}

	return (zio);
}

/* ARGSUSED */
zio_t *
zio_free_gang(zio_t *pio, blkptr_t *bp, zio_gang_node_t *gn, void *data)
{
	return (zio_free_sync(pio, pio->io_spa, pio->io_txg, bp,
	    BP_IS_GANG(bp) ? SPA_GANGBLOCKSIZE : BP_GET_PSIZE(bp),
	    ZIO_GANG_CHILD_FLAGS(pio)));
}

/* ARGSUSED */
zio_t *
zio_claim_gang(zio_t *pio, blkptr_t *bp, zio_gang_node_t *gn, void *data)
{
	return (zio_claim(pio, pio->io_spa, pio->io_txg, bp,
	    NULL, NULL, ZIO_GANG_CHILD_FLAGS(pio)));
}

static zio_gang_issue_func_t *zio_gang_issue_func[ZIO_TYPES] = {
	NULL,
	zio_read_gang,
	zio_rewrite_gang,
	zio_free_gang,
	zio_claim_gang,
	NULL
};

static void zio_gang_tree_assemble_done(zio_t *zio);

static zio_gang_node_t *
zio_gang_node_alloc(zio_gang_node_t **gnpp)
{
	zio_gang_node_t *gn;

	ASSERT(*gnpp == NULL);

	gn = kmem_zalloc(sizeof (*gn), KM_SLEEP);
	gn->gn_gbh = zio_buf_alloc(SPA_GANGBLOCKSIZE);
	*gnpp = gn;

	return (gn);
}

static void
zio_gang_node_free(zio_gang_node_t **gnpp)
{
	zio_gang_node_t *gn = *gnpp;

	for (int g = 0; g < SPA_GBH_NBLKPTRS; g++)
		ASSERT(gn->gn_child[g] == NULL);

	zio_buf_free(gn->gn_gbh, SPA_GANGBLOCKSIZE);
	kmem_free(gn, sizeof (*gn));
	*gnpp = NULL;
}

static void
zio_gang_tree_free(zio_gang_node_t **gnpp)
{
	zio_gang_node_t *gn = *gnpp;

	if (gn == NULL)
		return;

	for (int g = 0; g < SPA_GBH_NBLKPTRS; g++)
		zio_gang_tree_free(&gn->gn_child[g]);

	zio_gang_node_free(gnpp);
}

static void
zio_gang_tree_assemble(zio_t *gio, blkptr_t *bp, zio_gang_node_t **gnpp)
{
	zio_gang_node_t *gn = zio_gang_node_alloc(gnpp);

	ASSERT(gio->io_gang_leader == gio);
	ASSERT(BP_IS_GANG(bp));

	zio_nowait(zio_read(gio, gio->io_spa, bp, gn->gn_gbh,
	    SPA_GANGBLOCKSIZE, zio_gang_tree_assemble_done, gn,
	    gio->io_priority, ZIO_GANG_CHILD_FLAGS(gio), &gio->io_bookmark));
}

static void
zio_gang_tree_assemble_done(zio_t *zio)
{
	zio_t *gio = zio->io_gang_leader;
	zio_gang_node_t *gn = zio->io_private;
	blkptr_t *bp = zio->io_bp;

	ASSERT(gio == zio_unique_parent(zio));
	ASSERT(zio->io_child_count == 0);

	if (zio->io_error)
		return;

	if (BP_SHOULD_BYTESWAP(bp))
		byteswap_uint64_array(zio->io_data, zio->io_size);

	ASSERT(zio->io_data == gn->gn_gbh);
	ASSERT(zio->io_size == SPA_GANGBLOCKSIZE);
	ASSERT(gn->gn_gbh->zg_tail.zec_magic == ZEC_MAGIC);

	for (int g = 0; g < SPA_GBH_NBLKPTRS; g++) {
		blkptr_t *gbp = &gn->gn_gbh->zg_blkptr[g];
		if (!BP_IS_GANG(gbp))
			continue;
		zio_gang_tree_assemble(gio, gbp, &gn->gn_child[g]);
	}
}

static void
zio_gang_tree_issue(zio_t *pio, zio_gang_node_t *gn, blkptr_t *bp, void *data)
{
	zio_t *gio = pio->io_gang_leader;
	zio_t *zio;

	ASSERT(BP_IS_GANG(bp) == !!gn);
	ASSERT(BP_GET_CHECKSUM(bp) == BP_GET_CHECKSUM(gio->io_bp));
	ASSERT(BP_GET_LSIZE(bp) == BP_GET_PSIZE(bp) || gn == gio->io_gang_tree);

	/*
	 * If you're a gang header, your data is in gn->gn_gbh.
	 * If you're a gang member, your data is in 'data' and gn == NULL.
	 */
	zio = zio_gang_issue_func[gio->io_type](pio, bp, gn, data);

	if (gn != NULL) {
		ASSERT(gn->gn_gbh->zg_tail.zec_magic == ZEC_MAGIC);

		for (int g = 0; g < SPA_GBH_NBLKPTRS; g++) {
			blkptr_t *gbp = &gn->gn_gbh->zg_blkptr[g];
			if (BP_IS_HOLE(gbp))
				continue;
			zio_gang_tree_issue(zio, gn->gn_child[g], gbp, data);
			data = (char *)data + BP_GET_PSIZE(gbp);
		}
	}

	if (gn == gio->io_gang_tree && gio->io_data != NULL)
		ASSERT3P((char *)gio->io_data + gio->io_size, ==, data);

	if (zio != pio)
		zio_nowait(zio);
}

static int
zio_gang_assemble(zio_t *zio)
{
	blkptr_t *bp = zio->io_bp;

	ASSERT(BP_IS_GANG(bp) && zio->io_gang_leader == NULL);
	ASSERT(zio->io_child_type > ZIO_CHILD_GANG);

	zio->io_gang_leader = zio;

	zio_gang_tree_assemble(zio, bp, &zio->io_gang_tree);

	return (ZIO_PIPELINE_CONTINUE);
}

static int
zio_gang_issue(zio_t *zio)
{
	blkptr_t *bp = zio->io_bp;

	if (zio_wait_for_children(zio, ZIO_CHILD_GANG, ZIO_WAIT_DONE))
		return (ZIO_PIPELINE_STOP);

	ASSERT(BP_IS_GANG(bp) && zio->io_gang_leader == zio);
	ASSERT(zio->io_child_type > ZIO_CHILD_GANG);

	if (zio->io_child_error[ZIO_CHILD_GANG] == 0)
		zio_gang_tree_issue(zio, zio->io_gang_tree, bp, zio->io_data);
	else
		zio_gang_tree_free(&zio->io_gang_tree);

	zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;

	return (ZIO_PIPELINE_CONTINUE);
}

static void
zio_write_gang_member_ready(zio_t *zio)
{
	zio_t *pio = zio_unique_parent(zio);
	zio_t *gio = zio->io_gang_leader;
	dva_t *cdva = zio->io_bp->blk_dva;
	dva_t *pdva = pio->io_bp->blk_dva;
	uint64_t asize;

	if (BP_IS_HOLE(zio->io_bp))
		return;

	ASSERT(BP_IS_HOLE(&zio->io_bp_orig));

	ASSERT(zio->io_child_type == ZIO_CHILD_GANG);
	ASSERT3U(zio->io_prop.zp_copies, ==, gio->io_prop.zp_copies);
	ASSERT3U(zio->io_prop.zp_copies, <=, BP_GET_NDVAS(zio->io_bp));
	ASSERT3U(pio->io_prop.zp_copies, <=, BP_GET_NDVAS(pio->io_bp));
	ASSERT3U(BP_GET_NDVAS(zio->io_bp), <=, BP_GET_NDVAS(pio->io_bp));

	mutex_enter(&pio->io_lock);
	for (int d = 0; d < BP_GET_NDVAS(zio->io_bp); d++) {
		ASSERT(DVA_GET_GANG(&pdva[d]));
		asize = DVA_GET_ASIZE(&pdva[d]);
		asize += DVA_GET_ASIZE(&cdva[d]);
		DVA_SET_ASIZE(&pdva[d], asize);
	}
	mutex_exit(&pio->io_lock);
}
metaslab_alloc(spa, spa_normal_class(spa), SPA_GANGBLOCKSIZE, 1823 bp, gbh_copies, txg, pio == gio ? NULL : gio->io_bp, 1824 METASLAB_HINTBP_FAVOR | METASLAB_GANG_HEADER); 1825 if (error) { 1826 pio->io_error = error; 1827 return (ZIO_PIPELINE_CONTINUE); 1828 } 1829 1830 if (pio == gio) { 1831 gnpp = &gio->io_gang_tree; 1832 } else { 1833 gnpp = pio->io_private; 1834 ASSERT(pio->io_ready == zio_write_gang_member_ready); 1835 } 1836 1837 gn = zio_gang_node_alloc(gnpp); 1838 gbh = gn->gn_gbh; 1839 bzero(gbh, SPA_GANGBLOCKSIZE); 1840 1841 /* 1842 * Create the gang header. 1843 */ 1844 zio = zio_rewrite(pio, spa, txg, bp, gbh, SPA_GANGBLOCKSIZE, NULL, NULL, 1845 pio->io_priority, ZIO_GANG_CHILD_FLAGS(pio), &pio->io_bookmark); 1846 1847 /* 1848 * Create and nowait the gang children. 1849 */ 1850 for (int g = 0; resid != 0; resid -= lsize, g++) { 1851 lsize = P2ROUNDUP(resid / (SPA_GBH_NBLKPTRS - g), 1852 SPA_MINBLOCKSIZE); 1853 ASSERT(lsize >= SPA_MINBLOCKSIZE && lsize <= resid); 1854 1855 zp.zp_checksum = gio->io_prop.zp_checksum; 1856 zp.zp_compress = ZIO_COMPRESS_OFF; 1857 zp.zp_type = DMU_OT_NONE; 1858 zp.zp_level = 0; 1859 zp.zp_copies = gio->io_prop.zp_copies; 1860 zp.zp_dedup = B_FALSE; 1861 zp.zp_dedup_verify = B_FALSE; 1862 zp.zp_nopwrite = B_FALSE; 1863 1864 zio_nowait(zio_write(zio, spa, txg, &gbh->zg_blkptr[g], 1865 (char *)pio->io_data + (pio->io_size - resid), lsize, &zp, 1866 zio_write_gang_member_ready, NULL, &gn->gn_child[g], 1867 pio->io_priority, ZIO_GANG_CHILD_FLAGS(pio), 1868 &pio->io_bookmark)); 1869 } 1870 1871 /* 1872 * Set pio's pipeline to just wait for zio to finish. 1873 */ 1874 pio->io_pipeline = ZIO_INTERLOCK_PIPELINE; 1875 1876 zio_nowait(zio); 1877 1878 return (ZIO_PIPELINE_CONTINUE); 1879} 1880 1881/* 1882 * The zio_nop_write stage in the pipeline determines if allocating 1883 * a new bp is necessary. By leveraging a cryptographically secure checksum, 1884 * such as SHA256, we can compare the checksums of the new data and the old 1885 * to determine if allocating a new block is required. The nopwrite 1886 * feature can handle writes in either syncing or open context (i.e. zil 1887 * writes) and as a result is mutually exclusive with dedup. 1888 */ 1889static int 1890zio_nop_write(zio_t *zio) 1891{ 1892 blkptr_t *bp = zio->io_bp; 1893 blkptr_t *bp_orig = &zio->io_bp_orig; 1894 zio_prop_t *zp = &zio->io_prop; 1895 1896 ASSERT(BP_GET_LEVEL(bp) == 0); 1897 ASSERT(!(zio->io_flags & ZIO_FLAG_IO_REWRITE)); 1898 ASSERT(zp->zp_nopwrite); 1899 ASSERT(!zp->zp_dedup); 1900 ASSERT(zio->io_bp_override == NULL); 1901 ASSERT(IO_IS_ALLOCATING(zio)); 1902 1903 /* 1904 * Check to see if the original bp and the new bp have matching 1905 * characteristics (i.e. same checksum, compression algorithms, etc). 1906 * If they don't then just continue with the pipeline which will 1907 * allocate a new bp. 1908 */ 1909 if (BP_IS_HOLE(bp_orig) || 1910 !zio_checksum_table[BP_GET_CHECKSUM(bp)].ci_dedup || 1911 BP_GET_CHECKSUM(bp) != BP_GET_CHECKSUM(bp_orig) || 1912 BP_GET_COMPRESS(bp) != BP_GET_COMPRESS(bp_orig) || 1913 BP_GET_DEDUP(bp) != BP_GET_DEDUP(bp_orig) || 1914 zp->zp_copies != BP_GET_NDVAS(bp_orig)) 1915 return (ZIO_PIPELINE_CONTINUE); 1916 1917 /* 1918 * If the checksums match then reset the pipeline so that we 1919 * avoid allocating a new bp and issuing any I/O. 
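 *
 * (The comparison is on the full 256-bit checksum held in blk_cksum;
 * for a dedup-grade checksum such as SHA256, matching checksums are
 * taken as proof that the data already on disk equals the new data,
 * so the write can be elided.)  A minimal sketch of what the
 * ZIO_CHECKSUM_EQUAL() test amounts to; illustrative only, the
 * real macro is defined in sys/spa.h:
 */
#if 0
static boolean_t
cksum_equal(const zio_cksum_t *a, const zio_cksum_t *b)
{
	/* A zio_cksum_t is four 64-bit words; equal iff every word matches. */
	return (a->zc_word[0] == b->zc_word[0] &&
	    a->zc_word[1] == b->zc_word[1] &&
	    a->zc_word[2] == b->zc_word[2] &&
	    a->zc_word[3] == b->zc_word[3]);
}
#endif
/*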
1920 */ 1921 if (ZIO_CHECKSUM_EQUAL(bp->blk_cksum, bp_orig->blk_cksum)) { 1922 ASSERT(zio_checksum_table[zp->zp_checksum].ci_dedup); 1923 ASSERT3U(BP_GET_PSIZE(bp), ==, BP_GET_PSIZE(bp_orig)); 1924 ASSERT3U(BP_GET_LSIZE(bp), ==, BP_GET_LSIZE(bp_orig)); 1925 ASSERT(zp->zp_compress != ZIO_COMPRESS_OFF); 1926 ASSERT(bcmp(&bp->blk_prop, &bp_orig->blk_prop, 1927 sizeof (uint64_t)) == 0); 1928 1929 *bp = *bp_orig; 1930 zio->io_pipeline = ZIO_INTERLOCK_PIPELINE; 1931 zio->io_flags |= ZIO_FLAG_NOPWRITE; 1932 } 1933 1934 return (ZIO_PIPELINE_CONTINUE); 1935} 1936 1937/* 1938 * ========================================================================== 1939 * Dedup 1940 * ========================================================================== 1941 */ 1942static void 1943zio_ddt_child_read_done(zio_t *zio) 1944{ 1945 blkptr_t *bp = zio->io_bp; 1946 ddt_entry_t *dde = zio->io_private; 1947 ddt_phys_t *ddp; 1948 zio_t *pio = zio_unique_parent(zio); 1949 1950 mutex_enter(&pio->io_lock); 1951 ddp = ddt_phys_select(dde, bp); 1952 if (zio->io_error == 0) 1953 ddt_phys_clear(ddp); /* this ddp doesn't need repair */ 1954 if (zio->io_error == 0 && dde->dde_repair_data == NULL) 1955 dde->dde_repair_data = zio->io_data; 1956 else 1957 zio_buf_free(zio->io_data, zio->io_size); 1958 mutex_exit(&pio->io_lock); 1959} 1960 1961static int 1962zio_ddt_read_start(zio_t *zio) 1963{ 1964 blkptr_t *bp = zio->io_bp; 1965 1966 ASSERT(BP_GET_DEDUP(bp)); 1967 ASSERT(BP_GET_PSIZE(bp) == zio->io_size); 1968 ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL); 1969 1970 if (zio->io_child_error[ZIO_CHILD_DDT]) { 1971 ddt_t *ddt = ddt_select(zio->io_spa, bp); 1972 ddt_entry_t *dde = ddt_repair_start(ddt, bp); 1973 ddt_phys_t *ddp = dde->dde_phys; 1974 ddt_phys_t *ddp_self = ddt_phys_select(dde, bp); 1975 blkptr_t blk; 1976 1977 ASSERT(zio->io_vsd == NULL); 1978 zio->io_vsd = dde; 1979 1980 if (ddp_self == NULL) 1981 return (ZIO_PIPELINE_CONTINUE); 1982 1983 for (int p = 0; p < DDT_PHYS_TYPES; p++, ddp++) { 1984 if (ddp->ddp_phys_birth == 0 || ddp == ddp_self) 1985 continue; 1986 ddt_bp_create(ddt->ddt_checksum, &dde->dde_key, ddp, 1987 &blk); 1988 zio_nowait(zio_read(zio, zio->io_spa, &blk, 1989 zio_buf_alloc(zio->io_size), zio->io_size, 1990 zio_ddt_child_read_done, dde, zio->io_priority, 1991 ZIO_DDT_CHILD_FLAGS(zio) | ZIO_FLAG_DONT_PROPAGATE, 1992 &zio->io_bookmark)); 1993 } 1994 return (ZIO_PIPELINE_CONTINUE); 1995 } 1996 1997 zio_nowait(zio_read(zio, zio->io_spa, bp, 1998 zio->io_data, zio->io_size, NULL, NULL, zio->io_priority, 1999 ZIO_DDT_CHILD_FLAGS(zio), &zio->io_bookmark)); 2000 2001 return (ZIO_PIPELINE_CONTINUE); 2002} 2003 2004static int 2005zio_ddt_read_done(zio_t *zio) 2006{ 2007 blkptr_t *bp = zio->io_bp; 2008 2009 if (zio_wait_for_children(zio, ZIO_CHILD_DDT, ZIO_WAIT_DONE)) 2010 return (ZIO_PIPELINE_STOP); 2011 2012 ASSERT(BP_GET_DEDUP(bp)); 2013 ASSERT(BP_GET_PSIZE(bp) == zio->io_size); 2014 ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL); 2015 2016 if (zio->io_child_error[ZIO_CHILD_DDT]) { 2017 ddt_t *ddt = ddt_select(zio->io_spa, bp); 2018 ddt_entry_t *dde = zio->io_vsd; 2019 if (ddt == NULL) { 2020 ASSERT(spa_load_state(zio->io_spa) != SPA_LOAD_NONE); 2021 return (ZIO_PIPELINE_CONTINUE); 2022 } 2023 if (dde == NULL) { 2024 zio->io_stage = ZIO_STAGE_DDT_READ_START >> 1; 2025 zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, B_FALSE); 2026 return (ZIO_PIPELINE_STOP); 2027 } 2028 if (dde->dde_repair_data != NULL) { 2029 bcopy(dde->dde_repair_data, zio->io_data, zio->io_size); 2030 zio->io_child_error[ZIO_CHILD_DDT] = 0; 
2031 } 2032 ddt_repair_done(ddt, dde); 2033 zio->io_vsd = NULL; 2034 } 2035 2036 ASSERT(zio->io_vsd == NULL); 2037 2038 return (ZIO_PIPELINE_CONTINUE); 2039} 2040 2041static boolean_t 2042zio_ddt_collision(zio_t *zio, ddt_t *ddt, ddt_entry_t *dde) 2043{ 2044 spa_t *spa = zio->io_spa; 2045 2046 /* 2047 * Note: we compare the original data, not the transformed data, 2048 * because when zio->io_bp is an override bp, we will not have 2049 * pushed the I/O transforms. That's an important optimization 2050 * because otherwise we'd compress/encrypt all dmu_sync() data twice. 2051 */ 2052 for (int p = DDT_PHYS_SINGLE; p <= DDT_PHYS_TRIPLE; p++) { 2053 zio_t *lio = dde->dde_lead_zio[p]; 2054 2055 if (lio != NULL) { 2056 return (lio->io_orig_size != zio->io_orig_size || 2057 bcmp(zio->io_orig_data, lio->io_orig_data, 2058 zio->io_orig_size) != 0); 2059 } 2060 } 2061 2062 for (int p = DDT_PHYS_SINGLE; p <= DDT_PHYS_TRIPLE; p++) { 2063 ddt_phys_t *ddp = &dde->dde_phys[p]; 2064 2065 if (ddp->ddp_phys_birth != 0) { 2066 arc_buf_t *abuf = NULL; 2067 uint32_t aflags = ARC_WAIT; 2068 blkptr_t blk = *zio->io_bp; 2069 int error; 2070 2071 ddt_bp_fill(ddp, &blk, ddp->ddp_phys_birth); 2072 2073 ddt_exit(ddt); 2074 2075 error = arc_read(NULL, spa, &blk, 2076 arc_getbuf_func, &abuf, ZIO_PRIORITY_SYNC_READ, 2077 ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE, 2078 &aflags, &zio->io_bookmark); 2079 2080 if (error == 0) { 2081 if (arc_buf_size(abuf) != zio->io_orig_size || 2082 bcmp(abuf->b_data, zio->io_orig_data, 2083 zio->io_orig_size) != 0) 2084 error = EEXIST; 2085 VERIFY(arc_buf_remove_ref(abuf, &abuf)); 2086 } 2087 2088 ddt_enter(ddt); 2089 return (error != 0); 2090 } 2091 } 2092 2093 return (B_FALSE); 2094} 2095 2096static void 2097zio_ddt_child_write_ready(zio_t *zio) 2098{ 2099 int p = zio->io_prop.zp_copies; 2100 ddt_t *ddt = ddt_select(zio->io_spa, zio->io_bp); 2101 ddt_entry_t *dde = zio->io_private; 2102 ddt_phys_t *ddp = &dde->dde_phys[p]; 2103 zio_t *pio; 2104 2105 if (zio->io_error) 2106 return; 2107 2108 ddt_enter(ddt); 2109 2110 ASSERT(dde->dde_lead_zio[p] == zio); 2111 2112 ddt_phys_fill(ddp, zio->io_bp); 2113 2114 while ((pio = zio_walk_parents(zio)) != NULL) 2115 ddt_bp_fill(ddp, pio->io_bp, zio->io_txg); 2116 2117 ddt_exit(ddt); 2118} 2119 2120static void 2121zio_ddt_child_write_done(zio_t *zio) 2122{ 2123 int p = zio->io_prop.zp_copies; 2124 ddt_t *ddt = ddt_select(zio->io_spa, zio->io_bp); 2125 ddt_entry_t *dde = zio->io_private; 2126 ddt_phys_t *ddp = &dde->dde_phys[p]; 2127 2128 ddt_enter(ddt); 2129 2130 ASSERT(ddp->ddp_refcnt == 0); 2131 ASSERT(dde->dde_lead_zio[p] == zio); 2132 dde->dde_lead_zio[p] = NULL; 2133 2134 if (zio->io_error == 0) { 2135 while (zio_walk_parents(zio) != NULL) 2136 ddt_phys_addref(ddp); 2137 } else { 2138 ddt_phys_clear(ddp); 2139 } 2140 2141 ddt_exit(ddt); 2142} 2143 2144static void 2145zio_ddt_ditto_write_done(zio_t *zio) 2146{ 2147 int p = DDT_PHYS_DITTO; 2148 zio_prop_t *zp = &zio->io_prop; 2149 blkptr_t *bp = zio->io_bp; 2150 ddt_t *ddt = ddt_select(zio->io_spa, bp); 2151 ddt_entry_t *dde = zio->io_private; 2152 ddt_phys_t *ddp = &dde->dde_phys[p]; 2153 ddt_key_t *ddk = &dde->dde_key; 2154 2155 ddt_enter(ddt); 2156 2157 ASSERT(ddp->ddp_refcnt == 0); 2158 ASSERT(dde->dde_lead_zio[p] == zio); 2159 dde->dde_lead_zio[p] = NULL; 2160 2161 if (zio->io_error == 0) { 2162 ASSERT(ZIO_CHECKSUM_EQUAL(bp->blk_cksum, ddk->ddk_cksum)); 2163 ASSERT(zp->zp_copies < SPA_DVAS_PER_BP); 2164 ASSERT(zp->zp_copies == BP_GET_NDVAS(bp) - BP_IS_GANG(bp)); 2165 if (ddp->ddp_phys_birth != 0) 
2166 ddt_phys_free(ddt, ddk, ddp, zio->io_txg); 2167 ddt_phys_fill(ddp, bp); 2168 } 2169 2170 ddt_exit(ddt); 2171} 2172 2173static int 2174zio_ddt_write(zio_t *zio) 2175{ 2176 spa_t *spa = zio->io_spa; 2177 blkptr_t *bp = zio->io_bp; 2178 uint64_t txg = zio->io_txg; 2179 zio_prop_t *zp = &zio->io_prop; 2180 int p = zp->zp_copies; 2181 int ditto_copies; 2182 zio_t *cio = NULL; 2183 zio_t *dio = NULL; 2184 ddt_t *ddt = ddt_select(spa, bp); 2185 ddt_entry_t *dde; 2186 ddt_phys_t *ddp; 2187 2188 ASSERT(BP_GET_DEDUP(bp)); 2189 ASSERT(BP_GET_CHECKSUM(bp) == zp->zp_checksum); 2190 ASSERT(BP_IS_HOLE(bp) || zio->io_bp_override); 2191 2192 ddt_enter(ddt); 2193 dde = ddt_lookup(ddt, bp, B_TRUE); 2194 ddp = &dde->dde_phys[p]; 2195 2196 if (zp->zp_dedup_verify && zio_ddt_collision(zio, ddt, dde)) { 2197 /* 2198 * If we're using a weak checksum, upgrade to a strong checksum 2199 * and try again. If we're already using a strong checksum, 2200 * we can't resolve it, so just convert to an ordinary write. 2201 * (And automatically e-mail a paper to Nature?) 2202 */ 2203 if (!zio_checksum_table[zp->zp_checksum].ci_dedup) { 2204 zp->zp_checksum = spa_dedup_checksum(spa); 2205 zio_pop_transforms(zio); 2206 zio->io_stage = ZIO_STAGE_OPEN; 2207 BP_ZERO(bp); 2208 } else { 2209 zp->zp_dedup = B_FALSE; 2210 } 2211 zio->io_pipeline = ZIO_WRITE_PIPELINE; 2212 ddt_exit(ddt); 2213 return (ZIO_PIPELINE_CONTINUE); 2214 } 2215 2216 ditto_copies = ddt_ditto_copies_needed(ddt, dde, ddp); 2217 ASSERT(ditto_copies < SPA_DVAS_PER_BP); 2218 2219 if (ditto_copies > ddt_ditto_copies_present(dde) && 2220 dde->dde_lead_zio[DDT_PHYS_DITTO] == NULL) { 2221 zio_prop_t czp = *zp; 2222 2223 czp.zp_copies = ditto_copies; 2224 2225 /* 2226 * If we arrived here with an override bp, we won't have run 2227 * the transform stack, so we won't have the data we need to 2228 * generate a child i/o. So, toss the override bp and restart. 2229 * This is safe, because using the override bp is just an 2230 * optimization; and it's rare, so the cost doesn't matter. 
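 *
 * (The restart recipe below is the standard one: pop any pushed
 * transforms, rewind io_stage to ZIO_STAGE_OPEN, restore the full
 * write pipeline, drop the override bp, and BP_ZERO() the bp so it
 * looks freshly allocated.)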
2231 */ 2232 if (zio->io_bp_override) { 2233 zio_pop_transforms(zio); 2234 zio->io_stage = ZIO_STAGE_OPEN; 2235 zio->io_pipeline = ZIO_WRITE_PIPELINE; 2236 zio->io_bp_override = NULL; 2237 BP_ZERO(bp); 2238 ddt_exit(ddt); 2239 return (ZIO_PIPELINE_CONTINUE); 2240 } 2241 2242 dio = zio_write(zio, spa, txg, bp, zio->io_orig_data, 2243 zio->io_orig_size, &czp, NULL, 2244 zio_ddt_ditto_write_done, dde, zio->io_priority, 2245 ZIO_DDT_CHILD_FLAGS(zio), &zio->io_bookmark); 2246 2247 zio_push_transform(dio, zio->io_data, zio->io_size, 0, NULL); 2248 dde->dde_lead_zio[DDT_PHYS_DITTO] = dio; 2249 } 2250 2251 if (ddp->ddp_phys_birth != 0 || dde->dde_lead_zio[p] != NULL) { 2252 if (ddp->ddp_phys_birth != 0) 2253 ddt_bp_fill(ddp, bp, txg); 2254 if (dde->dde_lead_zio[p] != NULL) 2255 zio_add_child(zio, dde->dde_lead_zio[p]); 2256 else 2257 ddt_phys_addref(ddp); 2258 } else if (zio->io_bp_override) { 2259 ASSERT(bp->blk_birth == txg); 2260 ASSERT(BP_EQUAL(bp, zio->io_bp_override)); 2261 ddt_phys_fill(ddp, bp); 2262 ddt_phys_addref(ddp); 2263 } else { 2264 cio = zio_write(zio, spa, txg, bp, zio->io_orig_data, 2265 zio->io_orig_size, zp, zio_ddt_child_write_ready, 2266 zio_ddt_child_write_done, dde, zio->io_priority, 2267 ZIO_DDT_CHILD_FLAGS(zio), &zio->io_bookmark); 2268 2269 zio_push_transform(cio, zio->io_data, zio->io_size, 0, NULL); 2270 dde->dde_lead_zio[p] = cio; 2271 } 2272 2273 ddt_exit(ddt); 2274 2275 if (cio) 2276 zio_nowait(cio); 2277 if (dio) 2278 zio_nowait(dio); 2279 2280 return (ZIO_PIPELINE_CONTINUE); 2281} 2282 2283ddt_entry_t *freedde; /* for debugging */ 2284 2285static int 2286zio_ddt_free(zio_t *zio) 2287{ 2288 spa_t *spa = zio->io_spa; 2289 blkptr_t *bp = zio->io_bp; 2290 ddt_t *ddt = ddt_select(spa, bp); 2291 ddt_entry_t *dde; 2292 ddt_phys_t *ddp; 2293 2294 ASSERT(BP_GET_DEDUP(bp)); 2295 ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL); 2296 2297 ddt_enter(ddt); 2298 freedde = dde = ddt_lookup(ddt, bp, B_TRUE); 2299 ddp = ddt_phys_select(dde, bp); 2300 ddt_phys_decref(ddp); 2301 ddt_exit(ddt); 2302 2303 return (ZIO_PIPELINE_CONTINUE); 2304} 2305 2306/* 2307 * ========================================================================== 2308 * Allocate and free blocks 2309 * ========================================================================== 2310 */ 2311static int 2312zio_dva_allocate(zio_t *zio) 2313{ 2314 spa_t *spa = zio->io_spa; 2315 metaslab_class_t *mc = spa_normal_class(spa); 2316 blkptr_t *bp = zio->io_bp; 2317 int error; 2318 int flags = 0; 2319 2320 if (zio->io_gang_leader == NULL) { 2321 ASSERT(zio->io_child_type > ZIO_CHILD_GANG); 2322 zio->io_gang_leader = zio; 2323 } 2324 2325 ASSERT(BP_IS_HOLE(bp)); 2326 ASSERT0(BP_GET_NDVAS(bp)); 2327 ASSERT3U(zio->io_prop.zp_copies, >, 0); 2328 ASSERT3U(zio->io_prop.zp_copies, <=, spa_max_replication(spa)); 2329 ASSERT3U(zio->io_size, ==, BP_GET_PSIZE(bp)); 2330 2331 /* 2332 * The dump device does not support gang blocks so allocation on 2333 * behalf of the dump device (i.e. ZIO_FLAG_NODATA) must avoid 2334 * the "fast" gang feature. 2335 */ 2336 flags |= (zio->io_flags & ZIO_FLAG_NODATA) ? METASLAB_GANG_AVOID : 0; 2337 flags |= (zio->io_flags & ZIO_FLAG_GANG_CHILD) ? 
2338 METASLAB_GANG_CHILD : 0; 2339 error = metaslab_alloc(spa, mc, zio->io_size, bp, 2340 zio->io_prop.zp_copies, zio->io_txg, NULL, flags); 2341 2342 if (error) { 2343 spa_dbgmsg(spa, "%s: metaslab allocation failure: zio %p, " 2344 "size %llu, error %d", spa_name(spa), zio, zio->io_size, 2345 error); 2346 if (error == ENOSPC && zio->io_size > SPA_MINBLOCKSIZE) 2347 return (zio_write_gang_block(zio)); 2348 zio->io_error = error; 2349 } 2350 2351 return (ZIO_PIPELINE_CONTINUE); 2352} 2353 2354static int 2355zio_dva_free(zio_t *zio) 2356{ 2357 metaslab_free(zio->io_spa, zio->io_bp, zio->io_txg, B_FALSE); 2358 2359 return (ZIO_PIPELINE_CONTINUE); 2360} 2361 2362static int 2363zio_dva_claim(zio_t *zio) 2364{ 2365 int error; 2366 2367 error = metaslab_claim(zio->io_spa, zio->io_bp, zio->io_txg); 2368 if (error) 2369 zio->io_error = error; 2370 2371 return (ZIO_PIPELINE_CONTINUE); 2372} 2373 2374/* 2375 * Undo an allocation. This is used by zio_done() when an I/O fails 2376 * and we want to give back the block we just allocated. 2377 * This handles both normal blocks and gang blocks. 2378 */ 2379static void 2380zio_dva_unallocate(zio_t *zio, zio_gang_node_t *gn, blkptr_t *bp) 2381{ 2382 ASSERT(bp->blk_birth == zio->io_txg || BP_IS_HOLE(bp)); 2383 ASSERT(zio->io_bp_override == NULL); 2384 2385 if (!BP_IS_HOLE(bp)) 2386 metaslab_free(zio->io_spa, bp, bp->blk_birth, B_TRUE); 2387 2388 if (gn != NULL) { 2389 for (int g = 0; g < SPA_GBH_NBLKPTRS; g++) { 2390 zio_dva_unallocate(zio, gn->gn_child[g], 2391 &gn->gn_gbh->zg_blkptr[g]); 2392 } 2393 } 2394} 2395 2396/* 2397 * Try to allocate an intent log block. Return 0 on success, errno on failure. 2398 */ 2399int 2400zio_alloc_zil(spa_t *spa, uint64_t txg, blkptr_t *new_bp, blkptr_t *old_bp, 2401 uint64_t size, boolean_t use_slog) 2402{ 2403 int error = 1; 2404 2405 ASSERT(txg > spa_syncing_txg(spa)); 2406 2407 /* 2408 * ZIL blocks are always contiguous (i.e. not gang blocks) so we 2409 * set the METASLAB_GANG_AVOID flag so that they don't "fast gang" 2410 * when allocating them. 2411 */ 2412 if (use_slog) { 2413 error = metaslab_alloc(spa, spa_log_class(spa), size, 2414 new_bp, 1, txg, old_bp, 2415 METASLAB_HINTBP_AVOID | METASLAB_GANG_AVOID); 2416 } 2417 2418 if (error) { 2419 error = metaslab_alloc(spa, spa_normal_class(spa), size, 2420 new_bp, 1, txg, old_bp, 2421 METASLAB_HINTBP_AVOID | METASLAB_GANG_AVOID); 2422 } 2423 2424 if (error == 0) { 2425 BP_SET_LSIZE(new_bp, size); 2426 BP_SET_PSIZE(new_bp, size); 2427 BP_SET_COMPRESS(new_bp, ZIO_COMPRESS_OFF); 2428 BP_SET_CHECKSUM(new_bp, 2429 spa_version(spa) >= SPA_VERSION_SLIM_ZIL 2430 ? ZIO_CHECKSUM_ZILOG2 : ZIO_CHECKSUM_ZILOG); 2431 BP_SET_TYPE(new_bp, DMU_OT_INTENT_LOG); 2432 BP_SET_LEVEL(new_bp, 0); 2433 BP_SET_DEDUP(new_bp, 0); 2434 BP_SET_BYTEORDER(new_bp, ZFS_HOST_BYTEORDER); 2435 } 2436 2437 return (error); 2438} 2439 2440/* 2441 * Free an intent log block. 
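 * (It was allocated by zio_alloc_zil() above, so it is never a gang
 * block; hence the ASSERTs below.)
 *
 * A minimal sketch of a zio_alloc_zil() caller, to show the pairing;
 * the caller itself is hypothetical (the real one lives in zil.c):
 */
#if 0
static int
zil_block_alloc_example(spa_t *spa, uint64_t txg, blkptr_t *prev_bp,
    uint64_t size)
{
	blkptr_t new_bp;

	BP_ZERO(&new_bp);
	/* Prefer the slog; zio_alloc_zil() falls back to the normal class. */
	return (zio_alloc_zil(spa, txg, &new_bp, prev_bp, size, B_TRUE));
}
#endif
/*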
2442 */ 2443void 2444zio_free_zil(spa_t *spa, uint64_t txg, blkptr_t *bp) 2445{ 2446 ASSERT(BP_GET_TYPE(bp) == DMU_OT_INTENT_LOG); 2447 ASSERT(!BP_IS_GANG(bp)); 2448 2449 zio_free(spa, txg, bp); 2450} 2451 2452/* 2453 * ========================================================================== 2454 * Read, write and delete to physical devices 2455 * ========================================================================== 2456 */ 2457static int 2458zio_vdev_io_start(zio_t *zio) 2459{ 2460 vdev_t *vd = zio->io_vd; 2461 uint64_t align; 2462 spa_t *spa = zio->io_spa; 2463 2464 ASSERT(zio->io_error == 0); 2465 ASSERT(zio->io_child_error[ZIO_CHILD_VDEV] == 0); 2466 2467 if (vd == NULL) { 2468 if (!(zio->io_flags & ZIO_FLAG_CONFIG_WRITER)) 2469 spa_config_enter(spa, SCL_ZIO, zio, RW_READER); 2470 2471 /* 2472 * The mirror_ops handle multiple DVAs in a single BP. 2473 */ 2474 return (vdev_mirror_ops.vdev_op_io_start(zio)); 2475 } 2476 2477 if (vd->vdev_ops->vdev_op_leaf && zio->io_type == ZIO_TYPE_FREE) { 2478 trim_map_free(zio); 2479 return (ZIO_PIPELINE_CONTINUE); 2480 } 2481 2482 /* 2483 * We keep track of time-sensitive I/Os so that the scan thread 2484 * can quickly react to certain workloads. In particular, we care 2485 * about non-scrubbing, top-level reads and writes with the following 2486 * characteristics: 2487 * - synchronous writes of user data to non-slog devices 2488 * - any reads of user data 2489 * When these conditions are met, adjust the timestamp of spa_last_io 2490 * which allows the scan thread to adjust its workload accordingly. 2491 */ 2492 if (!(zio->io_flags & ZIO_FLAG_SCAN_THREAD) && zio->io_bp != NULL && 2493 vd == vd->vdev_top && !vd->vdev_islog && 2494 zio->io_bookmark.zb_objset != DMU_META_OBJSET && 2495 zio->io_txg != spa_syncing_txg(spa)) { 2496 uint64_t old = spa->spa_last_io; 2497 uint64_t new = ddi_get_lbolt64(); 2498 if (old != new) 2499 (void) atomic_cas_64(&spa->spa_last_io, old, new); 2500 } 2501 2502 align = 1ULL << vd->vdev_top->vdev_ashift; 2503 2504 if (P2PHASE(zio->io_size, align) != 0) { 2505 uint64_t asize = P2ROUNDUP(zio->io_size, align); 2506 char *abuf = NULL; 2507 if (zio->io_type == ZIO_TYPE_READ || 2508 zio->io_type == ZIO_TYPE_WRITE) 2509 abuf = zio_buf_alloc(asize); 2510 ASSERT(vd == vd->vdev_top); 2511 if (zio->io_type == ZIO_TYPE_WRITE) { 2512 bcopy(zio->io_data, abuf, zio->io_size); 2513 bzero(abuf + zio->io_size, asize - zio->io_size); 2514 } 2515 zio_push_transform(zio, abuf, asize, abuf ? asize : 0, 2516 zio_subblock); 2517 } 2518 2519 ASSERT(P2PHASE(zio->io_offset, align) == 0); 2520 ASSERT(P2PHASE(zio->io_size, align) == 0); 2521 VERIFY(zio->io_type == ZIO_TYPE_READ || spa_writeable(spa)); 2522 2523 /* 2524 * If this is a repair I/O, and there's no self-healing involved -- 2525 * that is, we're just resilvering what we expect to resilver -- 2526 * then don't do the I/O unless zio's txg is actually in vd's DTL. 2527 * This prevents spurious resilvering with nested replication. 2528 * For example, given a mirror of mirrors, (A+B)+(C+D), if only 2529 * A is out of date, we'll read from C+D, then use the data to 2530 * resilver A+B -- but we don't actually want to resilver B, just A. 2531 * The top-level mirror has no way to know this, so instead we just 2532 * discard unnecessary repairs as we work our way down the vdev tree. 2533 * The same logic applies to any form of nested replication: 2534 * ditto + mirror, RAID-Z + replacing, etc. This covers them all. 
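 *
 * (Concretely: the repair write fans out to both A and B; when it
 * reaches leaf B, B's DTL does not contain the zio's txg, so the
 * vdev_dtl_contains() check below bypasses the write on B while A
 * is still repaired.)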
2535 */ 2536 if ((zio->io_flags & ZIO_FLAG_IO_REPAIR) && 2537 !(zio->io_flags & ZIO_FLAG_SELF_HEAL) && 2538 zio->io_txg != 0 && /* not a delegated i/o */ 2539 !vdev_dtl_contains(vd, DTL_PARTIAL, zio->io_txg, 1)) { 2540 ASSERT(zio->io_type == ZIO_TYPE_WRITE); 2541 zio_vdev_io_bypass(zio); 2542 return (ZIO_PIPELINE_CONTINUE); 2543 } 2544 2545 if (vd->vdev_ops->vdev_op_leaf && 2546 (zio->io_type == ZIO_TYPE_READ || zio->io_type == ZIO_TYPE_WRITE)) { 2547 2548 if (zio->io_type == ZIO_TYPE_READ && vdev_cache_read(zio) == 0) 2549 return (ZIO_PIPELINE_CONTINUE); 2550 2551 if ((zio = vdev_queue_io(zio)) == NULL) 2552 return (ZIO_PIPELINE_STOP); 2553 2554 if (!vdev_accessible(vd, zio)) { 2555 zio->io_error = ENXIO; 2556 zio_interrupt(zio); 2557 return (ZIO_PIPELINE_STOP); 2558 } 2559 } 2560 2561 if (vd->vdev_ops->vdev_op_leaf && zio->io_type == ZIO_TYPE_WRITE) { 2562 if (!trim_map_write_start(zio)) 2563 return (ZIO_PIPELINE_STOP); 2564 } 2565 2566 return (vd->vdev_ops->vdev_op_io_start(zio)); 2567} 2568 2569static int 2570zio_vdev_io_done(zio_t *zio) 2571{ 2572 vdev_t *vd = zio->io_vd; 2573 vdev_ops_t *ops = vd ? vd->vdev_ops : &vdev_mirror_ops; 2574 boolean_t unexpected_error = B_FALSE; 2575 2576 if (zio_wait_for_children(zio, ZIO_CHILD_VDEV, ZIO_WAIT_DONE)) 2577 return (ZIO_PIPELINE_STOP); 2578 2579 ASSERT(zio->io_type == ZIO_TYPE_READ || 2580 zio->io_type == ZIO_TYPE_WRITE || zio->io_type == ZIO_TYPE_FREE); 2581 2582 if (vd != NULL && vd->vdev_ops->vdev_op_leaf && 2583 zio->io_type == ZIO_TYPE_WRITE) { 2584 trim_map_write_done(zio); 2585 } 2586 2587 if (vd != NULL && vd->vdev_ops->vdev_op_leaf && 2588 (zio->io_type == ZIO_TYPE_READ || zio->io_type == ZIO_TYPE_WRITE)) { 2589 2590 vdev_queue_io_done(zio); 2591 2592 if (zio->io_type == ZIO_TYPE_WRITE) 2593 vdev_cache_write(zio); 2594 2595 if (zio_injection_enabled && zio->io_error == 0) 2596 zio->io_error = zio_handle_device_injection(vd, 2597 zio, EIO); 2598 2599 if (zio_injection_enabled && zio->io_error == 0) 2600 zio->io_error = zio_handle_label_injection(zio, EIO); 2601 2602 if (zio->io_error) { 2603 if (!vdev_accessible(vd, zio)) { 2604 zio->io_error = ENXIO; 2605 } else { 2606 unexpected_error = B_TRUE; 2607 } 2608 } 2609 } 2610 2611 ops->vdev_op_io_done(zio); 2612 2613 if (unexpected_error) 2614 VERIFY(vdev_probe(vd, zio) == NULL); 2615 2616 return (ZIO_PIPELINE_CONTINUE); 2617} 2618 2619/* 2620 * For non-raidz ZIOs, we can just copy aside the bad data read from the 2621 * disk, and use that to finish the checksum ereport later. 
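 *
 * (RAID-Z cannot use this default because the bad data is spread
 * across columns, so it installs its own vsd_cksum_report callback;
 * the default below simply snapshots io_data into the report.)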
2622 */ 2623static void 2624zio_vsd_default_cksum_finish(zio_cksum_report_t *zcr, 2625 const void *good_buf) 2626{ 2627 /* no processing needed */ 2628 zfs_ereport_finish_checksum(zcr, good_buf, zcr->zcr_cbdata, B_FALSE); 2629} 2630 2631/*ARGSUSED*/ 2632void 2633zio_vsd_default_cksum_report(zio_t *zio, zio_cksum_report_t *zcr, void *ignored) 2634{ 2635 void *buf = zio_buf_alloc(zio->io_size); 2636 2637 bcopy(zio->io_data, buf, zio->io_size); 2638 2639 zcr->zcr_cbinfo = zio->io_size; 2640 zcr->zcr_cbdata = buf; 2641 zcr->zcr_finish = zio_vsd_default_cksum_finish; 2642 zcr->zcr_free = zio_buf_free; 2643} 2644 2645static int 2646zio_vdev_io_assess(zio_t *zio) 2647{ 2648 vdev_t *vd = zio->io_vd; 2649 2650 if (zio_wait_for_children(zio, ZIO_CHILD_VDEV, ZIO_WAIT_DONE)) 2651 return (ZIO_PIPELINE_STOP); 2652 2653 if (vd == NULL && !(zio->io_flags & ZIO_FLAG_CONFIG_WRITER)) 2654 spa_config_exit(zio->io_spa, SCL_ZIO, zio); 2655 2656 if (zio->io_vsd != NULL) { 2657 zio->io_vsd_ops->vsd_free(zio); 2658 zio->io_vsd = NULL; 2659 } 2660 2661 if (zio_injection_enabled && zio->io_error == 0) 2662 zio->io_error = zio_handle_fault_injection(zio, EIO); 2663 2664 if (zio->io_type == ZIO_TYPE_IOCTL && zio->io_cmd == DKIOCTRIM) 2665 switch (zio->io_error) { 2666 case 0: 2667 ZIO_TRIM_STAT_INCR(bytes, zio->io_size); 2668 ZIO_TRIM_STAT_BUMP(success); 2669 break; 2670 case EOPNOTSUPP: 2671 ZIO_TRIM_STAT_BUMP(unsupported); 2672 break; 2673 default: 2674 ZIO_TRIM_STAT_BUMP(failed); 2675 break; 2676 } 2677 2678 /* 2679 * If the I/O failed, determine whether we should attempt to retry it. 2680 * 2681 * On retry, we cut in line in the issue queue, since we don't want 2682 * compression/checksumming/etc. work to prevent our (cheap) IO reissue. 2683 */ 2684 if (zio->io_error && vd == NULL && 2685 !(zio->io_flags & (ZIO_FLAG_DONT_RETRY | ZIO_FLAG_IO_RETRY))) { 2686 ASSERT(!(zio->io_flags & ZIO_FLAG_DONT_QUEUE)); /* not a leaf */ 2687 ASSERT(!(zio->io_flags & ZIO_FLAG_IO_BYPASS)); /* not a leaf */ 2688 zio->io_error = 0; 2689 zio->io_flags |= ZIO_FLAG_IO_RETRY | 2690 ZIO_FLAG_DONT_CACHE | ZIO_FLAG_DONT_AGGREGATE; 2691 zio->io_stage = ZIO_STAGE_VDEV_IO_START >> 1; 2692 zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, 2693 zio_requeue_io_start_cut_in_line); 2694 return (ZIO_PIPELINE_STOP); 2695 } 2696 2697 /* 2698 * If we got an error on a leaf device, convert it to ENXIO 2699 * if the device is not accessible at all. 2700 */ 2701 if (zio->io_error && vd != NULL && vd->vdev_ops->vdev_op_leaf && 2702 !vdev_accessible(vd, zio)) 2703 zio->io_error = ENXIO; 2704 2705 /* 2706 * If we can't write to an interior vdev (mirror or RAID-Z), 2707 * set vdev_cant_write so that we stop trying to allocate from it. 
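 *
 * (An ENXIO write error on a non-leaf vdev means enough children
 * failed that the mirror or RAID-Z as a whole could not absorb the
 * write; setting vdev_cant_write steers future allocations away
 * from it.)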
2708 */ 2709 if (zio->io_error == ENXIO && zio->io_type == ZIO_TYPE_WRITE && 2710 vd != NULL && !vd->vdev_ops->vdev_op_leaf) { 2711 vd->vdev_cant_write = B_TRUE; 2712 } 2713 2714 if (zio->io_error) 2715 zio->io_pipeline = ZIO_INTERLOCK_PIPELINE; 2716 2717 return (ZIO_PIPELINE_CONTINUE); 2718} 2719 2720void 2721zio_vdev_io_reissue(zio_t *zio) 2722{ 2723 ASSERT(zio->io_stage == ZIO_STAGE_VDEV_IO_START); 2724 ASSERT(zio->io_error == 0); 2725 2726 zio->io_stage >>= 1; 2727} 2728 2729void 2730zio_vdev_io_redone(zio_t *zio) 2731{ 2732 ASSERT(zio->io_stage == ZIO_STAGE_VDEV_IO_DONE); 2733 2734 zio->io_stage >>= 1; 2735} 2736 2737void 2738zio_vdev_io_bypass(zio_t *zio) 2739{ 2740 ASSERT(zio->io_stage == ZIO_STAGE_VDEV_IO_START); 2741 ASSERT(zio->io_error == 0); 2742 2743 zio->io_flags |= ZIO_FLAG_IO_BYPASS; 2744 zio->io_stage = ZIO_STAGE_VDEV_IO_ASSESS >> 1; 2745} 2746 2747/* 2748 * ========================================================================== 2749 * Generate and verify checksums 2750 * ========================================================================== 2751 */ 2752static int 2753zio_checksum_generate(zio_t *zio) 2754{ 2755 blkptr_t *bp = zio->io_bp; 2756 enum zio_checksum checksum; 2757 2758 if (bp == NULL) { 2759 /* 2760 * This is zio_write_phys(). 2761 * We're either generating a label checksum, or none at all. 2762 */ 2763 checksum = zio->io_prop.zp_checksum; 2764 2765 if (checksum == ZIO_CHECKSUM_OFF) 2766 return (ZIO_PIPELINE_CONTINUE); 2767 2768 ASSERT(checksum == ZIO_CHECKSUM_LABEL); 2769 } else { 2770 if (BP_IS_GANG(bp) && zio->io_child_type == ZIO_CHILD_GANG) { 2771 ASSERT(!IO_IS_ALLOCATING(zio)); 2772 checksum = ZIO_CHECKSUM_GANG_HEADER; 2773 } else { 2774 checksum = BP_GET_CHECKSUM(bp); 2775 } 2776 } 2777 2778 zio_checksum_compute(zio, checksum, zio->io_data, zio->io_size); 2779 2780 return (ZIO_PIPELINE_CONTINUE); 2781} 2782 2783static int 2784zio_checksum_verify(zio_t *zio) 2785{ 2786 zio_bad_cksum_t info; 2787 blkptr_t *bp = zio->io_bp; 2788 int error; 2789 2790 ASSERT(zio->io_vd != NULL); 2791 2792 if (bp == NULL) { 2793 /* 2794 * This is zio_read_phys(). 2795 * We're either verifying a label checksum, or nothing at all. 2796 */ 2797 if (zio->io_prop.zp_checksum == ZIO_CHECKSUM_OFF) 2798 return (ZIO_PIPELINE_CONTINUE); 2799 2800 ASSERT(zio->io_prop.zp_checksum == ZIO_CHECKSUM_LABEL); 2801 } 2802 2803 if ((error = zio_checksum_error(zio, &info)) != 0) { 2804 zio->io_error = error; 2805 if (!(zio->io_flags & ZIO_FLAG_SPECULATIVE)) { 2806 zfs_ereport_start_checksum(zio->io_spa, 2807 zio->io_vd, zio, zio->io_offset, 2808 zio->io_size, NULL, &info); 2809 } 2810 } 2811 2812 return (ZIO_PIPELINE_CONTINUE); 2813} 2814 2815/* 2816 * Called by RAID-Z to ensure we don't compute the checksum twice. 2817 */ 2818void 2819zio_checksum_verified(zio_t *zio) 2820{ 2821 zio->io_pipeline &= ~ZIO_STAGE_CHECKSUM_VERIFY; 2822} 2823 2824/* 2825 * ========================================================================== 2826 * Error rank. Errors are ranked in the order 0, ENXIO, ECKSUM, EIO, other. 2827 * An error of 0 indicates success. ENXIO indicates whole-device failure, 2828 * which may be transient (e.g. unplugged) or permanent. ECKSUM and EIO 2829 * indicate errors that are specific to one I/O, and most likely permanent. 2830 * Any other error is presumed to be worse because we weren't expecting it.
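 *
 * For example, zio_worst_error(EIO, ENXIO) returns EIO, because EIO
 * ranks later in zio_error_rank[] below; an error that is not in the
 * table at all (say EINVAL) outranks everything that is.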
2831 * ========================================================================== 2832 */ 2833int 2834zio_worst_error(int e1, int e2) 2835{ 2836 static int zio_error_rank[] = { 0, ENXIO, ECKSUM, EIO }; 2837 int r1, r2; 2838 2839 for (r1 = 0; r1 < sizeof (zio_error_rank) / sizeof (int); r1++) 2840 if (e1 == zio_error_rank[r1]) 2841 break; 2842 2843 for (r2 = 0; r2 < sizeof (zio_error_rank) / sizeof (int); r2++) 2844 if (e2 == zio_error_rank[r2]) 2845 break; 2846 2847 return (r1 > r2 ? e1 : e2); 2848} 2849 2850/* 2851 * ========================================================================== 2852 * I/O completion 2853 * ========================================================================== 2854 */ 2855static int 2856zio_ready(zio_t *zio) 2857{ 2858 blkptr_t *bp = zio->io_bp; 2859 zio_t *pio, *pio_next; 2860 2861 if (zio_wait_for_children(zio, ZIO_CHILD_GANG, ZIO_WAIT_READY) || 2862 zio_wait_for_children(zio, ZIO_CHILD_DDT, ZIO_WAIT_READY)) 2863 return (ZIO_PIPELINE_STOP); 2864 2865 if (zio->io_ready) { 2866 ASSERT(IO_IS_ALLOCATING(zio)); 2867 ASSERT(bp->blk_birth == zio->io_txg || BP_IS_HOLE(bp) || 2868 (zio->io_flags & ZIO_FLAG_NOPWRITE)); 2869 ASSERT(zio->io_children[ZIO_CHILD_GANG][ZIO_WAIT_READY] == 0); 2870 2871 zio->io_ready(zio); 2872 } 2873 2874 if (bp != NULL && bp != &zio->io_bp_copy) 2875 zio->io_bp_copy = *bp; 2876 2877 if (zio->io_error) 2878 zio->io_pipeline = ZIO_INTERLOCK_PIPELINE; 2879 2880 mutex_enter(&zio->io_lock); 2881 zio->io_state[ZIO_WAIT_READY] = 1; 2882 pio = zio_walk_parents(zio); 2883 mutex_exit(&zio->io_lock); 2884 2885 /* 2886 * As we notify zio's parents, new parents could be added. 2887 * New parents go to the head of zio's io_parent_list, however, 2888 * so we will (correctly) not notify them. The remainder of zio's 2889 * io_parent_list, from 'pio_next' onward, cannot change because 2890 * all parents must wait for us to be done before they can be done. 2891 */ 2892 for (; pio != NULL; pio = pio_next) { 2893 pio_next = zio_walk_parents(zio); 2894 zio_notify_parent(pio, zio, ZIO_WAIT_READY); 2895 } 2896 2897 if (zio->io_flags & ZIO_FLAG_NODATA) { 2898 if (BP_IS_GANG(bp)) { 2899 zio->io_flags &= ~ZIO_FLAG_NODATA; 2900 } else { 2901 ASSERT((uintptr_t)zio->io_data < SPA_MAXBLOCKSIZE); 2902 zio->io_pipeline &= ~ZIO_VDEV_IO_STAGES; 2903 } 2904 } 2905 2906 if (zio_injection_enabled && 2907 zio->io_spa->spa_syncing_txg == zio->io_txg) 2908 zio_handle_ignored_writes(zio); 2909 2910 return (ZIO_PIPELINE_CONTINUE); 2911} 2912 2913static int 2914zio_done(zio_t *zio) 2915{ 2916 spa_t *spa = zio->io_spa; 2917 zio_t *lio = zio->io_logical; 2918 blkptr_t *bp = zio->io_bp; 2919 vdev_t *vd = zio->io_vd; 2920 uint64_t psize = zio->io_size; 2921 zio_t *pio, *pio_next; 2922 2923 /* 2924 * If our children haven't all completed, 2925 * wait for them and then repeat this pipeline stage. 
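 *
 * (zio_wait_for_children() returns B_TRUE when it registers this zio
 * as a waiter; returning ZIO_PIPELINE_STOP then parks the zio, and
 * the last child to complete re-dispatches this stage.)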
2926 */ 2927 if (zio_wait_for_children(zio, ZIO_CHILD_VDEV, ZIO_WAIT_DONE) || 2928 zio_wait_for_children(zio, ZIO_CHILD_GANG, ZIO_WAIT_DONE) || 2929 zio_wait_for_children(zio, ZIO_CHILD_DDT, ZIO_WAIT_DONE) || 2930 zio_wait_for_children(zio, ZIO_CHILD_LOGICAL, ZIO_WAIT_DONE)) 2931 return (ZIO_PIPELINE_STOP); 2932 2933 for (int c = 0; c < ZIO_CHILD_TYPES; c++) 2934 for (int w = 0; w < ZIO_WAIT_TYPES; w++) 2935 ASSERT(zio->io_children[c][w] == 0); 2936 2937 if (bp != NULL) { 2938 ASSERT(bp->blk_pad[0] == 0); 2939 ASSERT(bp->blk_pad[1] == 0); 2940 ASSERT(bcmp(bp, &zio->io_bp_copy, sizeof (blkptr_t)) == 0 || 2941 (bp == zio_unique_parent(zio)->io_bp)); 2942 if (zio->io_type == ZIO_TYPE_WRITE && !BP_IS_HOLE(bp) && 2943 zio->io_bp_override == NULL && 2944 !(zio->io_flags & ZIO_FLAG_IO_REPAIR)) { 2945 ASSERT(!BP_SHOULD_BYTESWAP(bp)); 2946 ASSERT3U(zio->io_prop.zp_copies, <=, BP_GET_NDVAS(bp)); 2947 ASSERT(BP_COUNT_GANG(bp) == 0 || 2948 (BP_COUNT_GANG(bp) == BP_GET_NDVAS(bp))); 2949 } 2950 if (zio->io_flags & ZIO_FLAG_NOPWRITE) 2951 VERIFY(BP_EQUAL(bp, &zio->io_bp_orig)); 2952 } 2953 2954 /* 2955 * If there were child vdev/gang/ddt errors, they apply to us now. 2956 */ 2957 zio_inherit_child_errors(zio, ZIO_CHILD_VDEV); 2958 zio_inherit_child_errors(zio, ZIO_CHILD_GANG); 2959 zio_inherit_child_errors(zio, ZIO_CHILD_DDT); 2960 2961 /* 2962 * If the I/O on the transformed data was successful, generate any 2963 * checksum reports now while we still have the transformed data. 2964 */ 2965 if (zio->io_error == 0) { 2966 while (zio->io_cksum_report != NULL) { 2967 zio_cksum_report_t *zcr = zio->io_cksum_report; 2968 uint64_t align = zcr->zcr_align; 2969 uint64_t asize = P2ROUNDUP(psize, align); 2970 char *abuf = zio->io_data; 2971 2972 if (asize != psize) { 2973 abuf = zio_buf_alloc(asize); 2974 bcopy(zio->io_data, abuf, psize); 2975 bzero(abuf + psize, asize - psize); 2976 } 2977 2978 zio->io_cksum_report = zcr->zcr_next; 2979 zcr->zcr_next = NULL; 2980 zcr->zcr_finish(zcr, abuf); 2981 zfs_ereport_free_checksum(zcr); 2982 2983 if (asize != psize) 2984 zio_buf_free(abuf, asize); 2985 } 2986 } 2987 2988 zio_pop_transforms(zio); /* note: may set zio->io_error */ 2989 2990 vdev_stat_update(zio, psize); 2991 2992 if (zio->io_error) { 2993 /* 2994 * If this I/O is attached to a particular vdev, 2995 * generate an error message describing the I/O failure 2996 * at the block level. We ignore these errors if the 2997 * device is currently unavailable. 2998 */ 2999 if (zio->io_error != ECKSUM && vd != NULL && !vdev_is_dead(vd)) 3000 zfs_ereport_post(FM_EREPORT_ZFS_IO, spa, vd, zio, 0, 0); 3001 3002 if ((zio->io_error == EIO || !(zio->io_flags & 3003 (ZIO_FLAG_SPECULATIVE | ZIO_FLAG_DONT_PROPAGATE))) && 3004 zio == lio) { 3005 /* 3006 * For logical I/O requests, tell the SPA to log the 3007 * error and generate a logical data ereport. 3008 */ 3009 spa_log_error(spa, zio); 3010 zfs_ereport_post(FM_EREPORT_ZFS_DATA, spa, NULL, zio, 3011 0, 0); 3012 } 3013 } 3014 3015 if (zio->io_error && zio == lio) { 3016 /* 3017 * Determine whether zio should be reexecuted. This will 3018 * propagate all the way to the root via zio_notify_parent(). 
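 *
 * (The policy below: a failed must-succeed allocating write retries
 * immediately (ZIO_REEXECUTE_NOW) unless the error is ENOSPC, in
 * which case retrying cannot help and the pool suspends instead
 * (ZIO_REEXECUTE_SUSPEND).)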
3019 */ 3020 ASSERT(vd == NULL && bp != NULL); 3021 ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL); 3022 3023 if (IO_IS_ALLOCATING(zio) && 3024 !(zio->io_flags & ZIO_FLAG_CANFAIL)) { 3025 if (zio->io_error != ENOSPC) 3026 zio->io_reexecute |= ZIO_REEXECUTE_NOW; 3027 else 3028 zio->io_reexecute |= ZIO_REEXECUTE_SUSPEND; 3029 } 3030 3031 if ((zio->io_type == ZIO_TYPE_READ || 3032 zio->io_type == ZIO_TYPE_FREE) && 3033 !(zio->io_flags & ZIO_FLAG_SCAN_THREAD) && 3034 zio->io_error == ENXIO && 3035 spa_load_state(spa) == SPA_LOAD_NONE && 3036 spa_get_failmode(spa) != ZIO_FAILURE_MODE_CONTINUE) 3037 zio->io_reexecute |= ZIO_REEXECUTE_SUSPEND; 3038 3039 if (!(zio->io_flags & ZIO_FLAG_CANFAIL) && !zio->io_reexecute) 3040 zio->io_reexecute |= ZIO_REEXECUTE_SUSPEND; 3041 3042 /* 3043 * Here is a possibly good place to attempt to do 3044 * either combinatorial reconstruction or error correction 3045 * based on checksums. It also might be a good place 3046 * to send out preliminary ereports before we suspend 3047 * processing. 3048 */ 3049 } 3050 3051 /* 3052 * If there were logical child errors, they apply to us now. 3053 * We defer this until now to avoid conflating logical child 3054 * errors with errors that happened to the zio itself when 3055 * updating vdev stats and reporting FMA events above. 3056 */ 3057 zio_inherit_child_errors(zio, ZIO_CHILD_LOGICAL); 3058 3059 if ((zio->io_error || zio->io_reexecute) && 3060 IO_IS_ALLOCATING(zio) && zio->io_gang_leader == zio && 3061 !(zio->io_flags & (ZIO_FLAG_IO_REWRITE | ZIO_FLAG_NOPWRITE))) 3062 zio_dva_unallocate(zio, zio->io_gang_tree, bp); 3063 3064 zio_gang_tree_free(&zio->io_gang_tree); 3065 3066 /* 3067 * Godfather I/Os should never suspend. 3068 */ 3069 if ((zio->io_flags & ZIO_FLAG_GODFATHER) && 3070 (zio->io_reexecute & ZIO_REEXECUTE_SUSPEND)) 3071 zio->io_reexecute = 0; 3072 3073 if (zio->io_reexecute) { 3074 /* 3075 * This is a logical I/O that wants to reexecute. 3076 * 3077 * Reexecute is top-down. When an i/o fails, if it's not 3078 * the root, it simply notifies its parent and sticks around. 3079 * The parent, seeing that it still has children in zio_done(), 3080 * does the same. This percolates all the way up to the root. 3081 * The root i/o will reexecute or suspend the entire tree. 3082 * 3083 * This approach ensures that zio_reexecute() honors 3084 * all the original i/o dependency relationships, e.g. 3085 * parents not executing until children are ready. 3086 */ 3087 ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL); 3088 3089 zio->io_gang_leader = NULL; 3090 3091 mutex_enter(&zio->io_lock); 3092 zio->io_state[ZIO_WAIT_DONE] = 1; 3093 mutex_exit(&zio->io_lock); 3094 3095 /* 3096 * "The Godfather" I/O monitors its children but is 3097 * not a true parent to them. It will track them through 3098 * the pipeline but severs its ties whenever they get into 3099 * trouble (e.g. suspended). This allows "The Godfather" 3100 * I/O to return status without blocking. 3101 */ 3102 for (pio = zio_walk_parents(zio); pio != NULL; pio = pio_next) { 3103 zio_link_t *zl = zio->io_walk_link; 3104 pio_next = zio_walk_parents(zio); 3105 3106 if ((pio->io_flags & ZIO_FLAG_GODFATHER) && 3107 (zio->io_reexecute & ZIO_REEXECUTE_SUSPEND)) { 3108 zio_remove_child(pio, zio, zl); 3109 zio_notify_parent(pio, zio, ZIO_WAIT_DONE); 3110 } 3111 } 3112 3113 if ((pio = zio_unique_parent(zio)) != NULL) { 3114 /* 3115 * We're not a root i/o, so there's nothing to do 3116 * but notify our parent. 
Don't propagate errors 3117 * upward since we haven't permanently failed yet. 3118 */ 3119 ASSERT(!(zio->io_flags & ZIO_FLAG_GODFATHER)); 3120 zio->io_flags |= ZIO_FLAG_DONT_PROPAGATE; 3121 zio_notify_parent(pio, zio, ZIO_WAIT_DONE); 3122 } else if (zio->io_reexecute & ZIO_REEXECUTE_SUSPEND) { 3123 /* 3124 * We'd fail again if we reexecuted now, so suspend 3125 * until conditions improve (e.g. device comes online). 3126 */ 3127 zio_suspend(spa, zio); 3128 } else { 3129 /* 3130 * Reexecution is potentially a huge amount of work. 3131 * Hand it off to the otherwise-unused claim taskq. 3132 */ 3133#ifdef _KERNEL 3134 (void) taskq_dispatch_safe( 3135 spa->spa_zio_taskq[ZIO_TYPE_CLAIM][ZIO_TASKQ_ISSUE], 3136 (task_func_t *)zio_reexecute, zio, TQ_SLEEP, 3137 &zio->io_task); 3138#else 3139 (void) taskq_dispatch( 3140 spa->spa_zio_taskq[ZIO_TYPE_CLAIM][ZIO_TASKQ_ISSUE], 3141 (task_func_t *)zio_reexecute, zio, TQ_SLEEP); 3142#endif 3143 } 3144 return (ZIO_PIPELINE_STOP); 3145 } 3146 3147 ASSERT(zio->io_child_count == 0); 3148 ASSERT(zio->io_reexecute == 0); 3149 ASSERT(zio->io_error == 0 || (zio->io_flags & ZIO_FLAG_CANFAIL)); 3150 3151 /* 3152 * Report any checksum errors, since the I/O is complete. 3153 */ 3154 while (zio->io_cksum_report != NULL) { 3155 zio_cksum_report_t *zcr = zio->io_cksum_report; 3156 zio->io_cksum_report = zcr->zcr_next; 3157 zcr->zcr_next = NULL; 3158 zcr->zcr_finish(zcr, NULL); 3159 zfs_ereport_free_checksum(zcr); 3160 } 3161 3162 /* 3163 * It is the responsibility of the done callback to ensure that this 3164 * particular zio is no longer discoverable for adoption, and as 3165 * such, cannot acquire any new parents. 3166 */ 3167 if (zio->io_done) 3168 zio->io_done(zio); 3169 3170 mutex_enter(&zio->io_lock); 3171 zio->io_state[ZIO_WAIT_DONE] = 1; 3172 mutex_exit(&zio->io_lock); 3173 3174 for (pio = zio_walk_parents(zio); pio != NULL; pio = pio_next) { 3175 zio_link_t *zl = zio->io_walk_link; 3176 pio_next = zio_walk_parents(zio); 3177 zio_remove_child(pio, zio, zl); 3178 zio_notify_parent(pio, zio, ZIO_WAIT_DONE); 3179 } 3180 3181 if (zio->io_waiter != NULL) { 3182 mutex_enter(&zio->io_lock); 3183 zio->io_executor = NULL; 3184 cv_broadcast(&zio->io_cv); 3185 mutex_exit(&zio->io_lock); 3186 } else { 3187 zio_destroy(zio); 3188 } 3189 3190 return (ZIO_PIPELINE_STOP); 3191} 3192 3193/* 3194 * ========================================================================== 3195 * I/O pipeline definition 3196 * ========================================================================== 3197 */ 3198static zio_pipe_stage_t *zio_pipeline[] = { 3199 NULL, 3200 zio_read_bp_init, 3201 zio_free_bp_init, 3202 zio_issue_async, 3203 zio_write_bp_init, 3204 zio_checksum_generate, 3205 zio_nop_write, 3206 zio_ddt_read_start, 3207 zio_ddt_read_done, 3208 zio_ddt_write, 3209 zio_ddt_free, 3210 zio_gang_assemble, 3211 zio_gang_issue, 3212 zio_dva_allocate, 3213 zio_dva_free, 3214 zio_dva_claim, 3215 zio_ready, 3216 zio_vdev_io_start, 3217 zio_vdev_io_done, 3218 zio_vdev_io_assess, 3219 zio_checksum_verify, 3220 zio_done 3221}; 3222 3223/* dnp is the dnode for zb1->zb_object */ 3224boolean_t 3225zbookmark_is_before(const dnode_phys_t *dnp, const zbookmark_t *zb1, 3226 const zbookmark_t *zb2) 3227{ 3228 uint64_t zb1nextL0, zb2thisobj; 3229 3230 ASSERT(zb1->zb_objset == zb2->zb_objset); 3231 ASSERT(zb2->zb_level == 0); 3232 3233 /* 3234 * A bookmark in the deadlist is considered to be after 3235 * everything else. 
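 *
 * (zb1nextL0 below is the first level-0 blkid strictly after zb1's
 * subtree.  With the default 16K indirect blocks, dn_indblkshift -
 * SPA_BLKPTRSHIFT == 14 - 7 == 7, so a level-1 bookmark at blkid B
 * covers L0 blocks [B << 7, (B + 1) << 7).)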
3236 */ 3237 if (zb2->zb_object == DMU_DEADLIST_OBJECT) 3238 return (B_TRUE); 3239 3240 /* The objset_phys_t isn't before anything. */ 3241 if (dnp == NULL) 3242 return (B_FALSE); 3243 3244 zb1nextL0 = (zb1->zb_blkid + 1) << 3245 ((zb1->zb_level) * (dnp->dn_indblkshift - SPA_BLKPTRSHIFT)); 3246 3247 zb2thisobj = zb2->zb_object ? zb2->zb_object : 3248 zb2->zb_blkid << (DNODE_BLOCK_SHIFT - DNODE_SHIFT); 3249 3250 if (zb1->zb_object == DMU_META_DNODE_OBJECT) { 3251 uint64_t nextobj = zb1nextL0 * 3252 (dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT) >> DNODE_SHIFT; 3253 return (nextobj <= zb2thisobj); 3254 } 3255 3256 if (zb1->zb_object < zb2thisobj) 3257 return (B_TRUE); 3258 if (zb1->zb_object > zb2thisobj) 3259 return (B_FALSE); 3260 if (zb2->zb_object == DMU_META_DNODE_OBJECT) 3261 return (B_FALSE); 3262 return (zb1nextL0 <= zb2->zb_blkid); 3263} 3264
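#if 0
/*
 * Illustrative sketch (not compiled): the child-sizing rule from
 * zio_write_gang_block() above, extracted standalone.  Each gang
 * child gets an equal share of the remaining bytes, rounded up to
 * SPA_MINBLOCKSIZE; e.g. with SPA_GBH_NBLKPTRS == 3, a 102400-byte
 * write splits into children of 34304, 34304 and 33792 bytes.
 */
static void
gang_split_demo(uint64_t resid)
{
	for (int g = 0; resid != 0; g++) {
		uint64_t lsize = P2ROUNDUP(resid / (SPA_GBH_NBLKPTRS - g),
		    SPA_MINBLOCKSIZE);
		printf("child %d: %ju bytes\n", g, (uintmax_t)lsize);
		resid -= lsize;
	}
}
#endif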