zio.c revision 277618
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011, 2014 by Delphix. All rights reserved.
 * Copyright (c) 2011 Nexenta Systems, Inc. All rights reserved.
 */

#include <sys/sysmacros.h>
#include <sys/zfs_context.h>
#include <sys/fm/fs/zfs.h>
#include <sys/spa.h>
#include <sys/txg.h>
#include <sys/spa_impl.h>
#include <sys/vdev_impl.h>
#include <sys/zio_impl.h>
#include <sys/zio_compress.h>
#include <sys/zio_checksum.h>
#include <sys/dmu_objset.h>
#include <sys/arc.h>
#include <sys/ddt.h>
#include <sys/trim_map.h>
#include <sys/blkptr.h>
#include <sys/zfeature.h>

SYSCTL_DECL(_vfs_zfs);
SYSCTL_NODE(_vfs_zfs, OID_AUTO, zio, CTLFLAG_RW, 0, "ZFS ZIO");
#if defined(__amd64__)
static int zio_use_uma = 1;
#else
static int zio_use_uma = 0;
#endif
TUNABLE_INT("vfs.zfs.zio.use_uma", &zio_use_uma);
SYSCTL_INT(_vfs_zfs_zio, OID_AUTO, use_uma, CTLFLAG_RDTUN, &zio_use_uma, 0,
    "Use uma(9) for ZIO allocations");
static int zio_exclude_metadata = 0;
TUNABLE_INT("vfs.zfs.zio.exclude_metadata", &zio_exclude_metadata);
SYSCTL_INT(_vfs_zfs_zio, OID_AUTO, exclude_metadata, CTLFLAG_RDTUN,
    &zio_exclude_metadata, 0,
    "Exclude metadata buffers from dumps as well");

zio_trim_stats_t zio_trim_stats = {
	{ "bytes",		KSTAT_DATA_UINT64,
	    "Number of bytes successfully TRIMmed" },
	{ "success",		KSTAT_DATA_UINT64,
	    "Number of successful TRIM requests" },
	{ "unsupported",	KSTAT_DATA_UINT64,
	    "Number of TRIM requests that failed because TRIM is not supported" },
	{ "failed",		KSTAT_DATA_UINT64,
	    "Number of TRIM requests that failed for reasons other than not supported" },
};
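
/*
 * Note (added for exposition): kstats registered with module "zfs" and
 * class "misc" are typically surfaced on FreeBSD as sysctl nodes, so the
 * counters above would normally be readable as something like
 * "sysctl kstat.zfs.misc.zio_trim" once zio_init() has installed the
 * kstat below; the exact OID path is an assumption, not taken from this
 * file.
 */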

static kstat_t *zio_trim_ksp;

/*
 * ==========================================================================
 * I/O type descriptions
 * ==========================================================================
 */
const char *zio_type_name[ZIO_TYPES] = {
	"zio_null", "zio_read", "zio_write", "zio_free", "zio_claim",
	"zio_ioctl"
};

/*
 * ==========================================================================
 * I/O kmem caches
 * ==========================================================================
 */
kmem_cache_t *zio_cache;
kmem_cache_t *zio_link_cache;
kmem_cache_t *zio_buf_cache[SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT];
kmem_cache_t *zio_data_buf_cache[SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT];

#ifdef _KERNEL
extern vmem_t *zio_alloc_arena;
#endif

/*
 * The following actions directly affect the spa's sync-to-convergence logic.
 * The values below define the sync pass when we start performing the action.
 * Care should be taken when changing these values as they directly impact
 * spa_sync() performance.  Tuning these values may introduce subtle performance
 * pathologies and should only be done in the context of performance analysis.
 * These tunables will eventually be removed and replaced with #defines once
 * enough analysis has been done to determine optimal values.
 *
 * The 'zfs_sync_pass_deferred_free' pass must be greater than 1 to ensure that
 * regular blocks are not deferred.
 */
int zfs_sync_pass_deferred_free = 2; /* defer frees starting in this pass */
TUNABLE_INT("vfs.zfs.sync_pass_deferred_free", &zfs_sync_pass_deferred_free);
SYSCTL_INT(_vfs_zfs, OID_AUTO, sync_pass_deferred_free, CTLFLAG_RDTUN,
    &zfs_sync_pass_deferred_free, 0, "defer frees starting in this pass");
int zfs_sync_pass_dont_compress = 5; /* don't compress starting in this pass */
TUNABLE_INT("vfs.zfs.sync_pass_dont_compress", &zfs_sync_pass_dont_compress);
SYSCTL_INT(_vfs_zfs, OID_AUTO, sync_pass_dont_compress, CTLFLAG_RDTUN,
    &zfs_sync_pass_dont_compress, 0, "don't compress starting in this pass");
int zfs_sync_pass_rewrite = 2; /* rewrite new bps starting in this pass */
TUNABLE_INT("vfs.zfs.sync_pass_rewrite", &zfs_sync_pass_rewrite);
SYSCTL_INT(_vfs_zfs, OID_AUTO, sync_pass_rewrite, CTLFLAG_RDTUN,
    &zfs_sync_pass_rewrite, 0, "rewrite new bps starting in this pass");
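
/*
 * Worked example (added for exposition, using the defaults above): in
 * spa_sync() pass 1 frees are issued immediately, data is compressed, and
 * new blocks are allocated freely.  From pass 2 onward frees are deferred
 * to the next txg and block rewrites are preferred over fresh allocations
 * (zfs_sync_pass_deferred_free and zfs_sync_pass_rewrite are both 2), and
 * from pass 5 onward compression is skipped so that block sizes stop
 * changing and the txg can converge.
 */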

/*
 * An allocating zio is one that either currently has the DVA allocate
 * stage set or will have it later in its lifetime.
 */
#define	IO_IS_ALLOCATING(zio) ((zio)->io_orig_pipeline & ZIO_STAGE_DVA_ALLOCATE)

boolean_t	zio_requeue_io_start_cut_in_line = B_TRUE;

#ifdef ZFS_DEBUG
int zio_buf_debug_limit = 16384;
#else
int zio_buf_debug_limit = 0;
#endif

void
zio_init(void)
{
	size_t c;
	zio_cache = kmem_cache_create("zio_cache",
	    sizeof (zio_t), 0, NULL, NULL, NULL, NULL, NULL, 0);
	zio_link_cache = kmem_cache_create("zio_link_cache",
	    sizeof (zio_link_t), 0, NULL, NULL, NULL, NULL, NULL, 0);
	if (!zio_use_uma)
		goto out;

	/*
	 * For small buffers, we want a cache for each multiple of
	 * SPA_MINBLOCKSIZE.  For larger buffers, we want a cache
	 * for each quarter-power of 2.
	 */
	for (c = 0; c < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT; c++) {
		size_t size = (c + 1) << SPA_MINBLOCKSHIFT;
		size_t p2 = size;
		size_t align = 0;
		size_t cflags = (size > zio_buf_debug_limit) ? KMC_NODEBUG : 0;

		while (!ISP2(p2))
			p2 &= p2 - 1;

#ifdef illumos
#ifndef _KERNEL
		/*
		 * If we are using watchpoints, put each buffer on its own page,
		 * to eliminate the performance overhead of trapping to the
		 * kernel when modifying a non-watched buffer that shares the
		 * page with a watched buffer.
		 */
		if (arc_watch && !IS_P2ALIGNED(size, PAGESIZE))
			continue;
#endif
#endif /* illumos */
		if (size <= 4 * SPA_MINBLOCKSIZE) {
			align = SPA_MINBLOCKSIZE;
		} else if (IS_P2ALIGNED(size, p2 >> 2)) {
			align = MIN(p2 >> 2, PAGESIZE);
		}
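
		/*
		 * Illustrative example (added): for size = 12K, p2 rounds
		 * down to the largest power of 2 <= size, i.e. 8K, so 12K
		 * is a multiple of p2 >> 2 = 2K and gets a cache with 2K
		 * alignment.  A size such as 8.5K (8704 bytes) is not a
		 * quarter-power-of-2 multiple, so align stays 0 and that
		 * size is served by the next larger cache via the
		 * fill-forward loop below.
		 */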

		if (align != 0) {
			char name[36];
			(void) sprintf(name, "zio_buf_%lu", (ulong_t)size);
			zio_buf_cache[c] = kmem_cache_create(name, size,
			    align, NULL, NULL, NULL, NULL, NULL, cflags);

			/*
			 * Since zio_data bufs do not appear in crash dumps, we
			 * pass KMC_NOTOUCH so that no allocator metadata is
			 * stored with the buffers.
			 */
			(void) sprintf(name, "zio_data_buf_%lu", (ulong_t)size);
			zio_data_buf_cache[c] = kmem_cache_create(name, size,
			    align, NULL, NULL, NULL, NULL, NULL,
			    cflags | KMC_NOTOUCH | KMC_NODEBUG);
		}
	}

	while (--c != 0) {
		ASSERT(zio_buf_cache[c] != NULL);
		if (zio_buf_cache[c - 1] == NULL)
			zio_buf_cache[c - 1] = zio_buf_cache[c];

		ASSERT(zio_data_buf_cache[c] != NULL);
		if (zio_data_buf_cache[c - 1] == NULL)
			zio_data_buf_cache[c - 1] = zio_data_buf_cache[c];
	}
out:

	zio_inject_init();

	zio_trim_ksp = kstat_create("zfs", 0, "zio_trim", "misc",
	    KSTAT_TYPE_NAMED,
	    sizeof (zio_trim_stats) / sizeof (kstat_named_t),
	    KSTAT_FLAG_VIRTUAL);

	if (zio_trim_ksp != NULL) {
		zio_trim_ksp->ks_data = &zio_trim_stats;
		kstat_install(zio_trim_ksp);
	}
}

void
zio_fini(void)
{
	size_t c;
	kmem_cache_t *last_cache = NULL;
	kmem_cache_t *last_data_cache = NULL;

	for (c = 0; c < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT; c++) {
		if (zio_buf_cache[c] != last_cache) {
			last_cache = zio_buf_cache[c];
			kmem_cache_destroy(zio_buf_cache[c]);
		}
		zio_buf_cache[c] = NULL;

		if (zio_data_buf_cache[c] != last_data_cache) {
			last_data_cache = zio_data_buf_cache[c];
			kmem_cache_destroy(zio_data_buf_cache[c]);
		}
		zio_data_buf_cache[c] = NULL;
	}

	kmem_cache_destroy(zio_link_cache);
	kmem_cache_destroy(zio_cache);

	zio_inject_fini();

	if (zio_trim_ksp != NULL) {
		kstat_delete(zio_trim_ksp);
		zio_trim_ksp = NULL;
	}
}

/*
 * ==========================================================================
 * Allocate and free I/O buffers
 * ==========================================================================
 */

/*
 * Use zio_buf_alloc to allocate ZFS metadata.  This data will appear in a
 * crashdump if the kernel panics, so use it judiciously.  Obviously, it's
 * useful to inspect ZFS metadata, but if possible, we should avoid keeping
 * excess / transient data in-core during a crashdump.
 */
void *
zio_buf_alloc(size_t size)
{
	size_t c = (size - 1) >> SPA_MINBLOCKSHIFT;
	int flags = zio_exclude_metadata ? KM_NODEBUG : 0;

	VERIFY3U(c, <, SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);

	if (zio_use_uma)
		return (kmem_cache_alloc(zio_buf_cache[c], KM_PUSHPAGE));
	else
		return (kmem_alloc(size, KM_SLEEP|flags));
}
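
/*
 * Minimal usage sketch (added, hypothetical): a caller reading on-disk
 * metadata such as an indirect block would pair
 *
 *	void *buf = zio_buf_alloc(lsize);
 *	...
 *	zio_buf_free(buf, lsize);
 *
 * while file contents belong in zio_data_buf_alloc()/zio_data_buf_free()
 * below so that they stay out of crash dumps.  The allocator and size
 * must match between the alloc and the free.
 */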

/*
 * Use zio_data_buf_alloc to allocate data.  The data will not appear in a
 * crashdump if the kernel panics.  This exists so that we will limit the
 * amount of ZFS data that shows up in a kernel crashdump (thus reducing the
 * amount of kernel heap dumped to disk when the kernel panics).
 */
void *
zio_data_buf_alloc(size_t size)
{
	size_t c = (size - 1) >> SPA_MINBLOCKSHIFT;

	VERIFY3U(c, <, SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);

	if (zio_use_uma)
		return (kmem_cache_alloc(zio_data_buf_cache[c], KM_PUSHPAGE));
	else
		return (kmem_alloc(size, KM_SLEEP | KM_NODEBUG));
}

void
zio_buf_free(void *buf, size_t size)
{
	size_t c = (size - 1) >> SPA_MINBLOCKSHIFT;

	VERIFY3U(c, <, SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);

	if (zio_use_uma)
		kmem_cache_free(zio_buf_cache[c], buf);
	else
		kmem_free(buf, size);
}

void
zio_data_buf_free(void *buf, size_t size)
{
	size_t c = (size - 1) >> SPA_MINBLOCKSHIFT;

	VERIFY3U(c, <, SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);

	if (zio_use_uma)
		kmem_cache_free(zio_data_buf_cache[c], buf);
	else
		kmem_free(buf, size);
}

/*
 * ==========================================================================
 * Push and pop I/O transform buffers
 * ==========================================================================
 */
static void
zio_push_transform(zio_t *zio, void *data, uint64_t size, uint64_t bufsize,
    zio_transform_func_t *transform)
{
	zio_transform_t *zt = kmem_alloc(sizeof (zio_transform_t), KM_SLEEP);

	zt->zt_orig_data = zio->io_data;
	zt->zt_orig_size = zio->io_size;
	zt->zt_bufsize = bufsize;
	zt->zt_transform = transform;

	zt->zt_next = zio->io_transform_stack;
	zio->io_transform_stack = zt;

	zio->io_data = data;
	zio->io_size = size;
}

static void
zio_pop_transforms(zio_t *zio)
{
	zio_transform_t *zt;

	while ((zt = zio->io_transform_stack) != NULL) {
		if (zt->zt_transform != NULL)
			zt->zt_transform(zio,
			    zt->zt_orig_data, zt->zt_orig_size);

		if (zt->zt_bufsize != 0)
			zio_buf_free(zio->io_data, zt->zt_bufsize);

		zio->io_data = zt->zt_orig_data;
		zio->io_size = zt->zt_orig_size;
		zio->io_transform_stack = zt->zt_next;

		kmem_free(zt, sizeof (zio_transform_t));
	}
}
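
/*
 * Illustrative walk-through (added): on a compressed read,
 * zio_read_bp_init() pushes a psize-sized scratch buffer with
 * zio_decompress as the transform.  The device fills the scratch buffer,
 * and zio_pop_transforms() then runs zio_decompress() to inflate the data
 * into the caller's original buffer before restoring io_data and io_size.
 * Transforms pop in LIFO order, so nested transforms (e.g. a subblock
 * copy atop decompression) unwind in the reverse of the order they were
 * pushed.
 */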

/*
 * ==========================================================================
 * I/O transform callbacks for subblocks and decompression
 * ==========================================================================
 */
static void
zio_subblock(zio_t *zio, void *data, uint64_t size)
{
	ASSERT(zio->io_size > size);

	if (zio->io_type == ZIO_TYPE_READ)
		bcopy(zio->io_data, data, size);
}

static void
zio_decompress(zio_t *zio, void *data, uint64_t size)
{
	if (zio->io_error == 0 &&
	    zio_decompress_data(BP_GET_COMPRESS(zio->io_bp),
	    zio->io_data, data, zio->io_size, size) != 0)
		zio->io_error = SET_ERROR(EIO);
}

/*
 * ==========================================================================
 * I/O parent/child relationships and pipeline interlocks
 * ==========================================================================
 */
/*
 * NOTE - Callers to zio_walk_parents() and zio_walk_children must
 *        continue calling these functions until they return NULL.
 *        Otherwise, the next caller will pick up the list walk in
 *        some indeterminate state.  (Otherwise every caller would
 *        have to pass in a cookie to keep the state represented by
 *        io_walk_link, which gets annoying.)
 */
zio_t *
zio_walk_parents(zio_t *cio)
{
	zio_link_t *zl = cio->io_walk_link;
	list_t *pl = &cio->io_parent_list;

	zl = (zl == NULL) ? list_head(pl) : list_next(pl, zl);
	cio->io_walk_link = zl;

	if (zl == NULL)
		return (NULL);

	ASSERT(zl->zl_child == cio);
	return (zl->zl_parent);
}

zio_t *
zio_walk_children(zio_t *pio)
{
	zio_link_t *zl = pio->io_walk_link;
	list_t *cl = &pio->io_child_list;

	zl = (zl == NULL) ? list_head(cl) : list_next(cl, zl);
	pio->io_walk_link = zl;

	if (zl == NULL)
		return (NULL);

	ASSERT(zl->zl_parent == pio);
	return (zl->zl_child);
}

zio_t *
zio_unique_parent(zio_t *cio)
{
	zio_t *pio = zio_walk_parents(cio);

	VERIFY(zio_walk_parents(cio) == NULL);
	return (pio);
}

void
zio_add_child(zio_t *pio, zio_t *cio)
{
	zio_link_t *zl = kmem_cache_alloc(zio_link_cache, KM_SLEEP);

	/*
	 * Logical I/Os can have logical, gang, or vdev children.
	 * Gang I/Os can have gang or vdev children.
	 * Vdev I/Os can only have vdev children.
	 * The following ASSERT captures all of these constraints.
	 */
	ASSERT(cio->io_child_type <= pio->io_child_type);

	zl->zl_parent = pio;
	zl->zl_child = cio;

	mutex_enter(&cio->io_lock);
	mutex_enter(&pio->io_lock);

	ASSERT(pio->io_state[ZIO_WAIT_DONE] == 0);

	for (int w = 0; w < ZIO_WAIT_TYPES; w++)
		pio->io_children[cio->io_child_type][w] += !cio->io_state[w];

	list_insert_head(&pio->io_child_list, zl);
	list_insert_head(&cio->io_parent_list, zl);

	pio->io_child_count++;
	cio->io_parent_count++;

	mutex_exit(&pio->io_lock);
	mutex_exit(&cio->io_lock);
}

static void
zio_remove_child(zio_t *pio, zio_t *cio, zio_link_t *zl)
{
	ASSERT(zl->zl_parent == pio);
	ASSERT(zl->zl_child == cio);

	mutex_enter(&cio->io_lock);
	mutex_enter(&pio->io_lock);

	list_remove(&pio->io_child_list, zl);
	list_remove(&cio->io_parent_list, zl);

	pio->io_child_count--;
	cio->io_parent_count--;

	mutex_exit(&pio->io_lock);
	mutex_exit(&cio->io_lock);

	kmem_cache_free(zio_link_cache, zl);
}

static boolean_t
zio_wait_for_children(zio_t *zio, enum zio_child child, enum zio_wait_type wait)
{
	uint64_t *countp = &zio->io_children[child][wait];
	boolean_t waiting = B_FALSE;

	mutex_enter(&zio->io_lock);
	ASSERT(zio->io_stall == NULL);
	if (*countp != 0) {
		zio->io_stage >>= 1;
		zio->io_stall = countp;
		waiting = B_TRUE;
	}
	mutex_exit(&zio->io_lock);

	return (waiting);
}

static void
zio_notify_parent(zio_t *pio, zio_t *zio, enum zio_wait_type wait)
{
	uint64_t *countp = &pio->io_children[zio->io_child_type][wait];
	int *errorp = &pio->io_child_error[zio->io_child_type];

	mutex_enter(&pio->io_lock);
	if (zio->io_error && !(zio->io_flags & ZIO_FLAG_DONT_PROPAGATE))
		*errorp = zio_worst_error(*errorp, zio->io_error);
	pio->io_reexecute |= zio->io_reexecute;
	ASSERT3U(*countp, >, 0);

	(*countp)--;

	if (*countp == 0 && pio->io_stall == countp) {
		pio->io_stall = NULL;
		mutex_exit(&pio->io_lock);
		zio_execute(pio);
	} else {
		mutex_exit(&pio->io_lock);
	}
}

static void
zio_inherit_child_errors(zio_t *zio, enum zio_child c)
{
	if (zio->io_child_error[c] != 0 && zio->io_error == 0)
		zio->io_error = zio->io_child_error[c];
}
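
/*
 * Usage sketch (added, hypothetical): because io_walk_link is the only
 * walk cookie, every walk must run to completion, e.g.
 *
 *	zio_t *cio;
 *	while ((cio = zio_walk_children(pio)) != NULL)
 *		do_something(cio);
 *
 * Breaking out of the loop early would leave io_walk_link pointing into
 * the list and corrupt the next caller's walk, per the NOTE above.
 */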

/*
 * ==========================================================================
 * Create the various types of I/O (read, write, free, etc)
 * ==========================================================================
 */
static zio_t *
zio_create(zio_t *pio, spa_t *spa, uint64_t txg, const blkptr_t *bp,
    void *data, uint64_t size, zio_done_func_t *done, void *private,
    zio_type_t type, zio_priority_t priority, enum zio_flag flags,
    vdev_t *vd, uint64_t offset, const zbookmark_phys_t *zb,
    enum zio_stage stage, enum zio_stage pipeline)
{
	zio_t *zio;

	ASSERT3U(type == ZIO_TYPE_FREE || size, <=, SPA_MAXBLOCKSIZE);
	ASSERT(P2PHASE(size, SPA_MINBLOCKSIZE) == 0);
	ASSERT(P2PHASE(offset, SPA_MINBLOCKSIZE) == 0);

	ASSERT(!vd || spa_config_held(spa, SCL_STATE_ALL, RW_READER));
	ASSERT(!bp || !(flags & ZIO_FLAG_CONFIG_WRITER));
	ASSERT(vd || stage == ZIO_STAGE_OPEN);

	zio = kmem_cache_alloc(zio_cache, KM_SLEEP);
	bzero(zio, sizeof (zio_t));

	mutex_init(&zio->io_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&zio->io_cv, NULL, CV_DEFAULT, NULL);

	list_create(&zio->io_parent_list, sizeof (zio_link_t),
	    offsetof(zio_link_t, zl_parent_node));
	list_create(&zio->io_child_list, sizeof (zio_link_t),
	    offsetof(zio_link_t, zl_child_node));

	if (vd != NULL)
		zio->io_child_type = ZIO_CHILD_VDEV;
	else if (flags & ZIO_FLAG_GANG_CHILD)
		zio->io_child_type = ZIO_CHILD_GANG;
	else if (flags & ZIO_FLAG_DDT_CHILD)
		zio->io_child_type = ZIO_CHILD_DDT;
	else
		zio->io_child_type = ZIO_CHILD_LOGICAL;

	if (bp != NULL) {
		zio->io_bp = (blkptr_t *)bp;
		zio->io_bp_copy = *bp;
		zio->io_bp_orig = *bp;
		if (type != ZIO_TYPE_WRITE ||
		    zio->io_child_type == ZIO_CHILD_DDT)
			zio->io_bp = &zio->io_bp_copy;	/* so caller can free */
		if (zio->io_child_type == ZIO_CHILD_LOGICAL)
			zio->io_logical = zio;
		if (zio->io_child_type > ZIO_CHILD_GANG && BP_IS_GANG(bp))
			pipeline |= ZIO_GANG_STAGES;
	}

	zio->io_spa = spa;
	zio->io_txg = txg;
	zio->io_done = done;
	zio->io_private = private;
	zio->io_type = type;
	zio->io_priority = priority;
	zio->io_vd = vd;
	zio->io_offset = offset;
	zio->io_orig_data = zio->io_data = data;
	zio->io_orig_size = zio->io_size = size;
	zio->io_orig_flags = zio->io_flags = flags;
	zio->io_orig_stage = zio->io_stage = stage;
	zio->io_orig_pipeline = zio->io_pipeline = pipeline;

	zio->io_state[ZIO_WAIT_READY] = (stage >= ZIO_STAGE_READY);
	zio->io_state[ZIO_WAIT_DONE] = (stage >= ZIO_STAGE_DONE);

	if (zb != NULL)
		zio->io_bookmark = *zb;

	if (pio != NULL) {
		if (zio->io_logical == NULL)
			zio->io_logical = pio->io_logical;
		if (zio->io_child_type == ZIO_CHILD_GANG)
			zio->io_gang_leader = pio->io_gang_leader;
		zio_add_child(pio, zio);
	}

	return (zio);
}

static void
zio_destroy(zio_t *zio)
{
	list_destroy(&zio->io_parent_list);
	list_destroy(&zio->io_child_list);
	mutex_destroy(&zio->io_lock);
	cv_destroy(&zio->io_cv);
	kmem_cache_free(zio_cache, zio);
}

zio_t *
zio_null(zio_t *pio, spa_t *spa, vdev_t *vd, zio_done_func_t *done,
    void *private, enum zio_flag flags)
{
	zio_t *zio;

	zio = zio_create(pio, spa, 0, NULL, NULL, 0, done, private,
	    ZIO_TYPE_NULL, ZIO_PRIORITY_NOW, flags, vd, 0, NULL,
	    ZIO_STAGE_OPEN, ZIO_INTERLOCK_PIPELINE);

	return (zio);
}

zio_t *
zio_root(spa_t *spa, zio_done_func_t *done, void *private, enum zio_flag flags)
{
	return (zio_null(NULL, spa, NULL, done, private, flags));
}
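
/*
 * Usage sketch (added, hypothetical): a root/null zio is commonly used to
 * aggregate a batch of asynchronous I/Os so the caller can wait on all of
 * them at once:
 *
 *	zio_t *rio = zio_root(spa, NULL, NULL, ZIO_FLAG_CANFAIL);
 *	for each bp:
 *		zio_nowait(zio_read(rio, spa, bp, buf, size, NULL, NULL,
 *		    ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL, &zb));
 *	error = zio_wait(rio);
 *
 * The root zio completes only after all of its children notify it.
 */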

void
zfs_blkptr_verify(spa_t *spa, const blkptr_t *bp)
{
	if (!DMU_OT_IS_VALID(BP_GET_TYPE(bp))) {
		zfs_panic_recover("blkptr at %p has invalid TYPE %llu",
		    bp, (longlong_t)BP_GET_TYPE(bp));
	}
	if (BP_GET_CHECKSUM(bp) >= ZIO_CHECKSUM_FUNCTIONS ||
	    BP_GET_CHECKSUM(bp) <= ZIO_CHECKSUM_ON) {
		zfs_panic_recover("blkptr at %p has invalid CHECKSUM %llu",
		    bp, (longlong_t)BP_GET_CHECKSUM(bp));
	}
	if (BP_GET_COMPRESS(bp) >= ZIO_COMPRESS_FUNCTIONS ||
	    BP_GET_COMPRESS(bp) <= ZIO_COMPRESS_ON) {
		zfs_panic_recover("blkptr at %p has invalid COMPRESS %llu",
		    bp, (longlong_t)BP_GET_COMPRESS(bp));
	}
	if (BP_GET_LSIZE(bp) > SPA_MAXBLOCKSIZE) {
		zfs_panic_recover("blkptr at %p has invalid LSIZE %llu",
		    bp, (longlong_t)BP_GET_LSIZE(bp));
	}
	if (BP_GET_PSIZE(bp) > SPA_MAXBLOCKSIZE) {
		zfs_panic_recover("blkptr at %p has invalid PSIZE %llu",
		    bp, (longlong_t)BP_GET_PSIZE(bp));
	}

	if (BP_IS_EMBEDDED(bp)) {
		if (BPE_GET_ETYPE(bp) > NUM_BP_EMBEDDED_TYPES) {
			zfs_panic_recover("blkptr at %p has invalid ETYPE %llu",
			    bp, (longlong_t)BPE_GET_ETYPE(bp));
		}
	}

	/*
	 * Pool-specific checks.
	 *
	 * Note: it would be nice to verify that the blk_birth and
	 * BP_PHYSICAL_BIRTH() are not too large.  However, spa_freeze()
	 * allows the birth time of log blocks (and dmu_sync()-ed blocks
	 * that are in the log) to be arbitrarily large.
	 */
	for (int i = 0; i < BP_GET_NDVAS(bp); i++) {
		uint64_t vdevid = DVA_GET_VDEV(&bp->blk_dva[i]);
		if (vdevid >= spa->spa_root_vdev->vdev_children) {
			zfs_panic_recover("blkptr at %p DVA %u has invalid "
			    "VDEV %llu",
			    bp, i, (longlong_t)vdevid);
			continue;
		}
		vdev_t *vd = spa->spa_root_vdev->vdev_child[vdevid];
		if (vd == NULL) {
			zfs_panic_recover("blkptr at %p DVA %u has invalid "
			    "VDEV %llu",
			    bp, i, (longlong_t)vdevid);
			continue;
		}
		if (vd->vdev_ops == &vdev_hole_ops) {
			zfs_panic_recover("blkptr at %p DVA %u has hole "
			    "VDEV %llu",
			    bp, i, (longlong_t)vdevid);
			continue;
		}
		if (vd->vdev_ops == &vdev_missing_ops) {
			/*
			 * "missing" vdevs are valid during import, but we
			 * don't have their detailed info (e.g. asize), so
			 * we can't perform any more checks on them.
			 */
			continue;
		}
		uint64_t offset = DVA_GET_OFFSET(&bp->blk_dva[i]);
		uint64_t asize = DVA_GET_ASIZE(&bp->blk_dva[i]);
		if (BP_IS_GANG(bp))
			asize = vdev_psize_to_asize(vd, SPA_GANGBLOCKSIZE);
		if (offset + asize > vd->vdev_asize) {
			zfs_panic_recover("blkptr at %p DVA %u has invalid "
			    "OFFSET %llu",
			    bp, i, (longlong_t)offset);
		}
	}
}
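
/*
 * Note (added for exposition): zfs_panic_recover() panics by default; an
 * administrator who sets the ZFS "recover" tunable instead gets a logged
 * warning, which lets a pool with a damaged blkptr be imported far enough
 * to salvage data.  The exact tunable name varies by platform, so it is
 * not spelled out here.
 */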

zio_t *
zio_read(zio_t *pio, spa_t *spa, const blkptr_t *bp,
    void *data, uint64_t size, zio_done_func_t *done, void *private,
    zio_priority_t priority, enum zio_flag flags, const zbookmark_phys_t *zb)
{
	zio_t *zio;

	zfs_blkptr_verify(spa, bp);

	zio = zio_create(pio, spa, BP_PHYSICAL_BIRTH(bp), bp,
	    data, size, done, private,
	    ZIO_TYPE_READ, priority, flags, NULL, 0, zb,
	    ZIO_STAGE_OPEN, (flags & ZIO_FLAG_DDT_CHILD) ?
	    ZIO_DDT_CHILD_READ_PIPELINE : ZIO_READ_PIPELINE);

	return (zio);
}

zio_t *
zio_write(zio_t *pio, spa_t *spa, uint64_t txg, blkptr_t *bp,
    void *data, uint64_t size, const zio_prop_t *zp,
    zio_done_func_t *ready, zio_done_func_t *physdone, zio_done_func_t *done,
    void *private,
    zio_priority_t priority, enum zio_flag flags, const zbookmark_phys_t *zb)
{
	zio_t *zio;

	ASSERT(zp->zp_checksum >= ZIO_CHECKSUM_OFF &&
	    zp->zp_checksum < ZIO_CHECKSUM_FUNCTIONS &&
	    zp->zp_compress >= ZIO_COMPRESS_OFF &&
	    zp->zp_compress < ZIO_COMPRESS_FUNCTIONS &&
	    DMU_OT_IS_VALID(zp->zp_type) &&
	    zp->zp_level < 32 &&
	    zp->zp_copies > 0 &&
	    zp->zp_copies <= spa_max_replication(spa));

	zio = zio_create(pio, spa, txg, bp, data, size, done, private,
	    ZIO_TYPE_WRITE, priority, flags, NULL, 0, zb,
	    ZIO_STAGE_OPEN, (flags & ZIO_FLAG_DDT_CHILD) ?
	    ZIO_DDT_CHILD_WRITE_PIPELINE : ZIO_WRITE_PIPELINE);

	zio->io_ready = ready;
	zio->io_physdone = physdone;
	zio->io_prop = *zp;

	/*
	 * Data can be NULL if we are going to call zio_write_override() to
	 * provide the already-allocated BP.  But we may need the data to
	 * verify a dedup hit (if requested).  In this case, don't try to
	 * dedup (just take the already-allocated BP verbatim).
	 */
	if (data == NULL && zio->io_prop.zp_dedup_verify) {
		zio->io_prop.zp_dedup = zio->io_prop.zp_dedup_verify = B_FALSE;
	}

	return (zio);
}

zio_t *
zio_rewrite(zio_t *pio, spa_t *spa, uint64_t txg, blkptr_t *bp, void *data,
    uint64_t size, zio_done_func_t *done, void *private,
    zio_priority_t priority, enum zio_flag flags, zbookmark_phys_t *zb)
{
	zio_t *zio;

	zio = zio_create(pio, spa, txg, bp, data, size, done, private,
	    ZIO_TYPE_WRITE, priority, flags, NULL, 0, zb,
	    ZIO_STAGE_OPEN, ZIO_REWRITE_PIPELINE);

	return (zio);
}

void
zio_write_override(zio_t *zio, blkptr_t *bp, int copies, boolean_t nopwrite)
{
	ASSERT(zio->io_type == ZIO_TYPE_WRITE);
	ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
	ASSERT(zio->io_stage == ZIO_STAGE_OPEN);
	ASSERT(zio->io_txg == spa_syncing_txg(zio->io_spa));

	/*
	 * We must reset the io_prop to match the values that existed
	 * when the bp was first written by dmu_sync() keeping in mind
	 * that nopwrite and dedup are mutually exclusive.
	 */
	zio->io_prop.zp_dedup = nopwrite ? B_FALSE : zio->io_prop.zp_dedup;
	zio->io_prop.zp_nopwrite = nopwrite;
	zio->io_prop.zp_copies = copies;
	zio->io_bp_override = bp;
}

void
zio_free(spa_t *spa, uint64_t txg, const blkptr_t *bp)
{

	/*
	 * The check for EMBEDDED is a performance optimization.  We
	 * process the free here (by ignoring it) rather than
	 * putting it on the list and then processing it in zio_free_sync().
	 */
	if (BP_IS_EMBEDDED(bp))
		return;
	metaslab_check_free(spa, bp);

	/*
	 * Frees that are for the currently-syncing txg, are not going to be
	 * deferred, and which will not need to do a read (i.e. not GANG or
	 * DEDUP), can be processed immediately.  Otherwise, put them on the
	 * in-memory list for later processing.
	 */
	if (zfs_trim_enabled || BP_IS_GANG(bp) || BP_GET_DEDUP(bp) ||
	    txg != spa->spa_syncing_txg ||
	    spa_sync_pass(spa) >= zfs_sync_pass_deferred_free) {
		bplist_append(&spa->spa_free_bplist[txg & TXG_MASK], bp);
	} else {
		VERIFY0(zio_wait(zio_free_sync(NULL, spa, txg, bp,
		    BP_GET_PSIZE(bp), 0)));
	}
}

zio_t *
zio_free_sync(zio_t *pio, spa_t *spa, uint64_t txg, const blkptr_t *bp,
    uint64_t size, enum zio_flag flags)
{
	zio_t *zio;
	enum zio_stage stage = ZIO_FREE_PIPELINE;

	ASSERT(!BP_IS_HOLE(bp));
	ASSERT(spa_syncing_txg(spa) == txg);
	ASSERT(spa_sync_pass(spa) < zfs_sync_pass_deferred_free);

	if (BP_IS_EMBEDDED(bp))
		return (zio_null(pio, spa, NULL, NULL, NULL, 0));

	metaslab_check_free(spa, bp);
	arc_freed(spa, bp);

	if (zfs_trim_enabled)
		stage |= ZIO_STAGE_ISSUE_ASYNC | ZIO_STAGE_VDEV_IO_START |
		    ZIO_STAGE_VDEV_IO_ASSESS;
	/*
	 * GANG and DEDUP blocks can induce a read (for the gang block header,
	 * or the DDT), so issue them asynchronously so that this thread is
	 * not tied up.
	 */
	else if (BP_IS_GANG(bp) || BP_GET_DEDUP(bp))
		stage |= ZIO_STAGE_ISSUE_ASYNC;

	flags |= ZIO_FLAG_DONT_QUEUE;

	zio = zio_create(pio, spa, txg, bp, NULL, size,
	    NULL, NULL, ZIO_TYPE_FREE, ZIO_PRIORITY_NOW, flags,
	    NULL, 0, NULL, ZIO_STAGE_OPEN, stage);

	return (zio);
}
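
/*
 * Note (added for exposition): when TRIM is enabled every free takes the
 * deferred path above, and its FREE zio keeps the vdev I/O stages so the
 * trim map can translate the freed range into TRIM requests for the
 * underlying device; the zio_trim_stats counters at the top of this file
 * track how those requests fare.
 */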

zio_t *
zio_claim(zio_t *pio, spa_t *spa, uint64_t txg, const blkptr_t *bp,
    zio_done_func_t *done, void *private, enum zio_flag flags)
{
	zio_t *zio;

	dprintf_bp(bp, "claiming in txg %llu", txg);

	if (BP_IS_EMBEDDED(bp))
		return (zio_null(pio, spa, NULL, NULL, NULL, 0));

	/*
	 * A claim is an allocation of a specific block.  Claims are needed
	 * to support immediate writes in the intent log.  The issue is that
	 * immediate writes contain committed data, but in a txg that was
	 * *not* committed.  Upon opening the pool after an unclean shutdown,
	 * the intent log claims all blocks that contain immediate write data
	 * so that the SPA knows they're in use.
	 *
	 * All claims *must* be resolved in the first txg -- before the SPA
	 * starts allocating blocks -- so that nothing is allocated twice.
	 * If txg == 0 we just verify that the block is claimable.
	 */
	ASSERT3U(spa->spa_uberblock.ub_rootbp.blk_birth, <, spa_first_txg(spa));
	ASSERT(txg == spa_first_txg(spa) || txg == 0);
	ASSERT(!BP_GET_DEDUP(bp) || !spa_writeable(spa));	/* zdb(1M) */

	zio = zio_create(pio, spa, txg, bp, NULL, BP_GET_PSIZE(bp),
	    done, private, ZIO_TYPE_CLAIM, ZIO_PRIORITY_NOW, flags,
	    NULL, 0, NULL, ZIO_STAGE_OPEN, ZIO_CLAIM_PIPELINE);

	return (zio);
}

zio_t *
zio_ioctl(zio_t *pio, spa_t *spa, vdev_t *vd, int cmd, uint64_t offset,
    uint64_t size, zio_done_func_t *done, void *private,
    zio_priority_t priority, enum zio_flag flags)
{
	zio_t *zio;
	int c;

	if (vd->vdev_children == 0) {
		zio = zio_create(pio, spa, 0, NULL, NULL, size, done, private,
		    ZIO_TYPE_IOCTL, priority, flags, vd, offset, NULL,
		    ZIO_STAGE_OPEN, ZIO_IOCTL_PIPELINE);

		zio->io_cmd = cmd;
	} else {
		zio = zio_null(pio, spa, NULL, NULL, NULL, flags);

		for (c = 0; c < vd->vdev_children; c++)
			zio_nowait(zio_ioctl(zio, spa, vd->vdev_child[c], cmd,
			    offset, size, done, private, priority, flags));
	}

	return (zio);
}

zio_t *
zio_read_phys(zio_t *pio, vdev_t *vd, uint64_t offset, uint64_t size,
    void *data, int checksum, zio_done_func_t *done, void *private,
    zio_priority_t priority, enum zio_flag flags, boolean_t labels)
{
	zio_t *zio;

	ASSERT(vd->vdev_children == 0);
	ASSERT(!labels || offset + size <= VDEV_LABEL_START_SIZE ||
	    offset >= vd->vdev_psize - VDEV_LABEL_END_SIZE);
	ASSERT3U(offset + size, <=, vd->vdev_psize);

	zio = zio_create(pio, vd->vdev_spa, 0, NULL, data, size, done, private,
	    ZIO_TYPE_READ, priority, flags | ZIO_FLAG_PHYSICAL, vd, offset,
	    NULL, ZIO_STAGE_OPEN, ZIO_READ_PHYS_PIPELINE);

	zio->io_prop.zp_checksum = checksum;

	return (zio);
}

zio_t *
zio_write_phys(zio_t *pio, vdev_t *vd, uint64_t offset, uint64_t size,
    void *data, int checksum, zio_done_func_t *done, void *private,
    zio_priority_t priority, enum zio_flag flags, boolean_t labels)
{
	zio_t *zio;

	ASSERT(vd->vdev_children == 0);
	ASSERT(!labels || offset + size <= VDEV_LABEL_START_SIZE ||
	    offset >= vd->vdev_psize - VDEV_LABEL_END_SIZE);
	ASSERT3U(offset + size, <=, vd->vdev_psize);

	zio = zio_create(pio, vd->vdev_spa, 0, NULL, data, size, done, private,
	    ZIO_TYPE_WRITE, priority, flags | ZIO_FLAG_PHYSICAL, vd, offset,
	    NULL, ZIO_STAGE_OPEN, ZIO_WRITE_PHYS_PIPELINE);

	zio->io_prop.zp_checksum = checksum;

	if (zio_checksum_table[checksum].ci_eck) {
		/*
		 * zec checksums are necessarily destructive -- they modify
		 * the end of the write buffer to hold the verifier/checksum.
		 * Therefore, we must make a local copy in case the data is
		 * being written to multiple places in parallel.
		 */
		void *wbuf = zio_buf_alloc(size);
		bcopy(data, wbuf, size);
		zio_push_transform(zio, wbuf, size, size, NULL);
	}

	return (zio);
}
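
/*
 * Example of why the copy above matters (added, illustrative): label
 * writes push the same uberblock buffer to several locations at once;
 * each zio stamps its own embedded checksum (zio_eck_t) at the tail of
 * its private copy, so the parallel writes cannot race on one another's
 * verifier fields.
 */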

/*
 * Create a child I/O to do some work for us.
 */
zio_t *
zio_vdev_child_io(zio_t *pio, blkptr_t *bp, vdev_t *vd, uint64_t offset,
    void *data, uint64_t size, int type, zio_priority_t priority,
    enum zio_flag flags, zio_done_func_t *done, void *private)
{
	enum zio_stage pipeline = ZIO_VDEV_CHILD_PIPELINE;
	zio_t *zio;

	ASSERT(vd->vdev_parent ==
	    (pio->io_vd ? pio->io_vd : pio->io_spa->spa_root_vdev));

	if (type == ZIO_TYPE_READ && bp != NULL) {
		/*
		 * If we have the bp, then the child should perform the
		 * checksum and the parent need not.  This pushes error
		 * detection as close to the leaves as possible and
		 * eliminates redundant checksums in the interior nodes.
		 */
		pipeline |= ZIO_STAGE_CHECKSUM_VERIFY;
		pio->io_pipeline &= ~ZIO_STAGE_CHECKSUM_VERIFY;
	}

	/* Not all IO types require vdev io done stage e.g. free */
	if (!(pio->io_pipeline & ZIO_STAGE_VDEV_IO_DONE))
		pipeline &= ~ZIO_STAGE_VDEV_IO_DONE;

	if (vd->vdev_children == 0)
		offset += VDEV_LABEL_START_SIZE;

	flags |= ZIO_VDEV_CHILD_FLAGS(pio) | ZIO_FLAG_DONT_PROPAGATE;

	/*
	 * If we've decided to do a repair, the write is not speculative --
	 * even if the original read was.
	 */
	if (flags & ZIO_FLAG_IO_REPAIR)
		flags &= ~ZIO_FLAG_SPECULATIVE;

	zio = zio_create(pio, pio->io_spa, pio->io_txg, bp, data, size,
	    done, private, type, priority, flags, vd, offset, &pio->io_bookmark,
	    ZIO_STAGE_VDEV_IO_START >> 1, pipeline);

	zio->io_physdone = pio->io_physdone;
	if (vd->vdev_ops->vdev_op_leaf && zio->io_logical != NULL)
		zio->io_logical->io_phys_children++;

	return (zio);
}

zio_t *
zio_vdev_delegated_io(vdev_t *vd, uint64_t offset, void *data, uint64_t size,
    int type, zio_priority_t priority, enum zio_flag flags,
    zio_done_func_t *done, void *private)
{
	zio_t *zio;

	ASSERT(vd->vdev_ops->vdev_op_leaf);

	zio = zio_create(NULL, vd->vdev_spa, 0, NULL,
	    data, size, done, private, type, priority,
	    flags | ZIO_FLAG_CANFAIL | ZIO_FLAG_DONT_RETRY | ZIO_FLAG_DELEGATED,
	    vd, offset, NULL,
	    ZIO_STAGE_VDEV_IO_START >> 1, ZIO_VDEV_CHILD_PIPELINE);

	return (zio);
}

void
zio_flush(zio_t *zio, vdev_t *vd)
{
	zio_nowait(zio_ioctl(zio, zio->io_spa, vd, DKIOCFLUSHWRITECACHE, 0, 0,
	    NULL, NULL, ZIO_PRIORITY_NOW,
	    ZIO_FLAG_CANFAIL | ZIO_FLAG_DONT_PROPAGATE | ZIO_FLAG_DONT_RETRY));
}

zio_t *
zio_trim(zio_t *zio, spa_t *spa, vdev_t *vd, uint64_t offset, uint64_t size)
{

	ASSERT(vd->vdev_ops->vdev_op_leaf);

	return (zio_create(zio, spa, 0, NULL, NULL, size, NULL, NULL,
	    ZIO_TYPE_FREE, ZIO_PRIORITY_TRIM, ZIO_FLAG_DONT_AGGREGATE |
	    ZIO_FLAG_CANFAIL | ZIO_FLAG_DONT_PROPAGATE | ZIO_FLAG_DONT_RETRY,
	    vd, offset, NULL, ZIO_STAGE_OPEN, ZIO_FREE_PHYS_PIPELINE));
}
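
/*
 * Note (added for exposition): both helpers above are advisory toward the
 * device.  zio_flush() fans a DKIOCFLUSHWRITECACHE ioctl out to every
 * leaf via zio_ioctl(), and zio_trim() issues a physical FREE against a
 * single leaf; both carry ZIO_FLAG_CANFAIL and ZIO_FLAG_DONT_RETRY since
 * a device that lacks cache-flush or TRIM support should not stall or
 * fail the pool.
 */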

void
zio_shrink(zio_t *zio, uint64_t size)
{
	ASSERT(zio->io_executor == NULL);
	ASSERT(zio->io_orig_size == zio->io_size);
	ASSERT(size <= zio->io_size);

	/*
	 * We don't shrink for raidz because of problems with the
	 * reconstruction when reading back less than the block size.
	 * Note, BP_IS_RAIDZ() assumes no compression.
	 */
	ASSERT(BP_GET_COMPRESS(zio->io_bp) == ZIO_COMPRESS_OFF);
	if (!BP_IS_RAIDZ(zio->io_bp))
		zio->io_orig_size = zio->io_size = size;
}

/*
 * ==========================================================================
 * Prepare to read and write logical blocks
 * ==========================================================================
 */

static int
zio_read_bp_init(zio_t *zio)
{
	blkptr_t *bp = zio->io_bp;

	if (BP_GET_COMPRESS(bp) != ZIO_COMPRESS_OFF &&
	    zio->io_child_type == ZIO_CHILD_LOGICAL &&
	    !(zio->io_flags & ZIO_FLAG_RAW)) {
		uint64_t psize =
		    BP_IS_EMBEDDED(bp) ? BPE_GET_PSIZE(bp) : BP_GET_PSIZE(bp);
		void *cbuf = zio_buf_alloc(psize);

		zio_push_transform(zio, cbuf, psize, psize, zio_decompress);
	}

	if (BP_IS_EMBEDDED(bp) && BPE_GET_ETYPE(bp) == BP_EMBEDDED_TYPE_DATA) {
		zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
		decode_embedded_bp_compressed(bp, zio->io_data);
	} else {
		ASSERT(!BP_IS_EMBEDDED(bp));
	}

	if (!DMU_OT_IS_METADATA(BP_GET_TYPE(bp)) && BP_GET_LEVEL(bp) == 0)
		zio->io_flags |= ZIO_FLAG_DONT_CACHE;

	if (BP_GET_TYPE(bp) == DMU_OT_DDT_ZAP)
		zio->io_flags |= ZIO_FLAG_DONT_CACHE;

	if (BP_GET_DEDUP(bp) && zio->io_child_type == ZIO_CHILD_LOGICAL)
		zio->io_pipeline = ZIO_DDT_READ_PIPELINE;

	return (ZIO_PIPELINE_CONTINUE);
}

static int
zio_write_bp_init(zio_t *zio)
{
	spa_t *spa = zio->io_spa;
	zio_prop_t *zp = &zio->io_prop;
	enum zio_compress compress = zp->zp_compress;
	blkptr_t *bp = zio->io_bp;
	uint64_t lsize = zio->io_size;
	uint64_t psize = lsize;
	int pass = 1;

	/*
	 * If our children haven't all reached the ready stage,
	 * wait for them and then repeat this pipeline stage.
	 */
	if (zio_wait_for_children(zio, ZIO_CHILD_GANG, ZIO_WAIT_READY) ||
	    zio_wait_for_children(zio, ZIO_CHILD_LOGICAL, ZIO_WAIT_READY))
		return (ZIO_PIPELINE_STOP);

	if (!IO_IS_ALLOCATING(zio))
		return (ZIO_PIPELINE_CONTINUE);

	ASSERT(zio->io_child_type != ZIO_CHILD_DDT);

	if (zio->io_bp_override) {
		ASSERT(bp->blk_birth != zio->io_txg);
		ASSERT(BP_GET_DEDUP(zio->io_bp_override) == 0);

		*bp = *zio->io_bp_override;
		zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;

		if (BP_IS_EMBEDDED(bp))
			return (ZIO_PIPELINE_CONTINUE);

		/*
		 * If we've been overridden and nopwrite is set then
		 * set the flag accordingly to indicate that a nopwrite
		 * has already occurred.
		 */
		if (!BP_IS_HOLE(bp) && zp->zp_nopwrite) {
			ASSERT(!zp->zp_dedup);
			zio->io_flags |= ZIO_FLAG_NOPWRITE;
			return (ZIO_PIPELINE_CONTINUE);
		}

		ASSERT(!zp->zp_nopwrite);

		if (BP_IS_HOLE(bp) || !zp->zp_dedup)
			return (ZIO_PIPELINE_CONTINUE);

		ASSERT(zio_checksum_table[zp->zp_checksum].ci_dedup ||
		    zp->zp_dedup_verify);

		if (BP_GET_CHECKSUM(bp) == zp->zp_checksum) {
			BP_SET_DEDUP(bp, 1);
			zio->io_pipeline |= ZIO_STAGE_DDT_WRITE;
			return (ZIO_PIPELINE_CONTINUE);
		}
		zio->io_bp_override = NULL;
		BP_ZERO(bp);
	}

	if (!BP_IS_HOLE(bp) && bp->blk_birth == zio->io_txg) {
		/*
		 * We're rewriting an existing block, which means we're
		 * working on behalf of spa_sync().  For spa_sync() to
		 * converge, it must eventually be the case that we don't
		 * have to allocate new blocks.  But compression changes
		 * the blocksize, which forces a reallocate, and makes
		 * convergence take longer.  Therefore, after the first
		 * few passes, stop compressing to ensure convergence.
		 */
		pass = spa_sync_pass(spa);

		ASSERT(zio->io_txg == spa_syncing_txg(spa));
		ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
		ASSERT(!BP_GET_DEDUP(bp));

		if (pass >= zfs_sync_pass_dont_compress)
			compress = ZIO_COMPRESS_OFF;

		/* Make sure someone doesn't change their mind on overwrites */
		ASSERT(BP_IS_EMBEDDED(bp) || MIN(zp->zp_copies + BP_IS_GANG(bp),
		    spa_max_replication(spa)) == BP_GET_NDVAS(bp));
	}

	if (compress != ZIO_COMPRESS_OFF) {
		void *cbuf = zio_buf_alloc(lsize);
		psize = zio_compress_data(compress, zio->io_data, cbuf, lsize);
		if (psize == 0 || psize == lsize) {
			compress = ZIO_COMPRESS_OFF;
			zio_buf_free(cbuf, lsize);
		} else if (!zp->zp_dedup && psize <= BPE_PAYLOAD_SIZE &&
		    zp->zp_level == 0 && !DMU_OT_HAS_FILL(zp->zp_type) &&
		    spa_feature_is_enabled(spa, SPA_FEATURE_EMBEDDED_DATA)) {
			encode_embedded_bp_compressed(bp,
			    cbuf, compress, lsize, psize);
			BPE_SET_ETYPE(bp, BP_EMBEDDED_TYPE_DATA);
			BP_SET_TYPE(bp, zio->io_prop.zp_type);
			BP_SET_LEVEL(bp, zio->io_prop.zp_level);
			zio_buf_free(cbuf, lsize);
			bp->blk_birth = zio->io_txg;
			zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
			ASSERT(spa_feature_is_active(spa,
			    SPA_FEATURE_EMBEDDED_DATA));
			return (ZIO_PIPELINE_CONTINUE);
		} else {
			/*
			 * Round up compressed size to MINBLOCKSIZE and
			 * zero the tail.
			 */
			size_t rounded =
			    P2ROUNDUP(psize, (size_t)SPA_MINBLOCKSIZE);
			if (rounded > psize) {
				bzero((char *)cbuf + psize, rounded - psize);
				psize = rounded;
			}
			if (psize == lsize) {
				compress = ZIO_COMPRESS_OFF;
				zio_buf_free(cbuf, lsize);
			} else {
				zio_push_transform(zio, cbuf,
				    psize, lsize, NULL);
			}
		}
	}
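
	/*
	 * Worked example (added): a 4K logical block that compresses to
	 * 900 bytes is rounded up to P2ROUNDUP(900, 512) = 1024 bytes,
	 * with bytes 900..1023 zeroed so the checksum is deterministic;
	 * had it compressed only to 3.6K, rounding would bring psize back
	 * to lsize and the block would be written uncompressed instead.
	 */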

	/*
	 * The final pass of spa_sync() must be all rewrites, but the first
	 * few passes offer a trade-off: allocating blocks defers convergence,
	 * but newly allocated blocks are sequential, so they can be written
	 * to disk faster.  Therefore, we allow the first few passes of
	 * spa_sync() to allocate new blocks, but force rewrites after that.
	 * There should only be a handful of blocks after pass 1 in any case.
	 */
	if (!BP_IS_HOLE(bp) && bp->blk_birth == zio->io_txg &&
	    BP_GET_PSIZE(bp) == psize &&
	    pass >= zfs_sync_pass_rewrite) {
		ASSERT(psize != 0);
		enum zio_stage gang_stages = zio->io_pipeline & ZIO_GANG_STAGES;
		zio->io_pipeline = ZIO_REWRITE_PIPELINE | gang_stages;
		zio->io_flags |= ZIO_FLAG_IO_REWRITE;
	} else {
		BP_ZERO(bp);
		zio->io_pipeline = ZIO_WRITE_PIPELINE;
	}

	if (psize == 0) {
		if (zio->io_bp_orig.blk_birth != 0 &&
		    spa_feature_is_active(spa, SPA_FEATURE_HOLE_BIRTH)) {
			BP_SET_LSIZE(bp, lsize);
			BP_SET_TYPE(bp, zp->zp_type);
			BP_SET_LEVEL(bp, zp->zp_level);
			BP_SET_BIRTH(bp, zio->io_txg, 0);
		}
		zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
	} else {
		ASSERT(zp->zp_checksum != ZIO_CHECKSUM_GANG_HEADER);
		BP_SET_LSIZE(bp, lsize);
		BP_SET_TYPE(bp, zp->zp_type);
		BP_SET_LEVEL(bp, zp->zp_level);
		BP_SET_PSIZE(bp, psize);
		BP_SET_COMPRESS(bp, compress);
		BP_SET_CHECKSUM(bp, zp->zp_checksum);
		BP_SET_DEDUP(bp, zp->zp_dedup);
		BP_SET_BYTEORDER(bp, ZFS_HOST_BYTEORDER);
		if (zp->zp_dedup) {
			ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
			ASSERT(!(zio->io_flags & ZIO_FLAG_IO_REWRITE));
			zio->io_pipeline = ZIO_DDT_WRITE_PIPELINE;
		}
		if (zp->zp_nopwrite) {
			ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
			ASSERT(!(zio->io_flags & ZIO_FLAG_IO_REWRITE));
			zio->io_pipeline |= ZIO_STAGE_NOP_WRITE;
		}
	}

	return (ZIO_PIPELINE_CONTINUE);
}

static int
zio_free_bp_init(zio_t *zio)
{
	blkptr_t *bp = zio->io_bp;

	if (zio->io_child_type == ZIO_CHILD_LOGICAL) {
		if (BP_GET_DEDUP(bp))
			zio->io_pipeline = ZIO_DDT_FREE_PIPELINE;
	}

	return (ZIO_PIPELINE_CONTINUE);
}

/*
 * ==========================================================================
 * Execute the I/O pipeline
 * ==========================================================================
 */

static void
zio_taskq_dispatch(zio_t *zio, zio_taskq_type_t q, boolean_t cutinline)
{
	spa_t *spa = zio->io_spa;
	zio_type_t t = zio->io_type;
	int flags = (cutinline ? TQ_FRONT : 0);

	ASSERT(q == ZIO_TASKQ_ISSUE || q == ZIO_TASKQ_INTERRUPT);

	/*
	 * If we're a config writer or a probe, the normal issue and
	 * interrupt threads may all be blocked waiting for the config lock.
	 * In this case, select the otherwise-unused taskq for ZIO_TYPE_NULL.
	 */
	if (zio->io_flags & (ZIO_FLAG_CONFIG_WRITER | ZIO_FLAG_PROBE))
		t = ZIO_TYPE_NULL;

	/*
	 * A similar issue exists for the L2ARC write thread until L2ARC 2.0.
	 */
	if (t == ZIO_TYPE_WRITE && zio->io_vd && zio->io_vd->vdev_aux)
		t = ZIO_TYPE_NULL;

	/*
	 * If this is a high priority I/O, then use the high priority taskq if
	 * available.
	 */
	if (zio->io_priority == ZIO_PRIORITY_NOW &&
	    spa->spa_zio_taskq[t][q + 1].stqs_count != 0)
		q++;

	ASSERT3U(q, <, ZIO_TASKQ_TYPES);
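
	/*
	 * Illustrative note (added): with the standard taskq layout the
	 * "+ 1" above promotes, e.g., ZIO_TASKQ_ISSUE to
	 * ZIO_TASKQ_ISSUE_HIGH, so a ZIO_PRIORITY_NOW zio jumps to the
	 * high-priority queue whenever the pool has one configured.
	 */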

	/*
	 * NB: We are assuming that the zio can only be dispatched
	 * to a single taskq at a time.  It would be a grievous error
	 * to dispatch the zio to another taskq at the same time.
	 */
#if defined(illumos) || !defined(_KERNEL)
	ASSERT(zio->io_tqent.tqent_next == NULL);
#else
	ASSERT(zio->io_tqent.tqent_task.ta_pending == 0);
#endif
	spa_taskq_dispatch_ent(spa, t, q, (task_func_t *)zio_execute, zio,
	    flags, &zio->io_tqent);
}

static boolean_t
zio_taskq_member(zio_t *zio, zio_taskq_type_t q)
{
	kthread_t *executor = zio->io_executor;
	spa_t *spa = zio->io_spa;

	for (zio_type_t t = 0; t < ZIO_TYPES; t++) {
		spa_taskqs_t *tqs = &spa->spa_zio_taskq[t][q];
		uint_t i;
		for (i = 0; i < tqs->stqs_count; i++) {
			if (taskq_member(tqs->stqs_taskq[i], executor))
				return (B_TRUE);
		}
	}

	return (B_FALSE);
}

static int
zio_issue_async(zio_t *zio)
{
	zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, B_FALSE);

	return (ZIO_PIPELINE_STOP);
}

void
zio_interrupt(zio_t *zio)
{
	zio_taskq_dispatch(zio, ZIO_TASKQ_INTERRUPT, B_FALSE);
}

/*
 * Execute the I/O pipeline until one of the following occurs:
 *
 *	(1) the I/O completes
 *	(2) the pipeline stalls waiting for dependent child I/Os
 *	(3) the I/O issues, so we're waiting for an I/O completion interrupt
 *	(4) the I/O is delegated by vdev-level caching or aggregation
 *	(5) the I/O is deferred due to vdev-level queueing
 *	(6) the I/O is handed off to another thread.
 *
 * In all cases, the pipeline stops whenever there's no CPU work; it never
 * burns a thread in cv_wait().
 *
 * There's no locking on io_stage because there's no legitimate way
 * for multiple threads to be attempting to process the same I/O.
 */
static zio_pipe_stage_t *zio_pipeline[];
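
/*
 * Sketch of the dispatch mechanics (added for exposition): io_stage and
 * io_pipeline are bitmasks of pipeline stages, with one function per bit
 * in zio_pipeline[].  Each trip through the loop below shifts io_stage
 * left until it lands on the next bit that is set in io_pipeline, then
 * calls zio_pipeline[highbit64(stage) - 1], i.e. the handler whose index
 * matches that bit position.  Stalling a zio is just zio->io_stage >>= 1
 * (see zio_wait_for_children()), which re-runs the same stage when the
 * zio is re-executed.
 */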

void
zio_execute(zio_t *zio)
{
	zio->io_executor = curthread;

	while (zio->io_stage < ZIO_STAGE_DONE) {
		enum zio_stage pipeline = zio->io_pipeline;
		enum zio_stage stage = zio->io_stage;
		int rv;

		ASSERT(!MUTEX_HELD(&zio->io_lock));
		ASSERT(ISP2(stage));
		ASSERT(zio->io_stall == NULL);

		do {
			stage <<= 1;
		} while ((stage & pipeline) == 0);

		ASSERT(stage <= ZIO_STAGE_DONE);

		/*
		 * If we are in interrupt context and this pipeline stage
		 * will grab a config lock that is held across I/O,
		 * or may wait for an I/O that needs an interrupt thread
		 * to complete, issue async to avoid deadlock.
		 *
		 * For VDEV_IO_START, we cut in line so that the io will
		 * be sent to disk promptly.
		 */
		if ((stage & ZIO_BLOCKING_STAGES) && zio->io_vd == NULL &&
		    zio_taskq_member(zio, ZIO_TASKQ_INTERRUPT)) {
			boolean_t cut = (stage == ZIO_STAGE_VDEV_IO_START) ?
			    zio_requeue_io_start_cut_in_line : B_FALSE;
			zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, cut);
			return;
		}

		zio->io_stage = stage;
		rv = zio_pipeline[highbit64(stage) - 1](zio);

		if (rv == ZIO_PIPELINE_STOP)
			return;

		ASSERT(rv == ZIO_PIPELINE_CONTINUE);
	}
}

/*
 * ==========================================================================
 * Initiate I/O, either sync or async
 * ==========================================================================
 */
int
zio_wait(zio_t *zio)
{
	int error;

	ASSERT(zio->io_stage == ZIO_STAGE_OPEN);
	ASSERT(zio->io_executor == NULL);

	zio->io_waiter = curthread;

	zio_execute(zio);

	mutex_enter(&zio->io_lock);
	while (zio->io_executor != NULL)
		cv_wait(&zio->io_cv, &zio->io_lock);
	mutex_exit(&zio->io_lock);

	error = zio->io_error;
	zio_destroy(zio);

	return (error);
}

void
zio_nowait(zio_t *zio)
{
	ASSERT(zio->io_executor == NULL);

	if (zio->io_child_type == ZIO_CHILD_LOGICAL &&
	    zio_unique_parent(zio) == NULL) {
		/*
		 * This is a logical async I/O with no parent to wait for it.
		 * We add it to the spa_async_root_zio "Godfather" I/O which
		 * will ensure they complete prior to unloading the pool.
		 */
		spa_t *spa = zio->io_spa;

		zio_add_child(spa->spa_async_zio_root[CPU_SEQID], zio);
	}

	zio_execute(zio);
}

/*
 * ==========================================================================
 * Reexecute or suspend/resume failed I/O
 * ==========================================================================
 */

static void
zio_reexecute(zio_t *pio)
{
	zio_t *cio, *cio_next;

	ASSERT(pio->io_child_type == ZIO_CHILD_LOGICAL);
	ASSERT(pio->io_orig_stage == ZIO_STAGE_OPEN);
	ASSERT(pio->io_gang_leader == NULL);
	ASSERT(pio->io_gang_tree == NULL);

	pio->io_flags = pio->io_orig_flags;
	pio->io_stage = pio->io_orig_stage;
	pio->io_pipeline = pio->io_orig_pipeline;
	pio->io_reexecute = 0;
	pio->io_flags |= ZIO_FLAG_REEXECUTED;
	pio->io_error = 0;
	for (int w = 0; w < ZIO_WAIT_TYPES; w++)
		pio->io_state[w] = 0;
	for (int c = 0; c < ZIO_CHILD_TYPES; c++)
		pio->io_child_error[c] = 0;

	if (IO_IS_ALLOCATING(pio))
		BP_ZERO(pio->io_bp);

	/*
	 * As we reexecute pio's children, new children could be created.
	 * New children go to the head of pio's io_child_list, however,
	 * so we will (correctly) not reexecute them.  The key is that
	 * the remainder of pio's io_child_list, from 'cio_next' onward,
	 * cannot be affected by any side effects of reexecuting 'cio'.
	 */
	for (cio = zio_walk_children(pio); cio != NULL; cio = cio_next) {
		cio_next = zio_walk_children(pio);
		mutex_enter(&pio->io_lock);
		for (int w = 0; w < ZIO_WAIT_TYPES; w++)
			pio->io_children[cio->io_child_type][w]++;
		mutex_exit(&pio->io_lock);
		zio_reexecute(cio);
	}

	/*
	 * Now that all children have been reexecuted, execute the parent.
	 * We don't reexecute "The Godfather" I/O here as it's the
	 * responsibility of the caller to wait on him.
	 */
	if (!(pio->io_flags & ZIO_FLAG_GODFATHER))
		zio_execute(pio);
}

void
zio_suspend(spa_t *spa, zio_t *zio)
{
	if (spa_get_failmode(spa) == ZIO_FAILURE_MODE_PANIC)
		fm_panic("Pool '%s' has encountered an uncorrectable I/O "
		    "failure and the failure mode property for this pool "
		    "is set to panic.", spa_name(spa));

	zfs_ereport_post(FM_EREPORT_ZFS_IO_FAILURE, spa, NULL, NULL, 0, 0);

	mutex_enter(&spa->spa_suspend_lock);

	if (spa->spa_suspend_zio_root == NULL)
		spa->spa_suspend_zio_root = zio_root(spa, NULL, NULL,
		    ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE |
		    ZIO_FLAG_GODFATHER);

	spa->spa_suspended = B_TRUE;

	if (zio != NULL) {
		ASSERT(!(zio->io_flags & ZIO_FLAG_GODFATHER));
		ASSERT(zio != spa->spa_suspend_zio_root);
		ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
		ASSERT(zio_unique_parent(zio) == NULL);
		ASSERT(zio->io_stage == ZIO_STAGE_DONE);
		zio_add_child(spa->spa_suspend_zio_root, zio);
	}

	mutex_exit(&spa->spa_suspend_lock);
}

int
zio_resume(spa_t *spa)
{
	zio_t *pio;

	/*
	 * Reexecute all previously suspended i/o.
	 */
	mutex_enter(&spa->spa_suspend_lock);
	spa->spa_suspended = B_FALSE;
	cv_broadcast(&spa->spa_suspend_cv);
	pio = spa->spa_suspend_zio_root;
	spa->spa_suspend_zio_root = NULL;
	mutex_exit(&spa->spa_suspend_lock);

	if (pio == NULL)
		return (0);

	zio_reexecute(pio);
	return (zio_wait(pio));
}

void
zio_resume_wait(spa_t *spa)
{
	mutex_enter(&spa->spa_suspend_lock);
	while (spa_suspended(spa))
		cv_wait(&spa->spa_suspend_cv, &spa->spa_suspend_lock);
	mutex_exit(&spa->spa_suspend_lock);
}

/*
 * ==========================================================================
 * Gang blocks.
 *
 * A gang block is a collection of small blocks that looks to the DMU
 * like one large block.  When zio_dva_allocate() cannot find a block
 * of the requested size, due to either severe fragmentation or the pool
 * being nearly full, it calls zio_write_gang_block() to construct the
 * block from smaller fragments.
 *
 * A gang block consists of a gang header (zio_gbh_phys_t) and up to
 * three (SPA_GBH_NBLKPTRS) gang members.  The gang header is just like
 * an indirect block: it's an array of block pointers.  It consumes
 * only one sector and hence is allocatable regardless of fragmentation.
 * The gang header's bps point to its gang members, which hold the data.
 *
 * Gang blocks are self-checksumming, using the bp's <vdev, offset, txg>
 * as the verifier to ensure uniqueness of the SHA256 checksum.
 * Critically, the gang block bp's blk_cksum is the checksum of the data,
 * not the gang header.  This ensures that data block signatures (needed for
 * deduplication) are independent of how the block is physically stored.
 *
 * Gang blocks can be nested: a gang member may itself be a gang block.
 * Thus every gang block is a tree in which root and all interior nodes are
 * gang headers, and the leaves are normal blocks that contain user data.
 * The root of the gang tree is called the gang leader.
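 *
 * Illustrative example (added; the exact shapes are hypothetical): a 128K
 * logical block written into a badly fragmented pool might end up as
 *
 *	gang header (1 sector)
 *	  bp[0] -> 64K data block
 *	  bp[1] -> 32K data block
 *	  bp[2] -> gang header (nested)
 *	             bp[0] -> 16K data block
 *	             bp[1] -> 16K data block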
1696 * 1697 * To perform any operation (read, rewrite, free, claim) on a gang block, 1698 * zio_gang_assemble() first assembles the gang tree (minus data leaves) 1699 * in the io_gang_tree field of the original logical i/o by recursively 1700 * reading the gang leader and all gang headers below it. This yields 1701 * an in-core tree containing the contents of every gang header and the 1702 * bps for every constituent of the gang block. 1703 * 1704 * With the gang tree now assembled, zio_gang_issue() just walks the gang tree 1705 * and invokes a callback on each bp. To free a gang block, zio_gang_issue() 1706 * calls zio_free_gang() -- a trivial wrapper around zio_free() -- for each bp. 1707 * zio_claim_gang() provides a similarly trivial wrapper for zio_claim(). 1708 * zio_read_gang() is a wrapper around zio_read() that omits reading gang 1709 * headers, since we already have those in io_gang_tree. zio_rewrite_gang() 1710 * performs a zio_rewrite() of the data or, for gang headers, a zio_rewrite() 1711 * of the gang header plus zio_checksum_compute() of the data to update the 1712 * gang header's blk_cksum as described above. 1713 * 1714 * The two-phase assemble/issue model solves the problem of partial failure -- 1715 * what if you'd freed part of a gang block but then couldn't read the 1716 * gang header for another part? Assembling the entire gang tree first 1717 * ensures that all the necessary gang header I/O has succeeded before 1718 * starting the actual work of free, claim, or write. Once the gang tree 1719 * is assembled, free and claim are in-memory operations that cannot fail. 1720 * 1721 * In the event that a gang write fails, zio_dva_unallocate() walks the 1722 * gang tree to immediately free (i.e. insert back into the space map) 1723 * everything we've allocated. This ensures that we don't get ENOSPC 1724 * errors during repeated suspend/resume cycles due to a flaky device. 1725 * 1726 * Gang rewrites only happen during sync-to-convergence. If we can't assemble 1727 * the gang tree, we won't modify the block, so we can safely defer the free 1728 * (knowing that the block is still intact). If we *can* assemble the gang 1729 * tree, then even if some of the rewrites fail, zio_dva_unallocate() will free 1730 * each constituent bp and we can allocate a new block on the next sync pass. 1731 * 1732 * In all cases, the gang tree allows complete recovery from partial failure. 1733 * ========================================================================== 1734 */ 1735 1736static zio_t * 1737zio_read_gang(zio_t *pio, blkptr_t *bp, zio_gang_node_t *gn, void *data) 1738{ 1739 if (gn != NULL) 1740 return (pio); 1741 1742 return (zio_read(pio, pio->io_spa, bp, data, BP_GET_PSIZE(bp), 1743 NULL, NULL, pio->io_priority, ZIO_GANG_CHILD_FLAGS(pio), 1744 &pio->io_bookmark)); 1745} 1746 1747zio_t * 1748zio_rewrite_gang(zio_t *pio, blkptr_t *bp, zio_gang_node_t *gn, void *data) 1749{ 1750 zio_t *zio; 1751 1752 if (gn != NULL) { 1753 zio = zio_rewrite(pio, pio->io_spa, pio->io_txg, bp, 1754 gn->gn_gbh, SPA_GANGBLOCKSIZE, NULL, NULL, pio->io_priority, 1755 ZIO_GANG_CHILD_FLAGS(pio), &pio->io_bookmark); 1756 /* 1757 * As we rewrite each gang header, the pipeline will compute 1758 * a new gang block header checksum for it; but no one will 1759 * compute a new data checksum, so we do that here. The one 1760 * exception is the gang leader: the pipeline already computed 1761 * its data checksum because that stage precedes gang assembly. 
static zio_t *
zio_read_gang(zio_t *pio, blkptr_t *bp, zio_gang_node_t *gn, void *data)
{
	if (gn != NULL)
		return (pio);

	return (zio_read(pio, pio->io_spa, bp, data, BP_GET_PSIZE(bp),
	    NULL, NULL, pio->io_priority, ZIO_GANG_CHILD_FLAGS(pio),
	    &pio->io_bookmark));
}

zio_t *
zio_rewrite_gang(zio_t *pio, blkptr_t *bp, zio_gang_node_t *gn, void *data)
{
	zio_t *zio;

	if (gn != NULL) {
		zio = zio_rewrite(pio, pio->io_spa, pio->io_txg, bp,
		    gn->gn_gbh, SPA_GANGBLOCKSIZE, NULL, NULL, pio->io_priority,
		    ZIO_GANG_CHILD_FLAGS(pio), &pio->io_bookmark);
		/*
		 * As we rewrite each gang header, the pipeline will compute
		 * a new gang block header checksum for it; but no one will
		 * compute a new data checksum, so we do that here.  The one
		 * exception is the gang leader: the pipeline already computed
		 * its data checksum because that stage precedes gang assembly.
		 * (Presently, nothing actually uses interior data checksums;
		 * this is just good hygiene.)
		 */
		if (gn != pio->io_gang_leader->io_gang_tree) {
			zio_checksum_compute(zio, BP_GET_CHECKSUM(bp),
			    data, BP_GET_PSIZE(bp));
		}
		/*
		 * If we are here to damage data for testing purposes,
		 * leave the GBH alone so that we can detect the damage.
		 */
		if (pio->io_gang_leader->io_flags & ZIO_FLAG_INDUCE_DAMAGE)
			zio->io_pipeline &= ~ZIO_VDEV_IO_STAGES;
	} else {
		zio = zio_rewrite(pio, pio->io_spa, pio->io_txg, bp,
		    data, BP_GET_PSIZE(bp), NULL, NULL, pio->io_priority,
		    ZIO_GANG_CHILD_FLAGS(pio), &pio->io_bookmark);
	}

	return (zio);
}

/* ARGSUSED */
zio_t *
zio_free_gang(zio_t *pio, blkptr_t *bp, zio_gang_node_t *gn, void *data)
{
	return (zio_free_sync(pio, pio->io_spa, pio->io_txg, bp,
	    BP_IS_GANG(bp) ? SPA_GANGBLOCKSIZE : BP_GET_PSIZE(bp),
	    ZIO_GANG_CHILD_FLAGS(pio)));
}

/* ARGSUSED */
zio_t *
zio_claim_gang(zio_t *pio, blkptr_t *bp, zio_gang_node_t *gn, void *data)
{
	return (zio_claim(pio, pio->io_spa, pio->io_txg, bp,
	    NULL, NULL, ZIO_GANG_CHILD_FLAGS(pio)));
}

static zio_gang_issue_func_t *zio_gang_issue_func[ZIO_TYPES] = {
	NULL,
	zio_read_gang,
	zio_rewrite_gang,
	zio_free_gang,
	zio_claim_gang,
	NULL
};

static void zio_gang_tree_assemble_done(zio_t *zio);

static zio_gang_node_t *
zio_gang_node_alloc(zio_gang_node_t **gnpp)
{
	zio_gang_node_t *gn;

	ASSERT(*gnpp == NULL);

	gn = kmem_zalloc(sizeof (*gn), KM_SLEEP);
	gn->gn_gbh = zio_buf_alloc(SPA_GANGBLOCKSIZE);
	*gnpp = gn;

	return (gn);
}

static void
zio_gang_node_free(zio_gang_node_t **gnpp)
{
	zio_gang_node_t *gn = *gnpp;

	for (int g = 0; g < SPA_GBH_NBLKPTRS; g++)
		ASSERT(gn->gn_child[g] == NULL);

	zio_buf_free(gn->gn_gbh, SPA_GANGBLOCKSIZE);
	kmem_free(gn, sizeof (*gn));
	*gnpp = NULL;
}

static void
zio_gang_tree_free(zio_gang_node_t **gnpp)
{
	zio_gang_node_t *gn = *gnpp;

	if (gn == NULL)
		return;

	for (int g = 0; g < SPA_GBH_NBLKPTRS; g++)
		zio_gang_tree_free(&gn->gn_child[g]);

	zio_gang_node_free(gnpp);
}

static void
zio_gang_tree_assemble(zio_t *gio, blkptr_t *bp, zio_gang_node_t **gnpp)
{
	zio_gang_node_t *gn = zio_gang_node_alloc(gnpp);

	ASSERT(gio->io_gang_leader == gio);
	ASSERT(BP_IS_GANG(bp));

	zio_nowait(zio_read(gio, gio->io_spa, bp, gn->gn_gbh,
	    SPA_GANGBLOCKSIZE, zio_gang_tree_assemble_done, gn,
	    gio->io_priority, ZIO_GANG_CHILD_FLAGS(gio), &gio->io_bookmark));
}

static void
zio_gang_tree_assemble_done(zio_t *zio)
{
	zio_t *gio = zio->io_gang_leader;
	zio_gang_node_t *gn = zio->io_private;
	blkptr_t *bp = zio->io_bp;

	ASSERT(gio == zio_unique_parent(zio));
	ASSERT(zio->io_child_count == 0);

	if (zio->io_error)
		return;

	if (BP_SHOULD_BYTESWAP(bp))
		byteswap_uint64_array(zio->io_data, zio->io_size);

	ASSERT(zio->io_data == gn->gn_gbh);
	ASSERT(zio->io_size == SPA_GANGBLOCKSIZE);
	ASSERT(gn->gn_gbh->zg_tail.zec_magic == ZEC_MAGIC);

	for (int g = 0; g < SPA_GBH_NBLKPTRS; g++) {
		blkptr_t *gbp = &gn->gn_gbh->zg_blkptr[g];
		if (!BP_IS_GANG(gbp))
			continue;
		zio_gang_tree_assemble(gio, gbp, &gn->gn_child[g]);
	}
}

static void
zio_gang_tree_issue(zio_t *pio, zio_gang_node_t *gn, blkptr_t *bp, void *data)
{
	zio_t *gio = pio->io_gang_leader;
	zio_t *zio;

	ASSERT(BP_IS_GANG(bp) == !!gn);
	ASSERT(BP_GET_CHECKSUM(bp) == BP_GET_CHECKSUM(gio->io_bp));
	ASSERT(BP_GET_LSIZE(bp) == BP_GET_PSIZE(bp) || gn == gio->io_gang_tree);

	/*
	 * If you're a gang header, your data is in gn->gn_gbh.
	 * If you're a gang member, your data is in 'data' and gn == NULL.
	 */
	zio = zio_gang_issue_func[gio->io_type](pio, bp, gn, data);

	if (gn != NULL) {
		ASSERT(gn->gn_gbh->zg_tail.zec_magic == ZEC_MAGIC);

		for (int g = 0; g < SPA_GBH_NBLKPTRS; g++) {
			blkptr_t *gbp = &gn->gn_gbh->zg_blkptr[g];
			if (BP_IS_HOLE(gbp))
				continue;
			zio_gang_tree_issue(zio, gn->gn_child[g], gbp, data);
			data = (char *)data + BP_GET_PSIZE(gbp);
		}
	}

	if (gn == gio->io_gang_tree && gio->io_data != NULL)
		ASSERT3P((char *)gio->io_data + gio->io_size, ==, data);

	if (zio != pio)
		zio_nowait(zio);
}

static int
zio_gang_assemble(zio_t *zio)
{
	blkptr_t *bp = zio->io_bp;

	ASSERT(BP_IS_GANG(bp) && zio->io_gang_leader == NULL);
	ASSERT(zio->io_child_type > ZIO_CHILD_GANG);

	zio->io_gang_leader = zio;

	zio_gang_tree_assemble(zio, bp, &zio->io_gang_tree);

	return (ZIO_PIPELINE_CONTINUE);
}

static int
zio_gang_issue(zio_t *zio)
{
	blkptr_t *bp = zio->io_bp;

	if (zio_wait_for_children(zio, ZIO_CHILD_GANG, ZIO_WAIT_DONE))
		return (ZIO_PIPELINE_STOP);

	ASSERT(BP_IS_GANG(bp) && zio->io_gang_leader == zio);
	ASSERT(zio->io_child_type > ZIO_CHILD_GANG);

	if (zio->io_child_error[ZIO_CHILD_GANG] == 0)
		zio_gang_tree_issue(zio, zio->io_gang_tree, bp, zio->io_data);
	else
		zio_gang_tree_free(&zio->io_gang_tree);

	zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;

	return (ZIO_PIPELINE_CONTINUE);
}

static void
zio_write_gang_member_ready(zio_t *zio)
{
	zio_t *pio = zio_unique_parent(zio);
	zio_t *gio = zio->io_gang_leader;
	dva_t *cdva = zio->io_bp->blk_dva;
	dva_t *pdva = pio->io_bp->blk_dva;
	uint64_t asize;

	if (BP_IS_HOLE(zio->io_bp))
		return;

	ASSERT(BP_IS_HOLE(&zio->io_bp_orig));

	ASSERT(zio->io_child_type == ZIO_CHILD_GANG);
	ASSERT3U(zio->io_prop.zp_copies, ==, gio->io_prop.zp_copies);
	ASSERT3U(zio->io_prop.zp_copies, <=, BP_GET_NDVAS(zio->io_bp));
	ASSERT3U(pio->io_prop.zp_copies, <=, BP_GET_NDVAS(pio->io_bp));
	ASSERT3U(BP_GET_NDVAS(zio->io_bp), <=, BP_GET_NDVAS(pio->io_bp));

	mutex_enter(&pio->io_lock);
	for (int d = 0; d < BP_GET_NDVAS(zio->io_bp); d++) {
		ASSERT(DVA_GET_GANG(&pdva[d]));
		asize = DVA_GET_ASIZE(&pdva[d]);
		asize += DVA_GET_ASIZE(&cdva[d]);
		DVA_SET_ASIZE(&pdva[d], asize);
	}
	mutex_exit(&pio->io_lock);
}

static int
zio_write_gang_block(zio_t *pio)
{
	spa_t *spa = pio->io_spa;
	blkptr_t *bp = pio->io_bp;
	zio_t *gio = pio->io_gang_leader;
	zio_t *zio;
	zio_gang_node_t *gn, **gnpp;
	zio_gbh_phys_t *gbh;
	uint64_t txg = pio->io_txg;
	uint64_t resid = pio->io_size;
	uint64_t lsize;
	int copies = gio->io_prop.zp_copies;
	int gbh_copies = MIN(copies + 1, spa_max_replication(spa));
	zio_prop_t zp;
	int error;

	error = metaslab_alloc(spa, spa_normal_class(spa), SPA_GANGBLOCKSIZE,
	    bp, gbh_copies, txg, pio == gio ? NULL : gio->io_bp,
	    METASLAB_HINTBP_FAVOR | METASLAB_GANG_HEADER);
	if (error) {
		pio->io_error = error;
		return (ZIO_PIPELINE_CONTINUE);
	}

	if (pio == gio) {
		gnpp = &gio->io_gang_tree;
	} else {
		gnpp = pio->io_private;
		ASSERT(pio->io_ready == zio_write_gang_member_ready);
	}

	gn = zio_gang_node_alloc(gnpp);
	gbh = gn->gn_gbh;
	bzero(gbh, SPA_GANGBLOCKSIZE);

	/*
	 * Create the gang header.
	 */
	zio = zio_rewrite(pio, spa, txg, bp, gbh, SPA_GANGBLOCKSIZE, NULL, NULL,
	    pio->io_priority, ZIO_GANG_CHILD_FLAGS(pio), &pio->io_bookmark);

	/*
	 * Create and nowait the gang children.
	 */
	for (int g = 0; resid != 0; resid -= lsize, g++) {
		lsize = P2ROUNDUP(resid / (SPA_GBH_NBLKPTRS - g),
		    SPA_MINBLOCKSIZE);
		ASSERT(lsize >= SPA_MINBLOCKSIZE && lsize <= resid);

		zp.zp_checksum = gio->io_prop.zp_checksum;
		zp.zp_compress = ZIO_COMPRESS_OFF;
		zp.zp_type = DMU_OT_NONE;
		zp.zp_level = 0;
		zp.zp_copies = gio->io_prop.zp_copies;
		zp.zp_dedup = B_FALSE;
		zp.zp_dedup_verify = B_FALSE;
		zp.zp_nopwrite = B_FALSE;

		zio_nowait(zio_write(zio, spa, txg, &gbh->zg_blkptr[g],
		    (char *)pio->io_data + (pio->io_size - resid), lsize, &zp,
		    zio_write_gang_member_ready, NULL, NULL, &gn->gn_child[g],
		    pio->io_priority, ZIO_GANG_CHILD_FLAGS(pio),
		    &pio->io_bookmark));
	}

	/*
	 * Set pio's pipeline to just wait for zio to finish.
	 */
	pio->io_pipeline = ZIO_INTERLOCK_PIPELINE;

	zio_nowait(zio);

	return (ZIO_PIPELINE_CONTINUE);
}
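/*
 * A worked example of the even-split loop above (illustrative only).
 * For a 100K (102400-byte) write with SPA_GBH_NBLKPTRS == 3 and
 * SPA_MINBLOCKSIZE == 512:
 *
 *	g == 0:	lsize = P2ROUNDUP(102400 / 3, 512) = 34304, resid = 68096
 *	g == 1:	lsize = P2ROUNDUP(68096 / 2, 512)  = 34048, resid = 34048
 *	g == 2:	lsize = P2ROUNDUP(34048 / 1, 512)  = 34048, resid = 0
 *
 * Each member is sector-aligned and no larger than the space remaining,
 * so the members always sum to exactly the original size.
 */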
/*
 * The zio_nop_write stage in the pipeline determines if allocating
 * a new bp is necessary.  By leveraging a cryptographically secure checksum,
 * such as SHA256, we can compare the checksums of the new data and the old
 * to determine if allocating a new block is required.  The nopwrite
 * feature can handle writes in either syncing or open context (i.e. zil
 * writes) and as a result is mutually exclusive with dedup.
 */
static int
zio_nop_write(zio_t *zio)
{
	blkptr_t *bp = zio->io_bp;
	blkptr_t *bp_orig = &zio->io_bp_orig;
	zio_prop_t *zp = &zio->io_prop;

	ASSERT(BP_GET_LEVEL(bp) == 0);
	ASSERT(!(zio->io_flags & ZIO_FLAG_IO_REWRITE));
	ASSERT(zp->zp_nopwrite);
	ASSERT(!zp->zp_dedup);
	ASSERT(zio->io_bp_override == NULL);
	ASSERT(IO_IS_ALLOCATING(zio));

	/*
	 * Check to see if the original bp and the new bp have matching
	 * characteristics (i.e. same checksum, compression algorithms, etc).
	 * If they don't then just continue with the pipeline which will
	 * allocate a new bp.
	 */
	if (BP_IS_HOLE(bp_orig) ||
	    !zio_checksum_table[BP_GET_CHECKSUM(bp)].ci_dedup ||
	    BP_GET_CHECKSUM(bp) != BP_GET_CHECKSUM(bp_orig) ||
	    BP_GET_COMPRESS(bp) != BP_GET_COMPRESS(bp_orig) ||
	    BP_GET_DEDUP(bp) != BP_GET_DEDUP(bp_orig) ||
	    zp->zp_copies != BP_GET_NDVAS(bp_orig))
		return (ZIO_PIPELINE_CONTINUE);

	/*
	 * If the checksums match then reset the pipeline so that we
	 * avoid allocating a new bp and issuing any I/O.
	 */
	if (ZIO_CHECKSUM_EQUAL(bp->blk_cksum, bp_orig->blk_cksum)) {
		ASSERT(zio_checksum_table[zp->zp_checksum].ci_dedup);
		ASSERT3U(BP_GET_PSIZE(bp), ==, BP_GET_PSIZE(bp_orig));
		ASSERT3U(BP_GET_LSIZE(bp), ==, BP_GET_LSIZE(bp_orig));
		ASSERT(zp->zp_compress != ZIO_COMPRESS_OFF);
		ASSERT(bcmp(&bp->blk_prop, &bp_orig->blk_prop,
		    sizeof (uint64_t)) == 0);

		*bp = *bp_orig;
		zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
		zio->io_flags |= ZIO_FLAG_NOPWRITE;
	}

	return (ZIO_PIPELINE_CONTINUE);
}
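/*
 * A condensed sketch of the eligibility test above (illustrative only):
 * a nopwrite is possible only when a dedup-strength (collision-resistant)
 * checksum is in use and every property that shapes the on-disk block is
 * unchanged, so the old bp can stand in for the new one verbatim.
 */
#if 0	/* example only; not compiled */
static boolean_t
example_nopwrite_eligible(blkptr_t *bp, blkptr_t *bp_orig, zio_prop_t *zp)
{
	return (!BP_IS_HOLE(bp_orig) &&
	    zio_checksum_table[BP_GET_CHECKSUM(bp)].ci_dedup &&
	    BP_GET_CHECKSUM(bp) == BP_GET_CHECKSUM(bp_orig) &&
	    BP_GET_COMPRESS(bp) == BP_GET_COMPRESS(bp_orig) &&
	    BP_GET_DEDUP(bp) == BP_GET_DEDUP(bp_orig) &&
	    zp->zp_copies == BP_GET_NDVAS(bp_orig) &&
	    ZIO_CHECKSUM_EQUAL(bp->blk_cksum, bp_orig->blk_cksum));
}
#endif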
/*
 * ==========================================================================
 * Dedup
 * ==========================================================================
 */
static void
zio_ddt_child_read_done(zio_t *zio)
{
	blkptr_t *bp = zio->io_bp;
	ddt_entry_t *dde = zio->io_private;
	ddt_phys_t *ddp;
	zio_t *pio = zio_unique_parent(zio);

	mutex_enter(&pio->io_lock);
	ddp = ddt_phys_select(dde, bp);
	if (zio->io_error == 0)
		ddt_phys_clear(ddp);	/* this ddp doesn't need repair */
	if (zio->io_error == 0 && dde->dde_repair_data == NULL)
		dde->dde_repair_data = zio->io_data;
	else
		zio_buf_free(zio->io_data, zio->io_size);
	mutex_exit(&pio->io_lock);
}

static int
zio_ddt_read_start(zio_t *zio)
{
	blkptr_t *bp = zio->io_bp;

	ASSERT(BP_GET_DEDUP(bp));
	ASSERT(BP_GET_PSIZE(bp) == zio->io_size);
	ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);

	if (zio->io_child_error[ZIO_CHILD_DDT]) {
		ddt_t *ddt = ddt_select(zio->io_spa, bp);
		ddt_entry_t *dde = ddt_repair_start(ddt, bp);
		ddt_phys_t *ddp = dde->dde_phys;
		ddt_phys_t *ddp_self = ddt_phys_select(dde, bp);
		blkptr_t blk;

		ASSERT(zio->io_vsd == NULL);
		zio->io_vsd = dde;

		if (ddp_self == NULL)
			return (ZIO_PIPELINE_CONTINUE);

		for (int p = 0; p < DDT_PHYS_TYPES; p++, ddp++) {
			if (ddp->ddp_phys_birth == 0 || ddp == ddp_self)
				continue;
			ddt_bp_create(ddt->ddt_checksum, &dde->dde_key, ddp,
			    &blk);
			zio_nowait(zio_read(zio, zio->io_spa, &blk,
			    zio_buf_alloc(zio->io_size), zio->io_size,
			    zio_ddt_child_read_done, dde, zio->io_priority,
			    ZIO_DDT_CHILD_FLAGS(zio) | ZIO_FLAG_DONT_PROPAGATE,
			    &zio->io_bookmark));
		}
		return (ZIO_PIPELINE_CONTINUE);
	}

	zio_nowait(zio_read(zio, zio->io_spa, bp,
	    zio->io_data, zio->io_size, NULL, NULL, zio->io_priority,
	    ZIO_DDT_CHILD_FLAGS(zio), &zio->io_bookmark));

	return (ZIO_PIPELINE_CONTINUE);
}

static int
zio_ddt_read_done(zio_t *zio)
{
	blkptr_t *bp = zio->io_bp;

	if (zio_wait_for_children(zio, ZIO_CHILD_DDT, ZIO_WAIT_DONE))
		return (ZIO_PIPELINE_STOP);

	ASSERT(BP_GET_DEDUP(bp));
	ASSERT(BP_GET_PSIZE(bp) == zio->io_size);
	ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);

	if (zio->io_child_error[ZIO_CHILD_DDT]) {
		ddt_t *ddt = ddt_select(zio->io_spa, bp);
		ddt_entry_t *dde = zio->io_vsd;
		if (ddt == NULL) {
			ASSERT(spa_load_state(zio->io_spa) != SPA_LOAD_NONE);
			return (ZIO_PIPELINE_CONTINUE);
		}
		if (dde == NULL) {
			zio->io_stage = ZIO_STAGE_DDT_READ_START >> 1;
			zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, B_FALSE);
			return (ZIO_PIPELINE_STOP);
		}
		if (dde->dde_repair_data != NULL) {
			bcopy(dde->dde_repair_data, zio->io_data,
			    zio->io_size);
			zio->io_child_error[ZIO_CHILD_DDT] = 0;
		}
		ddt_repair_done(ddt, dde);
		zio->io_vsd = NULL;
	}

	ASSERT(zio->io_vsd == NULL);

	return (ZIO_PIPELINE_CONTINUE);
}

static boolean_t
zio_ddt_collision(zio_t *zio, ddt_t *ddt, ddt_entry_t *dde)
{
	spa_t *spa = zio->io_spa;

	/*
	 * Note: we compare the original data, not the transformed data,
	 * because when zio->io_bp is an override bp, we will not have
	 * pushed the I/O transforms.  That's an important optimization
	 * because otherwise we'd compress/encrypt all dmu_sync() data twice.
	 */
	for (int p = DDT_PHYS_SINGLE; p <= DDT_PHYS_TRIPLE; p++) {
		zio_t *lio = dde->dde_lead_zio[p];

		if (lio != NULL) {
			return (lio->io_orig_size != zio->io_orig_size ||
			    bcmp(zio->io_orig_data, lio->io_orig_data,
			    zio->io_orig_size) != 0);
		}
	}

	for (int p = DDT_PHYS_SINGLE; p <= DDT_PHYS_TRIPLE; p++) {
		ddt_phys_t *ddp = &dde->dde_phys[p];

		if (ddp->ddp_phys_birth != 0) {
			arc_buf_t *abuf = NULL;
			arc_flags_t aflags = ARC_FLAG_WAIT;
			blkptr_t blk = *zio->io_bp;
			int error;

			ddt_bp_fill(ddp, &blk, ddp->ddp_phys_birth);

			ddt_exit(ddt);

			error = arc_read(NULL, spa, &blk,
			    arc_getbuf_func, &abuf, ZIO_PRIORITY_SYNC_READ,
			    ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE,
			    &aflags, &zio->io_bookmark);

			if (error == 0) {
				if (arc_buf_size(abuf) != zio->io_orig_size ||
				    bcmp(abuf->b_data, zio->io_orig_data,
				    zio->io_orig_size) != 0)
					error = SET_ERROR(EEXIST);
				VERIFY(arc_buf_remove_ref(abuf, &abuf));
			}

			ddt_enter(ddt);
			return (error != 0);
		}
	}

	return (B_FALSE);
}
static void
zio_ddt_child_write_ready(zio_t *zio)
{
	int p = zio->io_prop.zp_copies;
	ddt_t *ddt = ddt_select(zio->io_spa, zio->io_bp);
	ddt_entry_t *dde = zio->io_private;
	ddt_phys_t *ddp = &dde->dde_phys[p];
	zio_t *pio;

	if (zio->io_error)
		return;

	ddt_enter(ddt);

	ASSERT(dde->dde_lead_zio[p] == zio);

	ddt_phys_fill(ddp, zio->io_bp);

	while ((pio = zio_walk_parents(zio)) != NULL)
		ddt_bp_fill(ddp, pio->io_bp, zio->io_txg);

	ddt_exit(ddt);
}

static void
zio_ddt_child_write_done(zio_t *zio)
{
	int p = zio->io_prop.zp_copies;
	ddt_t *ddt = ddt_select(zio->io_spa, zio->io_bp);
	ddt_entry_t *dde = zio->io_private;
	ddt_phys_t *ddp = &dde->dde_phys[p];

	ddt_enter(ddt);

	ASSERT(ddp->ddp_refcnt == 0);
	ASSERT(dde->dde_lead_zio[p] == zio);
	dde->dde_lead_zio[p] = NULL;

	if (zio->io_error == 0) {
		while (zio_walk_parents(zio) != NULL)
			ddt_phys_addref(ddp);
	} else {
		ddt_phys_clear(ddp);
	}

	ddt_exit(ddt);
}

static void
zio_ddt_ditto_write_done(zio_t *zio)
{
	int p = DDT_PHYS_DITTO;
	zio_prop_t *zp = &zio->io_prop;
	blkptr_t *bp = zio->io_bp;
	ddt_t *ddt = ddt_select(zio->io_spa, bp);
	ddt_entry_t *dde = zio->io_private;
	ddt_phys_t *ddp = &dde->dde_phys[p];
	ddt_key_t *ddk = &dde->dde_key;

	ddt_enter(ddt);

	ASSERT(ddp->ddp_refcnt == 0);
	ASSERT(dde->dde_lead_zio[p] == zio);
	dde->dde_lead_zio[p] = NULL;

	if (zio->io_error == 0) {
		ASSERT(ZIO_CHECKSUM_EQUAL(bp->blk_cksum, ddk->ddk_cksum));
		ASSERT(zp->zp_copies < SPA_DVAS_PER_BP);
		ASSERT(zp->zp_copies == BP_GET_NDVAS(bp) - BP_IS_GANG(bp));
		if (ddp->ddp_phys_birth != 0)
			ddt_phys_free(ddt, ddk, ddp, zio->io_txg);
		ddt_phys_fill(ddp, bp);
	}

	ddt_exit(ddt);
}

static int
zio_ddt_write(zio_t *zio)
{
	spa_t *spa = zio->io_spa;
	blkptr_t *bp = zio->io_bp;
	uint64_t txg = zio->io_txg;
	zio_prop_t *zp = &zio->io_prop;
	int p = zp->zp_copies;
	int ditto_copies;
	zio_t *cio = NULL;
	zio_t *dio = NULL;
	ddt_t *ddt = ddt_select(spa, bp);
	ddt_entry_t *dde;
	ddt_phys_t *ddp;

	ASSERT(BP_GET_DEDUP(bp));
	ASSERT(BP_GET_CHECKSUM(bp) == zp->zp_checksum);
	ASSERT(BP_IS_HOLE(bp) || zio->io_bp_override);

	ddt_enter(ddt);
	dde = ddt_lookup(ddt, bp, B_TRUE);
	ddp = &dde->dde_phys[p];

	if (zp->zp_dedup_verify && zio_ddt_collision(zio, ddt, dde)) {
		/*
		 * If we're using a weak checksum, upgrade to a strong checksum
		 * and try again.  If we're already using a strong checksum,
		 * we can't resolve it, so just convert to an ordinary write.
		 * (And automatically e-mail a paper to Nature?)
		 */
		if (!zio_checksum_table[zp->zp_checksum].ci_dedup) {
			zp->zp_checksum = spa_dedup_checksum(spa);
			zio_pop_transforms(zio);
			zio->io_stage = ZIO_STAGE_OPEN;
			BP_ZERO(bp);
		} else {
			zp->zp_dedup = B_FALSE;
		}
		zio->io_pipeline = ZIO_WRITE_PIPELINE;
		ddt_exit(ddt);
		return (ZIO_PIPELINE_CONTINUE);
	}

	ditto_copies = ddt_ditto_copies_needed(ddt, dde, ddp);
	ASSERT(ditto_copies < SPA_DVAS_PER_BP);

	if (ditto_copies > ddt_ditto_copies_present(dde) &&
	    dde->dde_lead_zio[DDT_PHYS_DITTO] == NULL) {
		zio_prop_t czp = *zp;

		czp.zp_copies = ditto_copies;

		/*
		 * If we arrived here with an override bp, we won't have run
		 * the transform stack, so we won't have the data we need to
		 * generate a child i/o.  So, toss the override bp and restart.
		 * This is safe, because using the override bp is just an
		 * optimization; and it's rare, so the cost doesn't matter.
		 */
		if (zio->io_bp_override) {
			zio_pop_transforms(zio);
			zio->io_stage = ZIO_STAGE_OPEN;
			zio->io_pipeline = ZIO_WRITE_PIPELINE;
			zio->io_bp_override = NULL;
			BP_ZERO(bp);
			ddt_exit(ddt);
			return (ZIO_PIPELINE_CONTINUE);
		}

		dio = zio_write(zio, spa, txg, bp, zio->io_orig_data,
		    zio->io_orig_size, &czp, NULL, NULL,
		    zio_ddt_ditto_write_done, dde, zio->io_priority,
		    ZIO_DDT_CHILD_FLAGS(zio), &zio->io_bookmark);

		zio_push_transform(dio, zio->io_data, zio->io_size, 0, NULL);
		dde->dde_lead_zio[DDT_PHYS_DITTO] = dio;
	}

	if (ddp->ddp_phys_birth != 0 || dde->dde_lead_zio[p] != NULL) {
		if (ddp->ddp_phys_birth != 0)
			ddt_bp_fill(ddp, bp, txg);
		if (dde->dde_lead_zio[p] != NULL)
			zio_add_child(zio, dde->dde_lead_zio[p]);
		else
			ddt_phys_addref(ddp);
	} else if (zio->io_bp_override) {
		ASSERT(bp->blk_birth == txg);
		ASSERT(BP_EQUAL(bp, zio->io_bp_override));
		ddt_phys_fill(ddp, bp);
		ddt_phys_addref(ddp);
	} else {
		cio = zio_write(zio, spa, txg, bp, zio->io_orig_data,
		    zio->io_orig_size, zp, zio_ddt_child_write_ready, NULL,
		    zio_ddt_child_write_done, dde, zio->io_priority,
		    ZIO_DDT_CHILD_FLAGS(zio), &zio->io_bookmark);

		zio_push_transform(cio, zio->io_data, zio->io_size, 0, NULL);
		dde->dde_lead_zio[p] = cio;
	}

	ddt_exit(ddt);

	if (cio)
		zio_nowait(cio);
	if (dio)
		zio_nowait(dio);

	return (ZIO_PIPELINE_CONTINUE);
}
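/*
 * The tail of zio_ddt_write() above boils down to a three-way decision.
 * A condensed sketch (illustrative only; the real code also handles the
 * ditto-block child and holds the DDT lock throughout):
 */
#if 0	/* example only; not compiled */
static void
example_ddt_write_decision(zio_t *zio, ddt_entry_t *dde, ddt_phys_t *ddp,
    int p)
{
	if (ddp->ddp_phys_birth != 0 || dde->dde_lead_zio[p] != NULL) {
		/*
		 * Duplicate: point our bp at the existing on-disk copy,
		 * or hang off the in-flight leader that is writing it.
		 */
	} else if (zio->io_bp_override != NULL) {
		/*
		 * The data was already written (e.g. by dmu_sync());
		 * just record the bp in the DDT and take a reference.
		 */
	} else {
		/*
		 * First copy: issue a child write and become the lead
		 * zio that later writers of the same data attach to.
		 */
	}
}
#endif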
ddt_entry_t *freedde;	/* for debugging */

static int
zio_ddt_free(zio_t *zio)
{
	spa_t *spa = zio->io_spa;
	blkptr_t *bp = zio->io_bp;
	ddt_t *ddt = ddt_select(spa, bp);
	ddt_entry_t *dde;
	ddt_phys_t *ddp;

	ASSERT(BP_GET_DEDUP(bp));
	ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);

	ddt_enter(ddt);
	freedde = dde = ddt_lookup(ddt, bp, B_TRUE);
	ddp = ddt_phys_select(dde, bp);
	ddt_phys_decref(ddp);
	ddt_exit(ddt);

	return (ZIO_PIPELINE_CONTINUE);
}

/*
 * ==========================================================================
 * Allocate and free blocks
 * ==========================================================================
 */
static int
zio_dva_allocate(zio_t *zio)
{
	spa_t *spa = zio->io_spa;
	metaslab_class_t *mc = spa_normal_class(spa);
	blkptr_t *bp = zio->io_bp;
	int error;
	int flags = 0;

	if (zio->io_gang_leader == NULL) {
		ASSERT(zio->io_child_type > ZIO_CHILD_GANG);
		zio->io_gang_leader = zio;
	}

	ASSERT(BP_IS_HOLE(bp));
	ASSERT0(BP_GET_NDVAS(bp));
	ASSERT3U(zio->io_prop.zp_copies, >, 0);
	ASSERT3U(zio->io_prop.zp_copies, <=, spa_max_replication(spa));
	ASSERT3U(zio->io_size, ==, BP_GET_PSIZE(bp));

	/*
	 * The dump device does not support gang blocks so allocation on
	 * behalf of the dump device (i.e. ZIO_FLAG_NODATA) must avoid
	 * the "fast" gang feature.
	 */
	flags |= (zio->io_flags & ZIO_FLAG_NODATA) ? METASLAB_GANG_AVOID : 0;
	flags |= (zio->io_flags & ZIO_FLAG_GANG_CHILD) ?
	    METASLAB_GANG_CHILD : 0;
	error = metaslab_alloc(spa, mc, zio->io_size, bp,
	    zio->io_prop.zp_copies, zio->io_txg, NULL, flags);

	if (error) {
		spa_dbgmsg(spa, "%s: metaslab allocation failure: zio %p, "
		    "size %llu, error %d", spa_name(spa), zio, zio->io_size,
		    error);
		if (error == ENOSPC && zio->io_size > SPA_MINBLOCKSIZE)
			return (zio_write_gang_block(zio));
		zio->io_error = error;
	}

	return (ZIO_PIPELINE_CONTINUE);
}

static int
zio_dva_free(zio_t *zio)
{
	metaslab_free(zio->io_spa, zio->io_bp, zio->io_txg, B_FALSE);

	return (ZIO_PIPELINE_CONTINUE);
}

static int
zio_dva_claim(zio_t *zio)
{
	int error;

	error = metaslab_claim(zio->io_spa, zio->io_bp, zio->io_txg);
	if (error)
		zio->io_error = error;

	return (ZIO_PIPELINE_CONTINUE);
}

/*
 * Undo an allocation.  This is used by zio_done() when an I/O fails
 * and we want to give back the block we just allocated.
 * This handles both normal blocks and gang blocks.
 */
static void
zio_dva_unallocate(zio_t *zio, zio_gang_node_t *gn, blkptr_t *bp)
{
	ASSERT(bp->blk_birth == zio->io_txg || BP_IS_HOLE(bp));
	ASSERT(zio->io_bp_override == NULL);

	if (!BP_IS_HOLE(bp))
		metaslab_free(zio->io_spa, bp, bp->blk_birth, B_TRUE);

	if (gn != NULL) {
		for (int g = 0; g < SPA_GBH_NBLKPTRS; g++) {
			zio_dva_unallocate(zio, gn->gn_child[g],
			    &gn->gn_gbh->zg_blkptr[g]);
		}
	}
}
/*
 * Try to allocate an intent log block.  Return 0 on success, errno on failure.
 */
int
zio_alloc_zil(spa_t *spa, uint64_t txg, blkptr_t *new_bp, blkptr_t *old_bp,
    uint64_t size, boolean_t use_slog)
{
	int error = 1;

	ASSERT(txg > spa_syncing_txg(spa));

	/*
	 * ZIL blocks are always contiguous (i.e. not gang blocks) so we
	 * set the METASLAB_GANG_AVOID flag so that they don't "fast gang"
	 * when allocating them.
	 */
	if (use_slog) {
		error = metaslab_alloc(spa, spa_log_class(spa), size,
		    new_bp, 1, txg, old_bp,
		    METASLAB_HINTBP_AVOID | METASLAB_GANG_AVOID);
	}

	if (error) {
		error = metaslab_alloc(spa, spa_normal_class(spa), size,
		    new_bp, 1, txg, old_bp,
		    METASLAB_HINTBP_AVOID);
	}

	if (error == 0) {
		BP_SET_LSIZE(new_bp, size);
		BP_SET_PSIZE(new_bp, size);
		BP_SET_COMPRESS(new_bp, ZIO_COMPRESS_OFF);
		BP_SET_CHECKSUM(new_bp,
		    spa_version(spa) >= SPA_VERSION_SLIM_ZIL
		    ? ZIO_CHECKSUM_ZILOG2 : ZIO_CHECKSUM_ZILOG);
		BP_SET_TYPE(new_bp, DMU_OT_INTENT_LOG);
		BP_SET_LEVEL(new_bp, 0);
		BP_SET_DEDUP(new_bp, 0);
		BP_SET_BYTEORDER(new_bp, ZFS_HOST_BYTEORDER);
	}

	return (error);
}

/*
 * Free an intent log block.
 */
void
zio_free_zil(spa_t *spa, uint64_t txg, blkptr_t *bp)
{
	ASSERT(BP_GET_TYPE(bp) == DMU_OT_INTENT_LOG);
	ASSERT(!BP_IS_GANG(bp));

	zio_free(spa, txg, bp);
}
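/*
 * A minimal usage sketch (hypothetical caller, not the real ZIL code):
 * allocate the next log block, preferring the separate log class and
 * falling back to the normal class as zio_alloc_zil() does internally.
 */
#if 0	/* example only; not compiled */
static int
example_alloc_log_block(spa_t *spa, uint64_t txg, blkptr_t *prev_bp,
    uint64_t size)
{
	blkptr_t new_bp;
	int error;

	BP_ZERO(&new_bp);
	error = zio_alloc_zil(spa, txg, &new_bp, prev_bp, size, B_TRUE);
	if (error == 0) {
		/* new_bp now names a fresh DMU_OT_INTENT_LOG block. */
	}
	return (error);
}
#endif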
/*
 * ==========================================================================
 * Read, write and delete to physical devices
 * ==========================================================================
 */
static int
zio_vdev_io_start(zio_t *zio)
{
	vdev_t *vd = zio->io_vd;
	uint64_t align;
	spa_t *spa = zio->io_spa;
	int ret;

	ASSERT(zio->io_error == 0);
	ASSERT(zio->io_child_error[ZIO_CHILD_VDEV] == 0);

	if (vd == NULL) {
		if (!(zio->io_flags & ZIO_FLAG_CONFIG_WRITER))
			spa_config_enter(spa, SCL_ZIO, zio, RW_READER);

		/*
		 * The mirror_ops handle multiple DVAs in a single BP.
		 */
		return (vdev_mirror_ops.vdev_op_io_start(zio));
	}

	if (vd->vdev_ops->vdev_op_leaf && zio->io_type == ZIO_TYPE_FREE &&
	    zio->io_priority == ZIO_PRIORITY_NOW) {
		trim_map_free(vd, zio->io_offset, zio->io_size, zio->io_txg);
		return (ZIO_PIPELINE_CONTINUE);
	}

	/*
	 * We keep track of time-sensitive I/Os so that the scan thread
	 * can quickly react to certain workloads.  In particular, we care
	 * about non-scrubbing, top-level reads and writes with the following
	 * characteristics:
	 *	- synchronous writes of user data to non-slog devices
	 *	- any reads of user data
	 * When these conditions are met, adjust the timestamp of spa_last_io
	 * which allows the scan thread to adjust its workload accordingly.
	 */
	if (!(zio->io_flags & ZIO_FLAG_SCAN_THREAD) && zio->io_bp != NULL &&
	    vd == vd->vdev_top && !vd->vdev_islog &&
	    zio->io_bookmark.zb_objset != DMU_META_OBJSET &&
	    zio->io_txg != spa_syncing_txg(spa)) {
		uint64_t old = spa->spa_last_io;
		uint64_t new = ddi_get_lbolt64();
		if (old != new)
			(void) atomic_cas_64(&spa->spa_last_io, old, new);
	}

	align = 1ULL << vd->vdev_top->vdev_ashift;

	if ((!(zio->io_flags & ZIO_FLAG_PHYSICAL) ||
	    (vd->vdev_top->vdev_physical_ashift > SPA_MINBLOCKSHIFT)) &&
	    P2PHASE(zio->io_size, align) != 0) {
		/* Transform logical writes to be a full physical block size. */
		uint64_t asize = P2ROUNDUP(zio->io_size, align);
		char *abuf = NULL;
		if (zio->io_type == ZIO_TYPE_READ ||
		    zio->io_type == ZIO_TYPE_WRITE)
			abuf = zio_buf_alloc(asize);
		ASSERT(vd == vd->vdev_top);
		if (zio->io_type == ZIO_TYPE_WRITE) {
			bcopy(zio->io_data, abuf, zio->io_size);
			bzero(abuf + zio->io_size, asize - zio->io_size);
		}
		zio_push_transform(zio, abuf, asize, abuf ? asize : 0,
		    zio_subblock);
	}

	/*
	 * If this is not a physical io, make sure that it is properly aligned
	 * before proceeding.
	 */
	if (!(zio->io_flags & ZIO_FLAG_PHYSICAL)) {
		ASSERT0(P2PHASE(zio->io_offset, align));
		ASSERT0(P2PHASE(zio->io_size, align));
	} else {
		/*
		 * For physical writes, we allow 512b aligned writes and assume
		 * the device will perform a read-modify-write as necessary.
		 */
		ASSERT0(P2PHASE(zio->io_offset, SPA_MINBLOCKSIZE));
		ASSERT0(P2PHASE(zio->io_size, SPA_MINBLOCKSIZE));
	}

	VERIFY(zio->io_type == ZIO_TYPE_READ || spa_writeable(spa));

	/*
	 * If this is a repair I/O, and there's no self-healing involved --
	 * that is, we're just resilvering what we expect to resilver --
	 * then don't do the I/O unless zio's txg is actually in vd's DTL.
	 * This prevents spurious resilvering with nested replication.
	 * For example, given a mirror of mirrors, (A+B)+(C+D), if only
	 * A is out of date, we'll read from C+D, then use the data to
	 * resilver A+B -- but we don't actually want to resilver B, just A.
	 * The top-level mirror has no way to know this, so instead we just
	 * discard unnecessary repairs as we work our way down the vdev tree.
	 * The same logic applies to any form of nested replication:
	 * ditto + mirror, RAID-Z + replacing, etc.  This covers them all.
	 */
	if ((zio->io_flags & ZIO_FLAG_IO_REPAIR) &&
	    !(zio->io_flags & ZIO_FLAG_SELF_HEAL) &&
	    zio->io_txg != 0 &&	/* not a delegated i/o */
	    !vdev_dtl_contains(vd, DTL_PARTIAL, zio->io_txg, 1)) {
		ASSERT(zio->io_type == ZIO_TYPE_WRITE);
		zio_vdev_io_bypass(zio);
		return (ZIO_PIPELINE_CONTINUE);
	}

	if (vd->vdev_ops->vdev_op_leaf) {
		switch (zio->io_type) {
		case ZIO_TYPE_READ:
			if (vdev_cache_read(zio))
				return (ZIO_PIPELINE_CONTINUE);
			/* FALLTHROUGH */
		case ZIO_TYPE_WRITE:
		case ZIO_TYPE_FREE:
			if ((zio = vdev_queue_io(zio)) == NULL)
				return (ZIO_PIPELINE_STOP);

			if (!vdev_accessible(vd, zio)) {
				zio->io_error = SET_ERROR(ENXIO);
				zio_interrupt(zio);
				return (ZIO_PIPELINE_STOP);
			}
			break;
		}
		/*
		 * Note that we ignore repair writes for TRIM because they can
		 * conflict with normal writes.  This isn't an issue because,
		 * by definition, we only repair blocks that aren't freed.
		 */
		if (zio->io_type == ZIO_TYPE_WRITE &&
		    !(zio->io_flags & ZIO_FLAG_IO_REPAIR) &&
		    !trim_map_write_start(zio))
			return (ZIO_PIPELINE_STOP);
	}

	ret = vd->vdev_ops->vdev_op_io_start(zio);
	ASSERT(ret == ZIO_PIPELINE_STOP);

	return (ret);
}
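/*
 * A worked example of the alignment transform above (illustrative only).
 * With a top-level vdev_ashift of 12, align = 1ULL << 12 = 4096.  A
 * 2560-byte logical write has P2PHASE(2560, 4096) = 2560 (misaligned), so
 * it is padded: asize = P2ROUNDUP(2560, 4096) = 4096, the payload is copied
 * into a 4K buffer whose trailing 1536 bytes are zeroed, and for reads the
 * zio_subblock() transform copies the original io_size bytes back out of
 * the padded buffer when the transform stack is popped.
 */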
static int
zio_vdev_io_done(zio_t *zio)
{
	vdev_t *vd = zio->io_vd;
	vdev_ops_t *ops = vd ? vd->vdev_ops : &vdev_mirror_ops;
	boolean_t unexpected_error = B_FALSE;

	if (zio_wait_for_children(zio, ZIO_CHILD_VDEV, ZIO_WAIT_DONE))
		return (ZIO_PIPELINE_STOP);

	ASSERT(zio->io_type == ZIO_TYPE_READ ||
	    zio->io_type == ZIO_TYPE_WRITE || zio->io_type == ZIO_TYPE_FREE);

	if (vd != NULL && vd->vdev_ops->vdev_op_leaf &&
	    (zio->io_type == ZIO_TYPE_READ || zio->io_type == ZIO_TYPE_WRITE ||
	    zio->io_type == ZIO_TYPE_FREE)) {

		if (zio->io_type == ZIO_TYPE_WRITE &&
		    !(zio->io_flags & ZIO_FLAG_IO_REPAIR))
			trim_map_write_done(zio);

		vdev_queue_io_done(zio);

		if (zio->io_type == ZIO_TYPE_WRITE)
			vdev_cache_write(zio);

		if (zio_injection_enabled && zio->io_error == 0)
			zio->io_error = zio_handle_device_injection(vd,
			    zio, EIO);

		if (zio_injection_enabled && zio->io_error == 0)
			zio->io_error = zio_handle_label_injection(zio, EIO);

		if (zio->io_error) {
			if (zio->io_error == ENOTSUP &&
			    zio->io_type == ZIO_TYPE_FREE) {
				/* Not all devices support TRIM. */
			} else if (!vdev_accessible(vd, zio)) {
				zio->io_error = SET_ERROR(ENXIO);
			} else {
				unexpected_error = B_TRUE;
			}
		}
	}

	ops->vdev_op_io_done(zio);

	if (unexpected_error)
		VERIFY(vdev_probe(vd, zio) == NULL);

	return (ZIO_PIPELINE_CONTINUE);
}

/*
 * For non-raidz ZIOs, we can just copy aside the bad data read from the
 * disk, and use that to finish the checksum ereport later.
 */
static void
zio_vsd_default_cksum_finish(zio_cksum_report_t *zcr,
    const void *good_buf)
{
	/* no processing needed */
	zfs_ereport_finish_checksum(zcr, good_buf, zcr->zcr_cbdata, B_FALSE);
}

/*ARGSUSED*/
void
zio_vsd_default_cksum_report(zio_t *zio, zio_cksum_report_t *zcr, void *ignored)
{
	void *buf = zio_buf_alloc(zio->io_size);

	bcopy(zio->io_data, buf, zio->io_size);

	zcr->zcr_cbinfo = zio->io_size;
	zcr->zcr_cbdata = buf;
	zcr->zcr_finish = zio_vsd_default_cksum_finish;
	zcr->zcr_free = zio_buf_free;
}

static int
zio_vdev_io_assess(zio_t *zio)
{
	vdev_t *vd = zio->io_vd;

	if (zio_wait_for_children(zio, ZIO_CHILD_VDEV, ZIO_WAIT_DONE))
		return (ZIO_PIPELINE_STOP);

	if (vd == NULL && !(zio->io_flags & ZIO_FLAG_CONFIG_WRITER))
		spa_config_exit(zio->io_spa, SCL_ZIO, zio);

	if (zio->io_vsd != NULL) {
		zio->io_vsd_ops->vsd_free(zio);
		zio->io_vsd = NULL;
	}

	if (zio_injection_enabled && zio->io_error == 0)
		zio->io_error = zio_handle_fault_injection(zio, EIO);

	if (zio->io_type == ZIO_TYPE_FREE &&
	    zio->io_priority != ZIO_PRIORITY_NOW) {
		switch (zio->io_error) {
		case 0:
			ZIO_TRIM_STAT_INCR(bytes, zio->io_size);
			ZIO_TRIM_STAT_BUMP(success);
			break;
		case EOPNOTSUPP:
			ZIO_TRIM_STAT_BUMP(unsupported);
			break;
		default:
			ZIO_TRIM_STAT_BUMP(failed);
			break;
		}
	}

	/*
	 * If the I/O failed, determine whether we should attempt to retry it.
	 *
	 * On retry, we cut in line in the issue queue, since we don't want
	 * compression/checksumming/etc. work to prevent our (cheap) IO reissue.
	 */
	if (zio->io_error && vd == NULL &&
	    !(zio->io_flags & (ZIO_FLAG_DONT_RETRY | ZIO_FLAG_IO_RETRY))) {
		ASSERT(!(zio->io_flags & ZIO_FLAG_DONT_QUEUE));	/* not a leaf */
		ASSERT(!(zio->io_flags & ZIO_FLAG_IO_BYPASS));	/* not a leaf */
		zio->io_error = 0;
		zio->io_flags |= ZIO_FLAG_IO_RETRY |
		    ZIO_FLAG_DONT_CACHE | ZIO_FLAG_DONT_AGGREGATE;
		zio->io_stage = ZIO_STAGE_VDEV_IO_START >> 1;
		zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE,
		    zio_requeue_io_start_cut_in_line);
		return (ZIO_PIPELINE_STOP);
	}

	/*
	 * If we got an error on a leaf device, convert it to ENXIO
	 * if the device is not accessible at all.
	 */
	if (zio->io_error && vd != NULL && vd->vdev_ops->vdev_op_leaf &&
	    !vdev_accessible(vd, zio))
		zio->io_error = SET_ERROR(ENXIO);

	/*
	 * If we can't write to an interior vdev (mirror or RAID-Z),
	 * set vdev_cant_write so that we stop trying to allocate from it.
	 */
	if (zio->io_error == ENXIO && zio->io_type == ZIO_TYPE_WRITE &&
	    vd != NULL && !vd->vdev_ops->vdev_op_leaf) {
		vd->vdev_cant_write = B_TRUE;
	}

	if (zio->io_error)
		zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;

	if (vd != NULL && vd->vdev_ops->vdev_op_leaf &&
	    zio->io_physdone != NULL) {
		ASSERT(!(zio->io_flags & ZIO_FLAG_DELEGATED));
		ASSERT(zio->io_child_type == ZIO_CHILD_VDEV);
		zio->io_physdone(zio->io_logical);
	}

	return (ZIO_PIPELINE_CONTINUE);
}

void
zio_vdev_io_reissue(zio_t *zio)
{
	ASSERT(zio->io_stage == ZIO_STAGE_VDEV_IO_START);
	ASSERT(zio->io_error == 0);

	zio->io_stage >>= 1;
}

void
zio_vdev_io_redone(zio_t *zio)
{
	ASSERT(zio->io_stage == ZIO_STAGE_VDEV_IO_DONE);

	zio->io_stage >>= 1;
}

void
zio_vdev_io_bypass(zio_t *zio)
{
	ASSERT(zio->io_stage == ZIO_STAGE_VDEV_IO_START);
	ASSERT(zio->io_error == 0);

	zio->io_flags |= ZIO_FLAG_IO_BYPASS;
	zio->io_stage = ZIO_STAGE_VDEV_IO_ASSESS >> 1;
}

/*
 * ==========================================================================
 * Generate and verify checksums
 * ==========================================================================
 */
static int
zio_checksum_generate(zio_t *zio)
{
	blkptr_t *bp = zio->io_bp;
	enum zio_checksum checksum;

	if (bp == NULL) {
		/*
		 * This is zio_write_phys().
		 * We're either generating a label checksum, or none at all.
		 */
		checksum = zio->io_prop.zp_checksum;

		if (checksum == ZIO_CHECKSUM_OFF)
			return (ZIO_PIPELINE_CONTINUE);

		ASSERT(checksum == ZIO_CHECKSUM_LABEL);
	} else {
		if (BP_IS_GANG(bp) && zio->io_child_type == ZIO_CHILD_GANG) {
			ASSERT(!IO_IS_ALLOCATING(zio));
			checksum = ZIO_CHECKSUM_GANG_HEADER;
		} else {
			checksum = BP_GET_CHECKSUM(bp);
		}
	}

	zio_checksum_compute(zio, checksum, zio->io_data, zio->io_size);

	return (ZIO_PIPELINE_CONTINUE);
}

static int
zio_checksum_verify(zio_t *zio)
{
	zio_bad_cksum_t info;
	blkptr_t *bp = zio->io_bp;
	int error;

	ASSERT(zio->io_vd != NULL);

	if (bp == NULL) {
		/*
		 * This is zio_read_phys().
		 * We're either verifying a label checksum, or nothing at all.
		 */
		if (zio->io_prop.zp_checksum == ZIO_CHECKSUM_OFF)
			return (ZIO_PIPELINE_CONTINUE);

		ASSERT(zio->io_prop.zp_checksum == ZIO_CHECKSUM_LABEL);
	}

	if ((error = zio_checksum_error(zio, &info)) != 0) {
		zio->io_error = error;
		if (error == ECKSUM &&
		    !(zio->io_flags & ZIO_FLAG_SPECULATIVE)) {
			zfs_ereport_start_checksum(zio->io_spa,
			    zio->io_vd, zio, zio->io_offset,
			    zio->io_size, NULL, &info);
		}
	}

	return (ZIO_PIPELINE_CONTINUE);
}

/*
 * Called by RAID-Z to ensure we don't compute the checksum twice.
 */
void
zio_checksum_verified(zio_t *zio)
{
	zio->io_pipeline &= ~ZIO_STAGE_CHECKSUM_VERIFY;
}
/*
 * ==========================================================================
 * Error rank.  Errors are ranked in the order 0, ENXIO, ECKSUM, EIO, other.
 * An error of 0 indicates success.  ENXIO indicates whole-device failure,
 * which may be transient (e.g. unplugged) or permanent.  ECKSUM and EIO
 * indicate errors that are specific to one I/O, and most likely permanent.
 * Any other error is presumed to be worse because we weren't expecting it.
 * ==========================================================================
 */
int
zio_worst_error(int e1, int e2)
{
	static int zio_error_rank[] = { 0, ENXIO, ECKSUM, EIO };
	int r1, r2;

	for (r1 = 0; r1 < sizeof (zio_error_rank) / sizeof (int); r1++)
		if (e1 == zio_error_rank[r1])
			break;

	for (r2 = 0; r2 < sizeof (zio_error_rank) / sizeof (int); r2++)
		if (e2 == zio_error_rank[r2])
			break;

	return (r1 > r2 ? e1 : e2);
}
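/*
 * Examples of the ranking above (illustrative only).  Any unranked errno
 * runs past the end of zio_error_rank and therefore outranks everything:
 *
 *	zio_worst_error(0, ENXIO)	== ENXIO	(rank 1 beats rank 0)
 *	zio_worst_error(ECKSUM, EIO)	== EIO		(rank 3 beats rank 2)
 *	zio_worst_error(EIO, EINVAL)	== EINVAL	(unranked beats ranked)
 */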
/*
 * ==========================================================================
 * I/O completion
 * ==========================================================================
 */
static int
zio_ready(zio_t *zio)
{
	blkptr_t *bp = zio->io_bp;
	zio_t *pio, *pio_next;

	if (zio_wait_for_children(zio, ZIO_CHILD_GANG, ZIO_WAIT_READY) ||
	    zio_wait_for_children(zio, ZIO_CHILD_DDT, ZIO_WAIT_READY))
		return (ZIO_PIPELINE_STOP);

	if (zio->io_ready) {
		ASSERT(IO_IS_ALLOCATING(zio));
		ASSERT(bp->blk_birth == zio->io_txg || BP_IS_HOLE(bp) ||
		    (zio->io_flags & ZIO_FLAG_NOPWRITE));
		ASSERT(zio->io_children[ZIO_CHILD_GANG][ZIO_WAIT_READY] == 0);

		zio->io_ready(zio);
	}

	if (bp != NULL && bp != &zio->io_bp_copy)
		zio->io_bp_copy = *bp;

	if (zio->io_error)
		zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;

	mutex_enter(&zio->io_lock);
	zio->io_state[ZIO_WAIT_READY] = 1;
	pio = zio_walk_parents(zio);
	mutex_exit(&zio->io_lock);

	/*
	 * As we notify zio's parents, new parents could be added.
	 * New parents go to the head of zio's io_parent_list, however,
	 * so we will (correctly) not notify them.  The remainder of zio's
	 * io_parent_list, from 'pio_next' onward, cannot change because
	 * all parents must wait for us to be done before they can be done.
	 */
	for (; pio != NULL; pio = pio_next) {
		pio_next = zio_walk_parents(zio);
		zio_notify_parent(pio, zio, ZIO_WAIT_READY);
	}

	if (zio->io_flags & ZIO_FLAG_NODATA) {
		if (BP_IS_GANG(bp)) {
			zio->io_flags &= ~ZIO_FLAG_NODATA;
		} else {
			ASSERT((uintptr_t)zio->io_data < SPA_MAXBLOCKSIZE);
			zio->io_pipeline &= ~ZIO_VDEV_IO_STAGES;
		}
	}

	if (zio_injection_enabled &&
	    zio->io_spa->spa_syncing_txg == zio->io_txg)
		zio_handle_ignored_writes(zio);

	return (ZIO_PIPELINE_CONTINUE);
}

static int
zio_done(zio_t *zio)
{
	spa_t *spa = zio->io_spa;
	zio_t *lio = zio->io_logical;
	blkptr_t *bp = zio->io_bp;
	vdev_t *vd = zio->io_vd;
	uint64_t psize = zio->io_size;
	zio_t *pio, *pio_next;

	/*
	 * If our children haven't all completed,
	 * wait for them and then repeat this pipeline stage.
	 */
	if (zio_wait_for_children(zio, ZIO_CHILD_VDEV, ZIO_WAIT_DONE) ||
	    zio_wait_for_children(zio, ZIO_CHILD_GANG, ZIO_WAIT_DONE) ||
	    zio_wait_for_children(zio, ZIO_CHILD_DDT, ZIO_WAIT_DONE) ||
	    zio_wait_for_children(zio, ZIO_CHILD_LOGICAL, ZIO_WAIT_DONE))
		return (ZIO_PIPELINE_STOP);

	for (int c = 0; c < ZIO_CHILD_TYPES; c++)
		for (int w = 0; w < ZIO_WAIT_TYPES; w++)
			ASSERT(zio->io_children[c][w] == 0);

	if (bp != NULL && !BP_IS_EMBEDDED(bp)) {
		ASSERT(bp->blk_pad[0] == 0);
		ASSERT(bp->blk_pad[1] == 0);
		ASSERT(bcmp(bp, &zio->io_bp_copy, sizeof (blkptr_t)) == 0 ||
		    (bp == zio_unique_parent(zio)->io_bp));
		if (zio->io_type == ZIO_TYPE_WRITE && !BP_IS_HOLE(bp) &&
		    zio->io_bp_override == NULL &&
		    !(zio->io_flags & ZIO_FLAG_IO_REPAIR)) {
			ASSERT(!BP_SHOULD_BYTESWAP(bp));
			ASSERT3U(zio->io_prop.zp_copies, <=, BP_GET_NDVAS(bp));
			ASSERT(BP_COUNT_GANG(bp) == 0 ||
			    (BP_COUNT_GANG(bp) == BP_GET_NDVAS(bp)));
		}
		if (zio->io_flags & ZIO_FLAG_NOPWRITE)
			VERIFY(BP_EQUAL(bp, &zio->io_bp_orig));
	}

	/*
	 * If there were child vdev/gang/ddt errors, they apply to us now.
	 */
	zio_inherit_child_errors(zio, ZIO_CHILD_VDEV);
	zio_inherit_child_errors(zio, ZIO_CHILD_GANG);
	zio_inherit_child_errors(zio, ZIO_CHILD_DDT);

	/*
	 * If the I/O on the transformed data was successful, generate any
	 * checksum reports now while we still have the transformed data.
	 */
	if (zio->io_error == 0) {
		while (zio->io_cksum_report != NULL) {
			zio_cksum_report_t *zcr = zio->io_cksum_report;
			uint64_t align = zcr->zcr_align;
			uint64_t asize = P2ROUNDUP(psize, align);
			char *abuf = zio->io_data;

			if (asize != psize) {
				abuf = zio_buf_alloc(asize);
				bcopy(zio->io_data, abuf, psize);
				bzero(abuf + psize, asize - psize);
			}

			zio->io_cksum_report = zcr->zcr_next;
			zcr->zcr_next = NULL;
			zcr->zcr_finish(zcr, abuf);
			zfs_ereport_free_checksum(zcr);

			if (asize != psize)
				zio_buf_free(abuf, asize);
		}
	}

	zio_pop_transforms(zio);	/* note: may set zio->io_error */

	vdev_stat_update(zio, psize);

	if (zio->io_error) {
		/*
		 * If this I/O is attached to a particular vdev,
		 * generate an error message describing the I/O failure
		 * at the block level.  We ignore these errors if the
		 * device is currently unavailable.
		 */
		if (zio->io_error != ECKSUM && vd != NULL && !vdev_is_dead(vd))
			zfs_ereport_post(FM_EREPORT_ZFS_IO, spa, vd, zio, 0, 0);

		if ((zio->io_error == EIO || !(zio->io_flags &
		    (ZIO_FLAG_SPECULATIVE | ZIO_FLAG_DONT_PROPAGATE))) &&
		    zio == lio) {
			/*
			 * For logical I/O requests, tell the SPA to log the
			 * error and generate a logical data ereport.
			 */
			spa_log_error(spa, zio);
			zfs_ereport_post(FM_EREPORT_ZFS_DATA, spa, NULL, zio,
			    0, 0);
		}
	}

	if (zio->io_error && zio == lio) {
		/*
		 * Determine whether zio should be reexecuted.  This will
		 * propagate all the way to the root via zio_notify_parent().
		 */
		ASSERT(vd == NULL && bp != NULL);
		ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);

		if (IO_IS_ALLOCATING(zio) &&
		    !(zio->io_flags & ZIO_FLAG_CANFAIL)) {
			if (zio->io_error != ENOSPC)
				zio->io_reexecute |= ZIO_REEXECUTE_NOW;
			else
				zio->io_reexecute |= ZIO_REEXECUTE_SUSPEND;
		}

		if ((zio->io_type == ZIO_TYPE_READ ||
		    zio->io_type == ZIO_TYPE_FREE) &&
		    !(zio->io_flags & ZIO_FLAG_SCAN_THREAD) &&
		    zio->io_error == ENXIO &&
		    spa_load_state(spa) == SPA_LOAD_NONE &&
		    spa_get_failmode(spa) != ZIO_FAILURE_MODE_CONTINUE)
			zio->io_reexecute |= ZIO_REEXECUTE_SUSPEND;

		if (!(zio->io_flags & ZIO_FLAG_CANFAIL) && !zio->io_reexecute)
			zio->io_reexecute |= ZIO_REEXECUTE_SUSPEND;

		/*
		 * Here is a possibly good place to attempt to do
		 * either combinatorial reconstruction or error correction
		 * based on checksums.  It also might be a good place
		 * to send out preliminary ereports before we suspend
		 * processing.
		 */
	}

	/*
	 * If there were logical child errors, they apply to us now.
	 * We defer this until now to avoid conflating logical child
	 * errors with errors that happened to the zio itself when
	 * updating vdev stats and reporting FMA events above.
	 */
	zio_inherit_child_errors(zio, ZIO_CHILD_LOGICAL);

	if ((zio->io_error || zio->io_reexecute) &&
	    IO_IS_ALLOCATING(zio) && zio->io_gang_leader == zio &&
	    !(zio->io_flags & (ZIO_FLAG_IO_REWRITE | ZIO_FLAG_NOPWRITE)))
		zio_dva_unallocate(zio, zio->io_gang_tree, bp);

	zio_gang_tree_free(&zio->io_gang_tree);

	/*
	 * Godfather I/Os should never suspend.
	 */
	if ((zio->io_flags & ZIO_FLAG_GODFATHER) &&
	    (zio->io_reexecute & ZIO_REEXECUTE_SUSPEND))
		zio->io_reexecute = 0;

	if (zio->io_reexecute) {
		/*
		 * This is a logical I/O that wants to reexecute.
		 *
		 * Reexecute is top-down.  When an i/o fails, if it's not
		 * the root, it simply notifies its parent and sticks around.
		 * The parent, seeing that it still has children in zio_done(),
		 * does the same.  This percolates all the way up to the root.
		 * The root i/o will reexecute or suspend the entire tree.
		 *
		 * This approach ensures that zio_reexecute() honors
		 * all the original i/o dependency relationships, e.g.
		 * parents not executing until children are ready.
		 */
		ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);

		zio->io_gang_leader = NULL;

		mutex_enter(&zio->io_lock);
		zio->io_state[ZIO_WAIT_DONE] = 1;
		mutex_exit(&zio->io_lock);

		/*
		 * "The Godfather" I/O monitors its children but is
		 * not a true parent to them.  It will track them through
		 * the pipeline but severs its ties whenever they get into
		 * trouble (e.g. suspended).  This allows "The Godfather"
		 * I/O to return status without blocking.
		 */
		for (pio = zio_walk_parents(zio); pio != NULL; pio = pio_next) {
			zio_link_t *zl = zio->io_walk_link;
			pio_next = zio_walk_parents(zio);

			if ((pio->io_flags & ZIO_FLAG_GODFATHER) &&
			    (zio->io_reexecute & ZIO_REEXECUTE_SUSPEND)) {
				zio_remove_child(pio, zio, zl);
				zio_notify_parent(pio, zio, ZIO_WAIT_DONE);
			}
		}

		if ((pio = zio_unique_parent(zio)) != NULL) {
			/*
			 * We're not a root i/o, so there's nothing to do
			 * but notify our parent.  Don't propagate errors
			 * upward since we haven't permanently failed yet.
			 */
			ASSERT(!(zio->io_flags & ZIO_FLAG_GODFATHER));
			zio->io_flags |= ZIO_FLAG_DONT_PROPAGATE;
			zio_notify_parent(pio, zio, ZIO_WAIT_DONE);
		} else if (zio->io_reexecute & ZIO_REEXECUTE_SUSPEND) {
			/*
			 * We'd fail again if we reexecuted now, so suspend
			 * until conditions improve (e.g. device comes online).
			 */
			zio_suspend(spa, zio);
		} else {
			/*
			 * Reexecution is potentially a huge amount of work.
			 * Hand it off to the otherwise-unused claim taskq.
			 */
#if defined(illumos) || !defined(_KERNEL)
			ASSERT(zio->io_tqent.tqent_next == NULL);
#else
			ASSERT(zio->io_tqent.tqent_task.ta_pending == 0);
#endif
			spa_taskq_dispatch_ent(spa, ZIO_TYPE_CLAIM,
			    ZIO_TASKQ_ISSUE, (task_func_t *)zio_reexecute, zio,
			    0, &zio->io_tqent);
		}
		return (ZIO_PIPELINE_STOP);
	}

	ASSERT(zio->io_child_count == 0);
	ASSERT(zio->io_reexecute == 0);
	ASSERT(zio->io_error == 0 || (zio->io_flags & ZIO_FLAG_CANFAIL));

	/*
	 * Report any checksum errors, since the I/O is complete.
	 */
	while (zio->io_cksum_report != NULL) {
		zio_cksum_report_t *zcr = zio->io_cksum_report;
		zio->io_cksum_report = zcr->zcr_next;
		zcr->zcr_next = NULL;
		zcr->zcr_finish(zcr, NULL);
		zfs_ereport_free_checksum(zcr);
	}

	/*
	 * It is the responsibility of the done callback to ensure that this
	 * particular zio is no longer discoverable for adoption, and as
	 * such, cannot acquire any new parents.
	 */
	if (zio->io_done)
		zio->io_done(zio);

	mutex_enter(&zio->io_lock);
	zio->io_state[ZIO_WAIT_DONE] = 1;
	mutex_exit(&zio->io_lock);

	for (pio = zio_walk_parents(zio); pio != NULL; pio = pio_next) {
		zio_link_t *zl = zio->io_walk_link;
		pio_next = zio_walk_parents(zio);
		zio_remove_child(pio, zio, zl);
		zio_notify_parent(pio, zio, ZIO_WAIT_DONE);
	}

	if (zio->io_waiter != NULL) {
		mutex_enter(&zio->io_lock);
		zio->io_executor = NULL;
		cv_broadcast(&zio->io_cv);
		mutex_exit(&zio->io_lock);
	} else {
		zio_destroy(zio);
	}

	return (ZIO_PIPELINE_STOP);
}

/*
 * ==========================================================================
 * I/O pipeline definition
 * ==========================================================================
 */
static zio_pipe_stage_t *zio_pipeline[] = {
	NULL,
	zio_read_bp_init,
	zio_free_bp_init,
	zio_issue_async,
	zio_write_bp_init,
	zio_checksum_generate,
	zio_nop_write,
	zio_ddt_read_start,
	zio_ddt_read_done,
	zio_ddt_write,
	zio_ddt_free,
	zio_gang_assemble,
	zio_gang_issue,
	zio_dva_allocate,
	zio_dva_free,
	zio_dva_claim,
	zio_ready,
	zio_vdev_io_start,
	zio_vdev_io_done,
	zio_vdev_io_assess,
	zio_checksum_verify,
	zio_done
};
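/*
 * zio_execute() (above) indexes this table with highbit64(stage) - 1:
 * each ZIO_STAGE_* value is a distinct single bit, so a stage's table slot
 * is simply its bit position.  An illustrative check (hypothetical, not
 * part of the pipeline):
 */
#if 0	/* example only; not compiled */
static void
example_stage_to_index(void)
{
	/* ZIO_STAGE_OPEN is bit 0 (1 << 0), so it maps to slot 0 ... */
	ASSERT3U(highbit64(ZIO_STAGE_OPEN) - 1, ==, 0);
	/* ... and ZIO_STAGE_DONE, the last stage, maps to the last slot. */
	ASSERT3U(highbit64(ZIO_STAGE_DONE) - 1, ==,
	    sizeof (zio_pipeline) / sizeof (zio_pipeline[0]) - 1);
}
#endif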
/* dnp is the dnode for zb1->zb_object */
boolean_t
zbookmark_is_before(const dnode_phys_t *dnp, const zbookmark_phys_t *zb1,
    const zbookmark_phys_t *zb2)
{
	uint64_t zb1nextL0, zb2thisobj;

	ASSERT(zb1->zb_objset == zb2->zb_objset);
	ASSERT(zb2->zb_level == 0);

	/* The objset_phys_t isn't before anything. */
	if (dnp == NULL)
		return (B_FALSE);

	zb1nextL0 = (zb1->zb_blkid + 1) <<
	    ((zb1->zb_level) * (dnp->dn_indblkshift - SPA_BLKPTRSHIFT));

	zb2thisobj = zb2->zb_object ? zb2->zb_object :
	    zb2->zb_blkid << (DNODE_BLOCK_SHIFT - DNODE_SHIFT);

	if (zb1->zb_object == DMU_META_DNODE_OBJECT) {
		uint64_t nextobj = zb1nextL0 *
		    (dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT) >> DNODE_SHIFT;
		return (nextobj <= zb2thisobj);
	}

	if (zb1->zb_object < zb2thisobj)
		return (B_TRUE);
	if (zb1->zb_object > zb2thisobj)
		return (B_FALSE);
	if (zb2->zb_object == DMU_META_DNODE_OBJECT)
		return (B_FALSE);
	return (zb1nextL0 <= zb2->zb_blkid);
}
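/*
 * A worked example of the zb1nextL0 computation above (illustrative only).
 * With dn_indblkshift == 17 and 128-byte block pointers (SPA_BLKPTRSHIFT
 * == 7), each indirect level fans out 2^(17 - 7) = 1024 ways.  So for a
 * level-1 bookmark with zb_blkid == 3, the first L0 block past its subtree
 * is
 *
 *	zb1nextL0 = (3 + 1) << (1 * 10) = 4096,
 *
 * and zb1 is "before" a level-0 bookmark zb2 in the same object whenever
 * zb2->zb_blkid >= 4096.
 */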