dsl_scan.c revision 272665
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011, 2014 by Delphix. All rights reserved.
 */

#include <sys/dsl_scan.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_synctask.h>
#include <sys/dnode.h>
#include <sys/dmu_tx.h>
#include <sys/dmu_objset.h>
#include <sys/arc.h>
#include <sys/zap.h>
#include <sys/zio.h>
#include <sys/zfs_context.h>
#include <sys/fs/zfs.h>
#include <sys/zfs_znode.h>
#include <sys/spa_impl.h>
#include <sys/vdev_impl.h>
#include <sys/zil_impl.h>
#include <sys/zio_checksum.h>
#include <sys/ddt.h>
#include <sys/sa.h>
#include <sys/sa_impl.h>
#include <sys/zfeature.h>
#ifdef _KERNEL
#include <sys/zfs_vfsops.h>
#endif

typedef int (scan_cb_t)(dsl_pool_t *, const blkptr_t *,
    const zbookmark_phys_t *);

static scan_cb_t dsl_scan_scrub_cb;
static void dsl_scan_cancel_sync(void *, dmu_tx_t *);
static void dsl_scan_sync_state(dsl_scan_t *, dmu_tx_t *tx);

unsigned int zfs_top_maxinflight = 32;	/* maximum I/Os per top-level */
unsigned int zfs_resilver_delay = 2;	/* number of ticks to delay resilver */
unsigned int zfs_scrub_delay = 4;	/* number of ticks to delay scrub */
unsigned int zfs_scan_idle = 50;	/* idle window in clock ticks */

unsigned int zfs_scan_min_time_ms = 1000; /* min millisecs to scrub per txg */
unsigned int zfs_free_min_time_ms = 1000; /* min millisecs to free per txg */
unsigned int zfs_resilver_min_time_ms = 3000; /* min millisecs to resilver
    per txg */
boolean_t zfs_no_scrub_io = B_FALSE; /* set to disable scrub i/o */
boolean_t zfs_no_scrub_prefetch = B_FALSE; /* set to disable scrub prefetch */

SYSCTL_DECL(_vfs_zfs);
TUNABLE_INT("vfs.zfs.top_maxinflight", &zfs_top_maxinflight);
SYSCTL_UINT(_vfs_zfs, OID_AUTO, top_maxinflight, CTLFLAG_RW,
    &zfs_top_maxinflight, 0, "Maximum I/Os per top-level vdev");
TUNABLE_INT("vfs.zfs.resilver_delay", &zfs_resilver_delay);
SYSCTL_UINT(_vfs_zfs, OID_AUTO, resilver_delay, CTLFLAG_RW,
    &zfs_resilver_delay, 0, "Number of ticks to delay resilver");
TUNABLE_INT("vfs.zfs.scrub_delay", &zfs_scrub_delay);
SYSCTL_UINT(_vfs_zfs, OID_AUTO, scrub_delay, CTLFLAG_RW,
    &zfs_scrub_delay, 0, "Number of ticks to delay scrub");
TUNABLE_INT("vfs.zfs.scan_idle", &zfs_scan_idle);
SYSCTL_UINT(_vfs_zfs, OID_AUTO, scan_idle, CTLFLAG_RW,
    &zfs_scan_idle, 0, "Idle scan window in clock ticks");
TUNABLE_INT("vfs.zfs.scan_min_time_ms", &zfs_scan_min_time_ms);
SYSCTL_UINT(_vfs_zfs, OID_AUTO, scan_min_time_ms, CTLFLAG_RW,
    &zfs_scan_min_time_ms, 0, "Min millisecs to scrub per txg");
TUNABLE_INT("vfs.zfs.free_min_time_ms", &zfs_free_min_time_ms);
SYSCTL_UINT(_vfs_zfs, OID_AUTO, free_min_time_ms, CTLFLAG_RW,
    &zfs_free_min_time_ms, 0, "Min millisecs to free per txg");
TUNABLE_INT("vfs.zfs.resilver_min_time_ms", &zfs_resilver_min_time_ms);
SYSCTL_UINT(_vfs_zfs, OID_AUTO, resilver_min_time_ms, CTLFLAG_RW,
    &zfs_resilver_min_time_ms, 0, "Min millisecs to resilver per txg");
TUNABLE_INT("vfs.zfs.no_scrub_io", &zfs_no_scrub_io);
SYSCTL_INT(_vfs_zfs, OID_AUTO, no_scrub_io, CTLFLAG_RW,
    &zfs_no_scrub_io, 0, "Disable scrub I/O");
TUNABLE_INT("vfs.zfs.no_scrub_prefetch", &zfs_no_scrub_prefetch);
SYSCTL_INT(_vfs_zfs, OID_AUTO, no_scrub_prefetch, CTLFLAG_RW,
    &zfs_no_scrub_prefetch, 0, "Disable scrub prefetching");

enum ddt_class zfs_scrub_ddt_class_max = DDT_CLASS_DUPLICATE;
/* max number of blocks to free in a single TXG */
uint64_t zfs_free_max_blocks = UINT64_MAX;
SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, free_max_blocks, CTLFLAG_RWTUN,
    &zfs_free_max_blocks, 0, "Maximum number of blocks to free in one TXG");

#define	DSL_SCAN_IS_SCRUB_RESILVER(scn) \
	((scn)->scn_phys.scn_func == POOL_SCAN_SCRUB || \
	(scn)->scn_phys.scn_func == POOL_SCAN_RESILVER)

extern int zfs_txg_timeout;

/* the order has to match pool_scan_type */
static scan_cb_t *scan_funcs[POOL_SCAN_FUNCS] = {
	NULL,
	dsl_scan_scrub_cb,	/* POOL_SCAN_SCRUB */
	dsl_scan_scrub_cb,	/* POOL_SCAN_RESILVER */
};

int
dsl_scan_init(dsl_pool_t *dp, uint64_t txg)
{
	int err;
	dsl_scan_t *scn;
	spa_t *spa = dp->dp_spa;
	uint64_t f;

	scn = dp->dp_scan = kmem_zalloc(sizeof (dsl_scan_t), KM_SLEEP);
	scn->scn_dp = dp;

	/*
	 * It's possible that we're resuming a scan after a reboot so
	 * make sure that the scan_async_destroying flag is initialized
	 * appropriately.
	 */
	ASSERT(!scn->scn_async_destroying);
	scn->scn_async_destroying = spa_feature_is_active(dp->dp_spa,
	    SPA_FEATURE_ASYNC_DESTROY);

	err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    "scrub_func", sizeof (uint64_t), 1, &f);
	if (err == 0) {
		/*
		 * There was an old-style scrub in progress.  Restart a
		 * new-style scrub from the beginning.
		 */
		scn->scn_restart_txg = txg;
		zfs_dbgmsg("old-style scrub was in progress; "
		    "restarting new-style scrub in txg %llu",
		    scn->scn_restart_txg);

		/*
		 * Load the queue obj from the old location so that it
		 * can be freed by dsl_scan_done().
		 */
		(void) zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
		    "scrub_queue", sizeof (uint64_t), 1,
		    &scn->scn_phys.scn_queue_obj);
	} else {
		err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
		    DMU_POOL_SCAN, sizeof (uint64_t), SCAN_PHYS_NUMINTS,
		    &scn->scn_phys);
		if (err == ENOENT)
			return (0);
		else if (err)
			return (err);

		if (scn->scn_phys.scn_state == DSS_SCANNING &&
		    spa_prev_software_version(dp->dp_spa) < SPA_VERSION_SCAN) {
			/*
			 * A new-type scrub was in progress on an old
			 * pool, and the pool was accessed by old
			 * software.  Restart from the beginning, since
			 * the old software may have changed the pool in
			 * the meantime.
			 */
			scn->scn_restart_txg = txg;
			zfs_dbgmsg("new-style scrub was modified "
			    "by old software; restarting in txg %llu",
			    scn->scn_restart_txg);
		}
	}

	spa_scan_stat_init(spa);
	return (0);
}

void
dsl_scan_fini(dsl_pool_t *dp)
{
	if (dp->dp_scan) {
		kmem_free(dp->dp_scan, sizeof (dsl_scan_t));
		dp->dp_scan = NULL;
	}
}

/* ARGSUSED */
static int
dsl_scan_setup_check(void *arg, dmu_tx_t *tx)
{
	dsl_scan_t *scn = dmu_tx_pool(tx)->dp_scan;

	if (scn->scn_phys.scn_state == DSS_SCANNING)
		return (SET_ERROR(EBUSY));

	return (0);
}
static void
dsl_scan_setup_sync(void *arg, dmu_tx_t *tx)
{
	dsl_scan_t *scn = dmu_tx_pool(tx)->dp_scan;
	pool_scan_func_t *funcp = arg;
	dmu_object_type_t ot = 0;
	dsl_pool_t *dp = scn->scn_dp;
	spa_t *spa = dp->dp_spa;

	ASSERT(scn->scn_phys.scn_state != DSS_SCANNING);
	ASSERT(*funcp > POOL_SCAN_NONE && *funcp < POOL_SCAN_FUNCS);
	bzero(&scn->scn_phys, sizeof (scn->scn_phys));
	scn->scn_phys.scn_func = *funcp;
	scn->scn_phys.scn_state = DSS_SCANNING;
	scn->scn_phys.scn_min_txg = 0;
	scn->scn_phys.scn_max_txg = tx->tx_txg;
	scn->scn_phys.scn_ddt_class_max = DDT_CLASSES - 1; /* the entire DDT */
	scn->scn_phys.scn_start_time = gethrestime_sec();
	scn->scn_phys.scn_errors = 0;
	scn->scn_phys.scn_to_examine = spa->spa_root_vdev->vdev_stat.vs_alloc;
	scn->scn_restart_txg = 0;
	scn->scn_done_txg = 0;
	spa_scan_stat_init(spa);

	if (DSL_SCAN_IS_SCRUB_RESILVER(scn)) {
		scn->scn_phys.scn_ddt_class_max = zfs_scrub_ddt_class_max;

		/* rewrite all disk labels */
		vdev_config_dirty(spa->spa_root_vdev);

		if (vdev_resilver_needed(spa->spa_root_vdev,
		    &scn->scn_phys.scn_min_txg, &scn->scn_phys.scn_max_txg)) {
			spa_event_notify(spa, NULL, ESC_ZFS_RESILVER_START);
		} else {
			spa_event_notify(spa, NULL, ESC_ZFS_SCRUB_START);
		}

		spa->spa_scrub_started = B_TRUE;
		/*
		 * If this is an incremental scrub, limit the DDT scrub phase
		 * to just the auto-ditto class (for correctness); the rest
		 * of the scrub should go faster using top-down pruning.
		 */
		if (scn->scn_phys.scn_min_txg > TXG_INITIAL)
			scn->scn_phys.scn_ddt_class_max = DDT_CLASS_DITTO;
	}

	/* back to the generic stuff */

	if (dp->dp_blkstats == NULL) {
		dp->dp_blkstats =
		    kmem_alloc(sizeof (zfs_all_blkstats_t), KM_SLEEP);
	}
	bzero(dp->dp_blkstats, sizeof (zfs_all_blkstats_t));

	if (spa_version(spa) < SPA_VERSION_DSL_SCRUB)
		ot = DMU_OT_ZAP_OTHER;

	scn->scn_phys.scn_queue_obj = zap_create(dp->dp_meta_objset,
	    ot ? ot : DMU_OT_SCAN_QUEUE, DMU_OT_NONE, 0, tx);

	dsl_scan_sync_state(scn, tx);

	spa_history_log_internal(spa, "scan setup", tx,
	    "func=%u mintxg=%llu maxtxg=%llu",
	    *funcp, scn->scn_phys.scn_min_txg, scn->scn_phys.scn_max_txg);
}
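
/*
 * Finish (or cancel) a scan in syncing context: remove any old-style
 * scrub state, free the on-disk work queue object, wait for in-flight
 * scrub I/O to drain, and reassess the DTLs.
 */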
/* ARGSUSED */
static void
dsl_scan_done(dsl_scan_t *scn, boolean_t complete, dmu_tx_t *tx)
{
	static const char *old_names[] = {
		"scrub_bookmark",
		"scrub_ddt_bookmark",
		"scrub_ddt_class_max",
		"scrub_queue",
		"scrub_min_txg",
		"scrub_max_txg",
		"scrub_func",
		"scrub_errors",
		NULL
	};

	dsl_pool_t *dp = scn->scn_dp;
	spa_t *spa = dp->dp_spa;
	int i;

	/* Remove any remnants of an old-style scrub. */
	for (i = 0; old_names[i]; i++) {
		(void) zap_remove(dp->dp_meta_objset,
		    DMU_POOL_DIRECTORY_OBJECT, old_names[i], tx);
	}

	if (scn->scn_phys.scn_queue_obj != 0) {
		VERIFY(0 == dmu_object_free(dp->dp_meta_objset,
		    scn->scn_phys.scn_queue_obj, tx));
		scn->scn_phys.scn_queue_obj = 0;
	}

	/*
	 * If we were "restarted" from a stopped state, don't bother
	 * with anything else.
	 */
	if (scn->scn_phys.scn_state != DSS_SCANNING)
		return;

	if (complete)
		scn->scn_phys.scn_state = DSS_FINISHED;
	else
		scn->scn_phys.scn_state = DSS_CANCELED;

	spa_history_log_internal(spa, "scan done", tx,
	    "complete=%u", complete);

	if (DSL_SCAN_IS_SCRUB_RESILVER(scn)) {
		mutex_enter(&spa->spa_scrub_lock);
		while (spa->spa_scrub_inflight > 0) {
			cv_wait(&spa->spa_scrub_io_cv,
			    &spa->spa_scrub_lock);
		}
		mutex_exit(&spa->spa_scrub_lock);
		spa->spa_scrub_started = B_FALSE;
		spa->spa_scrub_active = B_FALSE;

		/*
		 * If the scrub/resilver completed, update all DTLs to
		 * reflect this.  Whether it succeeded or not, vacate
		 * all temporary scrub DTLs.
		 */
		vdev_dtl_reassess(spa->spa_root_vdev, tx->tx_txg,
		    complete ? scn->scn_phys.scn_max_txg : 0, B_TRUE);
		if (complete) {
			spa_event_notify(spa, NULL, scn->scn_phys.scn_min_txg ?
			    ESC_ZFS_RESILVER_FINISH : ESC_ZFS_SCRUB_FINISH);
		}
		spa_errlog_rotate(spa);

		/*
		 * We may have finished replacing a device.
		 * Let the async thread assess this and handle the detach.
		 */
		spa_async_request(spa, SPA_ASYNC_RESILVER_DONE);
	}

	scn->scn_phys.scn_end_time = gethrestime_sec();
}

/* ARGSUSED */
static int
dsl_scan_cancel_check(void *arg, dmu_tx_t *tx)
{
	dsl_scan_t *scn = dmu_tx_pool(tx)->dp_scan;

	if (scn->scn_phys.scn_state != DSS_SCANNING)
		return (SET_ERROR(ENOENT));
	return (0);
}

/* ARGSUSED */
static void
dsl_scan_cancel_sync(void *arg, dmu_tx_t *tx)
{
	dsl_scan_t *scn = dmu_tx_pool(tx)->dp_scan;

	dsl_scan_done(scn, B_FALSE, tx);
	dsl_scan_sync_state(scn, tx);
}

int
dsl_scan_cancel(dsl_pool_t *dp)
{
	return (dsl_sync_task(spa_name(dp->dp_spa), dsl_scan_cancel_check,
	    dsl_scan_cancel_sync, NULL, 3, ZFS_SPACE_CHECK_RESERVED));
}

static void dsl_scan_visitbp(blkptr_t *bp,
    const zbookmark_phys_t *zb, dnode_phys_t *dnp, arc_buf_t *pbuf,
    dsl_dataset_t *ds, dsl_scan_t *scn, dmu_objset_type_t ostype,
    dmu_tx_t *tx);
static void dsl_scan_visitdnode(dsl_scan_t *, dsl_dataset_t *ds,
    dmu_objset_type_t ostype,
    dnode_phys_t *dnp, arc_buf_t *buf, uint64_t object, dmu_tx_t *tx);

void
dsl_free(dsl_pool_t *dp, uint64_t txg, const blkptr_t *bp)
{
	zio_free(dp->dp_spa, txg, bp);
}

void
dsl_free_sync(zio_t *pio, dsl_pool_t *dp, uint64_t txg, const blkptr_t *bpp)
{
	ASSERT(dsl_pool_sync_context(dp));
	zio_nowait(zio_free_sync(pio, dp->dp_spa, txg, bpp, BP_GET_PSIZE(bpp),
	    pio->io_flags));
}

static uint64_t
dsl_scan_ds_maxtxg(dsl_dataset_t *ds)
{
	uint64_t smt = ds->ds_dir->dd_pool->dp_scan->scn_phys.scn_max_txg;
	if (dsl_dataset_is_snapshot(ds))
		return (MIN(smt, ds->ds_phys->ds_creation_txg));
	return (smt);
}

static void
dsl_scan_sync_state(dsl_scan_t *scn, dmu_tx_t *tx)
{
	VERIFY0(zap_update(scn->scn_dp->dp_meta_objset,
	    DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_SCAN, sizeof (uint64_t), SCAN_PHYS_NUMINTS,
	    &scn->scn_phys, tx));
}
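
/*
 * Decide whether the scan should yield for this txg.  We pause once
 * zfs_txg_timeout has elapsed, once the per-function minimum time has
 * been spent and the txg sync is waiting on us, or when the pool is
 * shutting down; the bookmark we pause at is saved so a later txg can
 * resume from it.
 */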
static boolean_t
dsl_scan_check_pause(dsl_scan_t *scn, const zbookmark_phys_t *zb)
{
	uint64_t elapsed_nanosecs;
	unsigned int mintime;

	/* we never skip user/group accounting objects */
	if (zb && (int64_t)zb->zb_object < 0)
		return (B_FALSE);

	if (scn->scn_pausing)
		return (B_TRUE); /* we're already pausing */

	if (!ZB_IS_ZERO(&scn->scn_phys.scn_bookmark))
		return (B_FALSE); /* we're resuming */

	/* We only know how to resume from level-0 blocks. */
	if (zb && zb->zb_level != 0)
		return (B_FALSE);

	mintime = (scn->scn_phys.scn_func == POOL_SCAN_RESILVER) ?
	    zfs_resilver_min_time_ms : zfs_scan_min_time_ms;
	elapsed_nanosecs = gethrtime() - scn->scn_sync_start_time;
	if (elapsed_nanosecs / NANOSEC > zfs_txg_timeout ||
	    (NSEC2MSEC(elapsed_nanosecs) > mintime &&
	    txg_sync_waiting(scn->scn_dp)) ||
	    spa_shutting_down(scn->scn_dp->dp_spa)) {
		if (zb) {
			dprintf("pausing at bookmark %llx/%llx/%llx/%llx\n",
			    (longlong_t)zb->zb_objset,
			    (longlong_t)zb->zb_object,
			    (longlong_t)zb->zb_level,
			    (longlong_t)zb->zb_blkid);
			scn->scn_phys.scn_bookmark = *zb;
		}
		dprintf("pausing at DDT bookmark %llx/%llx/%llx/%llx\n",
		    (longlong_t)scn->scn_phys.scn_ddt_bookmark.ddb_class,
		    (longlong_t)scn->scn_phys.scn_ddt_bookmark.ddb_type,
		    (longlong_t)scn->scn_phys.scn_ddt_bookmark.ddb_checksum,
		    (longlong_t)scn->scn_phys.scn_ddt_bookmark.ddb_cursor);
		scn->scn_pausing = B_TRUE;
		return (B_TRUE);
	}
	return (B_FALSE);
}
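
/*
 * ZIL scanning: dsl_scan_zil_block() and dsl_scan_zil_record() are
 * zil_parse() callbacks that feed claimed-but-not-yet-replayed log
 * blocks and TX_WRITE records to the scan function.
 */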
typedef struct zil_scan_arg {
	dsl_pool_t	*zsa_dp;
	zil_header_t	*zsa_zh;
} zil_scan_arg_t;

/* ARGSUSED */
static int
dsl_scan_zil_block(zilog_t *zilog, blkptr_t *bp, void *arg, uint64_t claim_txg)
{
	zil_scan_arg_t *zsa = arg;
	dsl_pool_t *dp = zsa->zsa_dp;
	dsl_scan_t *scn = dp->dp_scan;
	zil_header_t *zh = zsa->zsa_zh;
	zbookmark_phys_t zb;

	if (BP_IS_HOLE(bp) || bp->blk_birth <= scn->scn_phys.scn_cur_min_txg)
		return (0);

	/*
	 * One block ("stubby") can be allocated a long time ago; we
	 * want to visit that one because it has been allocated
	 * (on-disk) even if it hasn't been claimed (even though for
	 * scrub there's nothing to do to it).
	 */
	if (claim_txg == 0 && bp->blk_birth >= spa_first_txg(dp->dp_spa))
		return (0);

	SET_BOOKMARK(&zb, zh->zh_log.blk_cksum.zc_word[ZIL_ZC_OBJSET],
	    ZB_ZIL_OBJECT, ZB_ZIL_LEVEL, bp->blk_cksum.zc_word[ZIL_ZC_SEQ]);

	VERIFY(0 == scan_funcs[scn->scn_phys.scn_func](dp, bp, &zb));
	return (0);
}

/* ARGSUSED */
static int
dsl_scan_zil_record(zilog_t *zilog, lr_t *lrc, void *arg, uint64_t claim_txg)
{
	if (lrc->lrc_txtype == TX_WRITE) {
		zil_scan_arg_t *zsa = arg;
		dsl_pool_t *dp = zsa->zsa_dp;
		dsl_scan_t *scn = dp->dp_scan;
		zil_header_t *zh = zsa->zsa_zh;
		lr_write_t *lr = (lr_write_t *)lrc;
		blkptr_t *bp = &lr->lr_blkptr;
		zbookmark_phys_t zb;

		if (BP_IS_HOLE(bp) ||
		    bp->blk_birth <= scn->scn_phys.scn_cur_min_txg)
			return (0);

		/*
		 * birth can be < claim_txg if this record's txg is
		 * already txg sync'ed (but this log block contains
		 * other records that are not synced)
		 */
		if (claim_txg == 0 || bp->blk_birth < claim_txg)
			return (0);

		SET_BOOKMARK(&zb, zh->zh_log.blk_cksum.zc_word[ZIL_ZC_OBJSET],
		    lr->lr_foid, ZB_ZIL_LEVEL,
		    lr->lr_offset / BP_GET_LSIZE(bp));

		VERIFY(0 == scan_funcs[scn->scn_phys.scn_func](dp, bp, &zb));
	}
	return (0);
}

static void
dsl_scan_zil(dsl_pool_t *dp, zil_header_t *zh)
{
	uint64_t claim_txg = zh->zh_claim_txg;
	zil_scan_arg_t zsa = { dp, zh };
	zilog_t *zilog;

	/*
	 * We only want to visit blocks that have been claimed but not yet
	 * replayed (or, in read-only mode, blocks that *would* be claimed).
	 */
	if (claim_txg == 0 && spa_writeable(dp->dp_spa))
		return;

	zilog = zil_alloc(dp->dp_meta_objset, zh);

	(void) zil_parse(zilog, dsl_scan_zil_block, dsl_scan_zil_record, &zsa,
	    claim_txg);

	zil_free(zilog);
}
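
/*
 * Issue an asynchronous (ARC_NOWAIT | ARC_PREFETCH) read for a child
 * block so that the synchronous reads done in dsl_scan_recurse() are
 * more likely to be ARC hits.
 */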
/* ARGSUSED */
static void
dsl_scan_prefetch(dsl_scan_t *scn, arc_buf_t *buf, blkptr_t *bp,
    uint64_t objset, uint64_t object, uint64_t blkid)
{
	zbookmark_phys_t czb;
	uint32_t flags = ARC_NOWAIT | ARC_PREFETCH;

	if (zfs_no_scrub_prefetch)
		return;

	if (BP_IS_HOLE(bp) || bp->blk_birth <= scn->scn_phys.scn_min_txg ||
	    (BP_GET_LEVEL(bp) == 0 && BP_GET_TYPE(bp) != DMU_OT_DNODE))
		return;

	SET_BOOKMARK(&czb, objset, object, BP_GET_LEVEL(bp), blkid);

	(void) arc_read(scn->scn_zio_root, scn->scn_dp->dp_spa, bp,
	    NULL, NULL, ZIO_PRIORITY_ASYNC_READ,
	    ZIO_FLAG_CANFAIL | ZIO_FLAG_SCAN_THREAD, &flags, &czb);
}

static boolean_t
dsl_scan_check_resume(dsl_scan_t *scn, const dnode_phys_t *dnp,
    const zbookmark_phys_t *zb)
{
	/*
	 * We never skip over user/group accounting objects (obj<0)
	 */
	if (!ZB_IS_ZERO(&scn->scn_phys.scn_bookmark) &&
	    (int64_t)zb->zb_object >= 0) {
		/*
		 * If we already visited this bp & everything below (in
		 * a prior txg sync), don't bother doing it again.
		 */
		if (zbookmark_is_before(dnp, zb, &scn->scn_phys.scn_bookmark))
			return (B_TRUE);

		/*
		 * If we found the block we're trying to resume from, or
		 * we went past it to a different object, zero it out to
		 * indicate that it's OK to start checking for pausing
		 * again.
		 */
		if (bcmp(zb, &scn->scn_phys.scn_bookmark, sizeof (*zb)) == 0 ||
		    zb->zb_object > scn->scn_phys.scn_bookmark.zb_object) {
			dprintf("resuming at %llx/%llx/%llx/%llx\n",
			    (longlong_t)zb->zb_objset,
			    (longlong_t)zb->zb_object,
			    (longlong_t)zb->zb_level,
			    (longlong_t)zb->zb_blkid);
			bzero(&scn->scn_phys.scn_bookmark, sizeof (*zb));
		}
	}
	return (B_FALSE);
}
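
/*
 * Read one indirect, dnode, or objset block and recurse into each of
 * its children via dsl_scan_visitbp()/dsl_scan_visitdnode(), issuing
 * prefetches for the children first.
 */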
/*
 * Return nonzero on i/o error.
 * Return new buf to write out in *bufp.
 */
static int
dsl_scan_recurse(dsl_scan_t *scn, dsl_dataset_t *ds, dmu_objset_type_t ostype,
    dnode_phys_t *dnp, const blkptr_t *bp,
    const zbookmark_phys_t *zb, dmu_tx_t *tx, arc_buf_t **bufp)
{
	dsl_pool_t *dp = scn->scn_dp;
	int zio_flags = ZIO_FLAG_CANFAIL | ZIO_FLAG_SCAN_THREAD;
	int err;

	if (BP_GET_LEVEL(bp) > 0) {
		uint32_t flags = ARC_WAIT;
		int i;
		blkptr_t *cbp;
		int epb = BP_GET_LSIZE(bp) >> SPA_BLKPTRSHIFT;

		err = arc_read(NULL, dp->dp_spa, bp, arc_getbuf_func, bufp,
		    ZIO_PRIORITY_ASYNC_READ, zio_flags, &flags, zb);
		if (err) {
			scn->scn_phys.scn_errors++;
			return (err);
		}
		for (i = 0, cbp = (*bufp)->b_data; i < epb; i++, cbp++) {
			dsl_scan_prefetch(scn, *bufp, cbp, zb->zb_objset,
			    zb->zb_object, zb->zb_blkid * epb + i);
		}
		for (i = 0, cbp = (*bufp)->b_data; i < epb; i++, cbp++) {
			zbookmark_phys_t czb;

			SET_BOOKMARK(&czb, zb->zb_objset, zb->zb_object,
			    zb->zb_level - 1,
			    zb->zb_blkid * epb + i);
			dsl_scan_visitbp(cbp, &czb, dnp,
			    *bufp, ds, scn, ostype, tx);
		}
	} else if (BP_GET_TYPE(bp) == DMU_OT_USERGROUP_USED) {
		uint32_t flags = ARC_WAIT;

		err = arc_read(NULL, dp->dp_spa, bp, arc_getbuf_func, bufp,
		    ZIO_PRIORITY_ASYNC_READ, zio_flags, &flags, zb);
		if (err) {
			scn->scn_phys.scn_errors++;
			return (err);
		}
	} else if (BP_GET_TYPE(bp) == DMU_OT_DNODE) {
		uint32_t flags = ARC_WAIT;
		dnode_phys_t *cdnp;
		int i, j;
		int epb = BP_GET_LSIZE(bp) >> DNODE_SHIFT;

		err = arc_read(NULL, dp->dp_spa, bp, arc_getbuf_func, bufp,
		    ZIO_PRIORITY_ASYNC_READ, zio_flags, &flags, zb);
		if (err) {
			scn->scn_phys.scn_errors++;
			return (err);
		}
		for (i = 0, cdnp = (*bufp)->b_data; i < epb; i++, cdnp++) {
			for (j = 0; j < cdnp->dn_nblkptr; j++) {
				blkptr_t *cbp = &cdnp->dn_blkptr[j];
				dsl_scan_prefetch(scn, *bufp, cbp,
				    zb->zb_objset, zb->zb_blkid * epb + i, j);
			}
		}
		for (i = 0, cdnp = (*bufp)->b_data; i < epb; i++, cdnp++) {
			dsl_scan_visitdnode(scn, ds, ostype,
			    cdnp, *bufp, zb->zb_blkid * epb + i, tx);
		}

	} else if (BP_GET_TYPE(bp) == DMU_OT_OBJSET) {
		uint32_t flags = ARC_WAIT;
		objset_phys_t *osp;

		err = arc_read(NULL, dp->dp_spa, bp, arc_getbuf_func, bufp,
		    ZIO_PRIORITY_ASYNC_READ, zio_flags, &flags, zb);
		if (err) {
			scn->scn_phys.scn_errors++;
			return (err);
		}

		osp = (*bufp)->b_data;

		dsl_scan_visitdnode(scn, ds, osp->os_type,
		    &osp->os_meta_dnode, *bufp, DMU_META_DNODE_OBJECT, tx);

		if (OBJSET_BUF_HAS_USERUSED(*bufp)) {
			/*
			 * We also always visit user/group accounting
			 * objects, and never skip them, even if we are
			 * pausing.  This is necessary so that the space
			 * deltas from this txg get integrated.
			 */
			dsl_scan_visitdnode(scn, ds, osp->os_type,
			    &osp->os_groupused_dnode, *bufp,
			    DMU_GROUPUSED_OBJECT, tx);
			dsl_scan_visitdnode(scn, ds, osp->os_type,
			    &osp->os_userused_dnode, *bufp,
			    DMU_USERUSED_OBJECT, tx);
		}
	}

	return (0);
}
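
/*
 * Visit each block pointer in a dnode, including the spill block if
 * one is present.
 */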
static void
dsl_scan_visitdnode(dsl_scan_t *scn, dsl_dataset_t *ds,
    dmu_objset_type_t ostype, dnode_phys_t *dnp, arc_buf_t *buf,
    uint64_t object, dmu_tx_t *tx)
{
	int j;

	for (j = 0; j < dnp->dn_nblkptr; j++) {
		zbookmark_phys_t czb;

		SET_BOOKMARK(&czb, ds ? ds->ds_object : 0, object,
		    dnp->dn_nlevels - 1, j);
		dsl_scan_visitbp(&dnp->dn_blkptr[j],
		    &czb, dnp, buf, ds, scn, ostype, tx);
	}

	if (dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR) {
		zbookmark_phys_t czb;
		SET_BOOKMARK(&czb, ds ? ds->ds_object : 0, object,
		    0, DMU_SPILL_BLKID);
		dsl_scan_visitbp(&dnp->dn_spill,
		    &czb, dnp, buf, ds, scn, ostype, tx);
	}
}

/*
 * The arguments are in this order because mdb can only print the
 * first 5; we want them to be useful.
 */
static void
dsl_scan_visitbp(blkptr_t *bp, const zbookmark_phys_t *zb,
    dnode_phys_t *dnp, arc_buf_t *pbuf,
    dsl_dataset_t *ds, dsl_scan_t *scn, dmu_objset_type_t ostype,
    dmu_tx_t *tx)
{
	dsl_pool_t *dp = scn->scn_dp;
	arc_buf_t *buf = NULL;
	blkptr_t bp_toread = *bp;

	/* ASSERT(pbuf == NULL || arc_released(pbuf)); */

	if (dsl_scan_check_pause(scn, zb))
		return;

	if (dsl_scan_check_resume(scn, dnp, zb))
		return;

	if (BP_IS_HOLE(bp))
		return;

	scn->scn_visited_this_txg++;

	dprintf_bp(bp,
	    "visiting ds=%p/%llu zb=%llx/%llx/%llx/%llx buf=%p bp=%p",
	    ds, ds ? ds->ds_object : 0,
	    zb->zb_objset, zb->zb_object, zb->zb_level, zb->zb_blkid,
	    pbuf, bp);

	if (bp->blk_birth <= scn->scn_phys.scn_cur_min_txg)
		return;

	if (dsl_scan_recurse(scn, ds, ostype, dnp, &bp_toread, zb, tx,
	    &buf) != 0)
		return;

	/*
	 * If dsl_scan_ddt() has already visited this block, it will have
	 * already done any translations or scrubbing, so don't call the
	 * callback again.
	 */
	if (ddt_class_contains(dp->dp_spa,
	    scn->scn_phys.scn_ddt_class_max, bp)) {
		ASSERT(buf == NULL);
		return;
	}

	/*
	 * If this block is from the future (after cur_max_txg), then we
	 * are doing this on behalf of a deleted snapshot, and we will
	 * revisit the future block on the next pass of this dataset.
	 * Don't scan it now unless we need to because something
	 * under it was modified.
	 */
	if (BP_PHYSICAL_BIRTH(bp) <= scn->scn_phys.scn_cur_max_txg) {
		scan_funcs[scn->scn_phys.scn_func](dp, bp, zb);
	}
	if (buf)
		(void) arc_buf_remove_ref(buf, &buf);
}

static void
dsl_scan_visit_rootbp(dsl_scan_t *scn, dsl_dataset_t *ds, blkptr_t *bp,
    dmu_tx_t *tx)
{
	zbookmark_phys_t zb;

	SET_BOOKMARK(&zb, ds ? ds->ds_object : DMU_META_OBJSET,
	    ZB_ROOT_OBJECT, ZB_ROOT_LEVEL, ZB_ROOT_BLKID);
	dsl_scan_visitbp(bp, &zb, NULL, NULL,
	    ds, scn, DMU_OST_NONE, tx);

	dprintf_ds(ds, "finished scan%s", "");
}
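
/*
 * The dsl_scan_ds_*() hooks below keep the scan bookmark and work
 * queue consistent when datasets are destroyed, snapshotted, or
 * clone-swapped while a scan is in progress.
 */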
void
dsl_scan_ds_destroyed(dsl_dataset_t *ds, dmu_tx_t *tx)
{
	dsl_pool_t *dp = ds->ds_dir->dd_pool;
	dsl_scan_t *scn = dp->dp_scan;
	uint64_t mintxg;

	if (scn->scn_phys.scn_state != DSS_SCANNING)
		return;

	if (scn->scn_phys.scn_bookmark.zb_objset == ds->ds_object) {
		if (dsl_dataset_is_snapshot(ds)) {
			/* Note, scn_cur_{min,max}_txg stays the same. */
			scn->scn_phys.scn_bookmark.zb_objset =
			    ds->ds_phys->ds_next_snap_obj;
			zfs_dbgmsg("destroying ds %llu; currently traversing; "
			    "reset zb_objset to %llu",
			    (u_longlong_t)ds->ds_object,
			    (u_longlong_t)ds->ds_phys->ds_next_snap_obj);
			scn->scn_phys.scn_flags |= DSF_VISIT_DS_AGAIN;
		} else {
			SET_BOOKMARK(&scn->scn_phys.scn_bookmark,
			    ZB_DESTROYED_OBJSET, 0, 0, 0);
			zfs_dbgmsg("destroying ds %llu; currently traversing; "
			    "reset bookmark to -1,0,0,0",
			    (u_longlong_t)ds->ds_object);
		}
	} else if (zap_lookup_int_key(dp->dp_meta_objset,
	    scn->scn_phys.scn_queue_obj, ds->ds_object, &mintxg) == 0) {
		ASSERT3U(ds->ds_phys->ds_num_children, <=, 1);
		VERIFY3U(0, ==, zap_remove_int(dp->dp_meta_objset,
		    scn->scn_phys.scn_queue_obj, ds->ds_object, tx));
		if (dsl_dataset_is_snapshot(ds)) {
			/*
			 * We keep the same mintxg; it could be >
			 * ds_creation_txg if the previous snapshot was
			 * deleted too.
			 */
			VERIFY(zap_add_int_key(dp->dp_meta_objset,
			    scn->scn_phys.scn_queue_obj,
			    ds->ds_phys->ds_next_snap_obj, mintxg, tx) == 0);
			zfs_dbgmsg("destroying ds %llu; in queue; "
			    "replacing with %llu",
			    (u_longlong_t)ds->ds_object,
			    (u_longlong_t)ds->ds_phys->ds_next_snap_obj);
		} else {
			zfs_dbgmsg("destroying ds %llu; in queue; removing",
			    (u_longlong_t)ds->ds_object);
		}
	} else {
		zfs_dbgmsg("destroying ds %llu; ignoring",
		    (u_longlong_t)ds->ds_object);
	}

	/*
	 * dsl_scan_sync() should be called after this, and should sync
	 * out our changed state, but just to be safe, do it here.
	 */
	dsl_scan_sync_state(scn, tx);
}

void
dsl_scan_ds_snapshotted(dsl_dataset_t *ds, dmu_tx_t *tx)
{
	dsl_pool_t *dp = ds->ds_dir->dd_pool;
	dsl_scan_t *scn = dp->dp_scan;
	uint64_t mintxg;

	if (scn->scn_phys.scn_state != DSS_SCANNING)
		return;

	ASSERT(ds->ds_phys->ds_prev_snap_obj != 0);

	if (scn->scn_phys.scn_bookmark.zb_objset == ds->ds_object) {
		scn->scn_phys.scn_bookmark.zb_objset =
		    ds->ds_phys->ds_prev_snap_obj;
		zfs_dbgmsg("snapshotting ds %llu; currently traversing; "
		    "reset zb_objset to %llu",
		    (u_longlong_t)ds->ds_object,
		    (u_longlong_t)ds->ds_phys->ds_prev_snap_obj);
	} else if (zap_lookup_int_key(dp->dp_meta_objset,
	    scn->scn_phys.scn_queue_obj, ds->ds_object, &mintxg) == 0) {
		VERIFY3U(0, ==, zap_remove_int(dp->dp_meta_objset,
		    scn->scn_phys.scn_queue_obj, ds->ds_object, tx));
		VERIFY(zap_add_int_key(dp->dp_meta_objset,
		    scn->scn_phys.scn_queue_obj,
		    ds->ds_phys->ds_prev_snap_obj, mintxg, tx) == 0);
		zfs_dbgmsg("snapshotting ds %llu; in queue; "
		    "replacing with %llu",
		    (u_longlong_t)ds->ds_object,
		    (u_longlong_t)ds->ds_phys->ds_prev_snap_obj);
	}
	dsl_scan_sync_state(scn, tx);
}
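
/*
 * A clone swap exchanges the contents of ds1 and ds2, so any bookmark
 * or queue entry referring to one of them must be moved to the other.
 */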
void
dsl_scan_ds_clone_swapped(dsl_dataset_t *ds1, dsl_dataset_t *ds2, dmu_tx_t *tx)
{
	dsl_pool_t *dp = ds1->ds_dir->dd_pool;
	dsl_scan_t *scn = dp->dp_scan;
	uint64_t mintxg;

	if (scn->scn_phys.scn_state != DSS_SCANNING)
		return;

	if (scn->scn_phys.scn_bookmark.zb_objset == ds1->ds_object) {
		scn->scn_phys.scn_bookmark.zb_objset = ds2->ds_object;
		zfs_dbgmsg("clone_swap ds %llu; currently traversing; "
		    "reset zb_objset to %llu",
		    (u_longlong_t)ds1->ds_object,
		    (u_longlong_t)ds2->ds_object);
	} else if (scn->scn_phys.scn_bookmark.zb_objset == ds2->ds_object) {
		scn->scn_phys.scn_bookmark.zb_objset = ds1->ds_object;
		zfs_dbgmsg("clone_swap ds %llu; currently traversing; "
		    "reset zb_objset to %llu",
		    (u_longlong_t)ds2->ds_object,
		    (u_longlong_t)ds1->ds_object);
	}

	if (zap_lookup_int_key(dp->dp_meta_objset, scn->scn_phys.scn_queue_obj,
	    ds1->ds_object, &mintxg) == 0) {
		int err;

		ASSERT3U(mintxg, ==, ds1->ds_phys->ds_prev_snap_txg);
		ASSERT3U(mintxg, ==, ds2->ds_phys->ds_prev_snap_txg);
		VERIFY3U(0, ==, zap_remove_int(dp->dp_meta_objset,
		    scn->scn_phys.scn_queue_obj, ds1->ds_object, tx));
		err = zap_add_int_key(dp->dp_meta_objset,
		    scn->scn_phys.scn_queue_obj, ds2->ds_object, mintxg, tx);
		VERIFY(err == 0 || err == EEXIST);
		if (err == EEXIST) {
			/* Both were there to begin with */
			VERIFY(0 == zap_add_int_key(dp->dp_meta_objset,
			    scn->scn_phys.scn_queue_obj,
			    ds1->ds_object, mintxg, tx));
		}
		zfs_dbgmsg("clone_swap ds %llu; in queue; "
		    "replacing with %llu",
		    (u_longlong_t)ds1->ds_object,
		    (u_longlong_t)ds2->ds_object);
	} else if (zap_lookup_int_key(dp->dp_meta_objset,
	    scn->scn_phys.scn_queue_obj, ds2->ds_object, &mintxg) == 0) {
		ASSERT3U(mintxg, ==, ds1->ds_phys->ds_prev_snap_txg);
		ASSERT3U(mintxg, ==, ds2->ds_phys->ds_prev_snap_txg);
		VERIFY3U(0, ==, zap_remove_int(dp->dp_meta_objset,
		    scn->scn_phys.scn_queue_obj, ds2->ds_object, tx));
		VERIFY(0 == zap_add_int_key(dp->dp_meta_objset,
		    scn->scn_phys.scn_queue_obj, ds1->ds_object, mintxg, tx));
		zfs_dbgmsg("clone_swap ds %llu; in queue; "
		    "replacing with %llu",
		    (u_longlong_t)ds2->ds_object,
		    (u_longlong_t)ds1->ds_object);
	}

	dsl_scan_sync_state(scn, tx);
}

struct enqueue_clones_arg {
	dmu_tx_t *tx;
	uint64_t originobj;
};

/* ARGSUSED */
static int
enqueue_clones_cb(dsl_pool_t *dp, dsl_dataset_t *hds, void *arg)
{
	struct enqueue_clones_arg *eca = arg;
	dsl_dataset_t *ds;
	int err;
	dsl_scan_t *scn = dp->dp_scan;

	if (hds->ds_dir->dd_phys->dd_origin_obj != eca->originobj)
		return (0);

	err = dsl_dataset_hold_obj(dp, hds->ds_object, FTAG, &ds);
	if (err)
		return (err);

	while (ds->ds_phys->ds_prev_snap_obj != eca->originobj) {
		dsl_dataset_t *prev;
		err = dsl_dataset_hold_obj(dp,
		    ds->ds_phys->ds_prev_snap_obj, FTAG, &prev);

		dsl_dataset_rele(ds, FTAG);
		if (err)
			return (err);
		ds = prev;
	}
	VERIFY(zap_add_int_key(dp->dp_meta_objset,
	    scn->scn_phys.scn_queue_obj, ds->ds_object,
	    ds->ds_phys->ds_prev_snap_txg, eca->tx) == 0);
	dsl_dataset_rele(ds, FTAG);
	return (0);
}
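
/*
 * Scan one dataset: traverse its ZIL (for head datasets only) and its
 * root block pointer, then enqueue its next snapshot and any clones so
 * that the traversal continues in creation-txg order.
 */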
static void
dsl_scan_visitds(dsl_scan_t *scn, uint64_t dsobj, dmu_tx_t *tx)
{
	dsl_pool_t *dp = scn->scn_dp;
	dsl_dataset_t *ds;
	objset_t *os;

	VERIFY3U(0, ==, dsl_dataset_hold_obj(dp, dsobj, FTAG, &ds));

	if (dmu_objset_from_ds(ds, &os))
		goto out;

	/*
	 * Only the ZIL in the head (non-snapshot) is valid.  Even though
	 * snapshots can have ZIL block pointers (which may be the same
	 * BP as in the head), they must be ignored.  So we traverse the
	 * ZIL here, rather than in scan_recurse(), because the regular
	 * snapshot block-sharing rules don't apply to it.
	 */
	if (DSL_SCAN_IS_SCRUB_RESILVER(scn) && !dsl_dataset_is_snapshot(ds))
		dsl_scan_zil(dp, &os->os_zil_header);

	/*
	 * Iterate over the bps in this ds.
	 */
	dmu_buf_will_dirty(ds->ds_dbuf, tx);
	dsl_scan_visit_rootbp(scn, ds, &ds->ds_phys->ds_bp, tx);

	char *dsname = kmem_alloc(ZFS_MAXNAMELEN, KM_SLEEP);
	dsl_dataset_name(ds, dsname);
	zfs_dbgmsg("scanned dataset %llu (%s) with min=%llu max=%llu; "
	    "pausing=%u",
	    (longlong_t)dsobj, dsname,
	    (longlong_t)scn->scn_phys.scn_cur_min_txg,
	    (longlong_t)scn->scn_phys.scn_cur_max_txg,
	    (int)scn->scn_pausing);
	kmem_free(dsname, ZFS_MAXNAMELEN);

	if (scn->scn_pausing)
		goto out;

	/*
	 * We've finished this pass over this dataset.
	 */

	/*
	 * If we did not completely visit this dataset, do another pass.
	 */
	if (scn->scn_phys.scn_flags & DSF_VISIT_DS_AGAIN) {
		zfs_dbgmsg("incomplete pass; visiting again");
		scn->scn_phys.scn_flags &= ~DSF_VISIT_DS_AGAIN;
		VERIFY(zap_add_int_key(dp->dp_meta_objset,
		    scn->scn_phys.scn_queue_obj, ds->ds_object,
		    scn->scn_phys.scn_cur_max_txg, tx) == 0);
		goto out;
	}

	/*
	 * Add descendant datasets to work queue.
	 */
	if (ds->ds_phys->ds_next_snap_obj != 0) {
		VERIFY(zap_add_int_key(dp->dp_meta_objset,
		    scn->scn_phys.scn_queue_obj, ds->ds_phys->ds_next_snap_obj,
		    ds->ds_phys->ds_creation_txg, tx) == 0);
	}
	if (ds->ds_phys->ds_num_children > 1) {
		boolean_t usenext = B_FALSE;
		if (ds->ds_phys->ds_next_clones_obj != 0) {
			uint64_t count;
			/*
			 * A bug in a previous version of the code could
			 * cause upgrade_clones_cb() to not set
			 * ds_next_snap_obj when it should, leading to a
			 * missing entry.  Therefore we can only use the
			 * next_clones_obj when its count is correct.
			 */
			int err = zap_count(dp->dp_meta_objset,
			    ds->ds_phys->ds_next_clones_obj, &count);
			if (err == 0 &&
			    count == ds->ds_phys->ds_num_children - 1)
				usenext = B_TRUE;
		}

		if (usenext) {
			VERIFY0(zap_join_key(dp->dp_meta_objset,
			    ds->ds_phys->ds_next_clones_obj,
			    scn->scn_phys.scn_queue_obj,
			    ds->ds_phys->ds_creation_txg, tx));
		} else {
			struct enqueue_clones_arg eca;
			eca.tx = tx;
			eca.originobj = ds->ds_object;

			VERIFY0(dmu_objset_find_dp(dp, dp->dp_root_dir_obj,
			    enqueue_clones_cb, &eca, DS_FIND_CHILDREN));
		}
	}

out:
	dsl_dataset_rele(ds, FTAG);
}

/* ARGSUSED */
static int
enqueue_cb(dsl_pool_t *dp, dsl_dataset_t *hds, void *arg)
{
	dmu_tx_t *tx = arg;
	dsl_dataset_t *ds;
	int err;
	dsl_scan_t *scn = dp->dp_scan;

	err = dsl_dataset_hold_obj(dp, hds->ds_object, FTAG, &ds);
	if (err)
		return (err);

	while (ds->ds_phys->ds_prev_snap_obj != 0) {
		dsl_dataset_t *prev;
		err = dsl_dataset_hold_obj(dp, ds->ds_phys->ds_prev_snap_obj,
		    FTAG, &prev);
		if (err) {
			dsl_dataset_rele(ds, FTAG);
			return (err);
		}

		/*
		 * If this is a clone, we don't need to worry about it for now.
		 */
		if (prev->ds_phys->ds_next_snap_obj != ds->ds_object) {
			dsl_dataset_rele(ds, FTAG);
			dsl_dataset_rele(prev, FTAG);
			return (0);
		}
		dsl_dataset_rele(ds, FTAG);
		ds = prev;
	}

	VERIFY(zap_add_int_key(dp->dp_meta_objset, scn->scn_phys.scn_queue_obj,
	    ds->ds_object, ds->ds_phys->ds_prev_snap_txg, tx) == 0);
	dsl_dataset_rele(ds, FTAG);
	return (0);
}

/*
 * Scrub/dedup interaction.
 *
 * If there are N references to a deduped block, we don't want to scrub it
 * N times -- ideally, we should scrub it exactly once.
 *
 * We leverage the fact that the dde's replication class (enum ddt_class)
 * is ordered from highest replication class (DDT_CLASS_DITTO) to lowest
 * (DDT_CLASS_UNIQUE) so that we may walk the DDT in that order.
 *
 * To prevent excess scrubbing, the scrub begins by walking the DDT
 * to find all blocks with refcnt > 1, and scrubs each of these once.
 * Since there are two replication classes which contain blocks with
 * refcnt > 1, we scrub the highest replication class (DDT_CLASS_DITTO) first.
 * Finally the top-down scrub begins, only visiting blocks with refcnt == 1.
 *
 * There would be nothing more to say if a block's refcnt couldn't change
 * during a scrub, but of course it can so we must account for changes
 * in a block's replication class.
 *
 * Here's an example of what can occur:
 *
 * If a block has refcnt > 1 during the DDT scrub phase, but has refcnt == 1
 * when visited during the top-down scrub phase, it will be scrubbed twice.
 * This negates our scrub optimization, but is otherwise harmless.
 *
 * If a block has refcnt == 1 during the DDT scrub phase, but has refcnt > 1
 * on each visit during the top-down scrub phase, it will never be scrubbed.
 * To catch this, ddt_sync_entry() notifies the scrub code whenever a block's
 * reference class transitions to a higher level (i.e. DDT_CLASS_UNIQUE to
 * DDT_CLASS_DUPLICATE); if it transitions from refcnt == 1 to refcnt > 1
 * while a scrub is in progress, it scrubs the block right then.
 */
static void
dsl_scan_ddt(dsl_scan_t *scn, dmu_tx_t *tx)
{
	ddt_bookmark_t *ddb = &scn->scn_phys.scn_ddt_bookmark;
	ddt_entry_t dde = { 0 };
	int error;
	uint64_t n = 0;

	while ((error = ddt_walk(scn->scn_dp->dp_spa, ddb, &dde)) == 0) {
		ddt_t *ddt;

		if (ddb->ddb_class > scn->scn_phys.scn_ddt_class_max)
			break;
		dprintf("visiting ddb=%llu/%llu/%llu/%llx\n",
		    (longlong_t)ddb->ddb_class,
		    (longlong_t)ddb->ddb_type,
		    (longlong_t)ddb->ddb_checksum,
		    (longlong_t)ddb->ddb_cursor);

		/* There should be no pending changes to the dedup table */
		ddt = scn->scn_dp->dp_spa->spa_ddt[ddb->ddb_checksum];
		ASSERT(avl_first(&ddt->ddt_tree) == NULL);

		dsl_scan_ddt_entry(scn, ddb->ddb_checksum, &dde, tx);
		n++;

		if (dsl_scan_check_pause(scn, NULL))
			break;
	}

	zfs_dbgmsg("scanned %llu ddt entries with class_max = %u; pausing=%u",
	    (longlong_t)n, (int)scn->scn_phys.scn_ddt_class_max,
	    (int)scn->scn_pausing);

	ASSERT(error == 0 || error == ENOENT);
	ASSERT(error != ENOENT ||
	    ddb->ddb_class > scn->scn_phys.scn_ddt_class_max);
}

/* ARGSUSED */
void
dsl_scan_ddt_entry(dsl_scan_t *scn, enum zio_checksum checksum,
    ddt_entry_t *dde, dmu_tx_t *tx)
{
	const ddt_key_t *ddk = &dde->dde_key;
	ddt_phys_t *ddp = dde->dde_phys;
	blkptr_t bp;
	zbookmark_phys_t zb = { 0 };

	if (scn->scn_phys.scn_state != DSS_SCANNING)
		return;

	for (int p = 0; p < DDT_PHYS_TYPES; p++, ddp++) {
		if (ddp->ddp_phys_birth == 0 ||
		    ddp->ddp_phys_birth > scn->scn_phys.scn_max_txg)
			continue;
		ddt_bp_create(checksum, ddk, ddp, &bp);

		scn->scn_visited_this_txg++;
		scan_funcs[scn->scn_phys.scn_func](scn->scn_dp, &bp, &zb);
	}
}
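
/*
 * One txg's worth of traversal: finish the DDT phase if it is still in
 * progress, then visit the MOS and the origin snapshot, the dataset we
 * paused on (if any), and finally datasets pulled off the work queue,
 * until we finish or decide to pause.
 */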
static void
dsl_scan_visit(dsl_scan_t *scn, dmu_tx_t *tx)
{
	dsl_pool_t *dp = scn->scn_dp;
	zap_cursor_t zc;
	zap_attribute_t za;

	if (scn->scn_phys.scn_ddt_bookmark.ddb_class <=
	    scn->scn_phys.scn_ddt_class_max) {
		scn->scn_phys.scn_cur_min_txg = scn->scn_phys.scn_min_txg;
		scn->scn_phys.scn_cur_max_txg = scn->scn_phys.scn_max_txg;
		dsl_scan_ddt(scn, tx);
		if (scn->scn_pausing)
			return;
	}

	if (scn->scn_phys.scn_bookmark.zb_objset == DMU_META_OBJSET) {
		/* First do the MOS & ORIGIN */

		scn->scn_phys.scn_cur_min_txg = scn->scn_phys.scn_min_txg;
		scn->scn_phys.scn_cur_max_txg = scn->scn_phys.scn_max_txg;
		dsl_scan_visit_rootbp(scn, NULL,
		    &dp->dp_meta_rootbp, tx);
		spa_set_rootblkptr(dp->dp_spa, &dp->dp_meta_rootbp);
		if (scn->scn_pausing)
			return;

		if (spa_version(dp->dp_spa) < SPA_VERSION_DSL_SCRUB) {
			VERIFY0(dmu_objset_find_dp(dp, dp->dp_root_dir_obj,
			    enqueue_cb, tx, DS_FIND_CHILDREN));
		} else {
			dsl_scan_visitds(scn,
			    dp->dp_origin_snap->ds_object, tx);
		}
		ASSERT(!scn->scn_pausing);
	} else if (scn->scn_phys.scn_bookmark.zb_objset !=
	    ZB_DESTROYED_OBJSET) {
		/*
		 * If we were paused, continue from here.  Note if the
		 * ds we were paused on was deleted, the zb_objset may
		 * be -1, so we will skip this and find a new objset
		 * below.
		 */
		dsl_scan_visitds(scn, scn->scn_phys.scn_bookmark.zb_objset, tx);
		if (scn->scn_pausing)
			return;
	}

	/*
	 * In case we were paused right at the end of the ds, zero the
	 * bookmark so we don't think that we're still trying to resume.
	 */
	bzero(&scn->scn_phys.scn_bookmark, sizeof (zbookmark_phys_t));

	/* keep pulling things out of the zap-object-as-queue */
	while (zap_cursor_init(&zc, dp->dp_meta_objset,
	    scn->scn_phys.scn_queue_obj),
	    zap_cursor_retrieve(&zc, &za) == 0) {
		dsl_dataset_t *ds;
		uint64_t dsobj;

		dsobj = strtonum(za.za_name, NULL);
		VERIFY3U(0, ==, zap_remove_int(dp->dp_meta_objset,
		    scn->scn_phys.scn_queue_obj, dsobj, tx));

		/* Set up min/max txg */
		VERIFY3U(0, ==, dsl_dataset_hold_obj(dp, dsobj, FTAG, &ds));
		if (za.za_first_integer != 0) {
			scn->scn_phys.scn_cur_min_txg =
			    MAX(scn->scn_phys.scn_min_txg,
			    za.za_first_integer);
		} else {
			scn->scn_phys.scn_cur_min_txg =
			    MAX(scn->scn_phys.scn_min_txg,
			    ds->ds_phys->ds_prev_snap_txg);
		}
		scn->scn_phys.scn_cur_max_txg = dsl_scan_ds_maxtxg(ds);
		dsl_dataset_rele(ds, FTAG);

		dsl_scan_visitds(scn, dsobj, tx);
		zap_cursor_fini(&zc);
		if (scn->scn_pausing)
			return;
	}
	zap_cursor_fini(&zc);
}

static boolean_t
dsl_scan_free_should_pause(dsl_scan_t *scn)
{
	uint64_t elapsed_nanosecs;

	if (zfs_recover)
		return (B_FALSE);

	if (scn->scn_visited_this_txg >= zfs_free_max_blocks)
		return (B_TRUE);

	elapsed_nanosecs = gethrtime() - scn->scn_sync_start_time;
	return (elapsed_nanosecs / NANOSEC > zfs_txg_timeout ||
	    (NSEC2MSEC(elapsed_nanosecs) > zfs_free_min_time_ms &&
	    txg_sync_waiting(scn->scn_dp)) ||
	    spa_shutting_down(scn->scn_dp->dp_spa));
}

static int
dsl_scan_free_block_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
{
	dsl_scan_t *scn = arg;

	if (!scn->scn_is_bptree ||
	    (BP_GET_LEVEL(bp) == 0 && BP_GET_TYPE(bp) != DMU_OT_OBJSET)) {
		if (dsl_scan_free_should_pause(scn))
			return (SET_ERROR(ERESTART));
	}

	zio_nowait(zio_free_sync(scn->scn_zio_root, scn->scn_dp->dp_spa,
	    dmu_tx_get_txg(tx), bp, BP_GET_PSIZE(bp), 0));
	dsl_dir_diduse_space(tx->tx_pool->dp_free_dir, DD_USED_HEAD,
	    -bp_get_dsize_sync(scn->scn_dp->dp_spa, bp),
	    -BP_GET_PSIZE(bp), -BP_GET_UCSIZE(bp), tx);
	scn->scn_visited_this_txg++;
	return (0);
}

boolean_t
dsl_scan_active(dsl_scan_t *scn)
{
	spa_t *spa = scn->scn_dp->dp_spa;
	uint64_t used = 0, comp, uncomp;

	if (spa->spa_load_state != SPA_LOAD_NONE)
		return (B_FALSE);
	if (spa_shutting_down(spa))
		return (B_FALSE);
	if (scn->scn_phys.scn_state == DSS_SCANNING ||
	    (scn->scn_async_destroying && !scn->scn_async_stalled))
		return (B_TRUE);

	if (spa_version(scn->scn_dp->dp_spa) >= SPA_VERSION_DEADLISTS) {
		(void) bpobj_space(&scn->scn_dp->dp_free_bpobj,
		    &used, &comp, &uncomp);
	}
	return (used != 0);
}
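
/*
 * Called from the pool sync path each txg.  Processes any pending
 * async destroys first (freeing blocks is cheaper than scrubbing
 * them), then runs one timeslice of the scan and syncs the updated
 * scan state to disk.
 */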
void
dsl_scan_sync(dsl_pool_t *dp, dmu_tx_t *tx)
{
	dsl_scan_t *scn = dp->dp_scan;
	spa_t *spa = dp->dp_spa;
	int err = 0;

	/*
	 * Check for scn_restart_txg before checking spa_load_state, so
	 * that we can restart an old-style scan while the pool is being
	 * imported (see dsl_scan_init).
	 */
	if (scn->scn_restart_txg != 0 &&
	    scn->scn_restart_txg <= tx->tx_txg) {
		pool_scan_func_t func = POOL_SCAN_SCRUB;
		dsl_scan_done(scn, B_FALSE, tx);
		if (vdev_resilver_needed(spa->spa_root_vdev, NULL, NULL))
			func = POOL_SCAN_RESILVER;
		zfs_dbgmsg("restarting scan func=%u txg=%llu",
		    func, tx->tx_txg);
		dsl_scan_setup_sync(&func, tx);
	}

	/*
	 * If the scan is inactive due to a stalled async destroy, try again.
	 */
	if ((!scn->scn_async_stalled && !dsl_scan_active(scn)) ||
	    spa_sync_pass(dp->dp_spa) > 1)
		return;

	scn->scn_visited_this_txg = 0;
	scn->scn_pausing = B_FALSE;
	scn->scn_sync_start_time = gethrtime();
	spa->spa_scrub_active = B_TRUE;

	/*
	 * First process the async destroys.  If we pause, don't do
	 * any scrubbing or resilvering.  This ensures that there are no
	 * async destroys while we are scanning, so the scan code doesn't
	 * have to worry about traversing it.  It is also faster to free the
	 * blocks than to scrub them.
	 */
	if (spa_version(dp->dp_spa) >= SPA_VERSION_DEADLISTS) {
		scn->scn_is_bptree = B_FALSE;
		scn->scn_zio_root = zio_root(dp->dp_spa, NULL,
		    NULL, ZIO_FLAG_MUSTSUCCEED);
		err = bpobj_iterate(&dp->dp_free_bpobj,
		    dsl_scan_free_block_cb, scn, tx);
		VERIFY3U(0, ==, zio_wait(scn->scn_zio_root));

		if (err != 0 && err != ERESTART)
			zfs_panic_recover("error %u from bpobj_iterate()", err);
	}

	if (err == 0 && spa_feature_is_active(spa, SPA_FEATURE_ASYNC_DESTROY)) {
		ASSERT(scn->scn_async_destroying);
		scn->scn_is_bptree = B_TRUE;
		scn->scn_zio_root = zio_root(dp->dp_spa, NULL,
		    NULL, ZIO_FLAG_MUSTSUCCEED);
		err = bptree_iterate(dp->dp_meta_objset,
		    dp->dp_bptree_obj, B_TRUE, dsl_scan_free_block_cb, scn, tx);
		VERIFY0(zio_wait(scn->scn_zio_root));

		if (err == EIO || err == ECKSUM) {
			err = 0;
		} else if (err != 0 && err != ERESTART) {
			zfs_panic_recover("error %u from "
			    "traverse_dataset_destroyed()", err);
		}

		/*
		 * If we didn't make progress, mark the async destroy as
		 * stalled, so that we will not initiate a spa_sync() on
		 * its behalf.
		 */
		scn->scn_async_stalled = (scn->scn_visited_this_txg == 0);

		if (bptree_is_empty(dp->dp_meta_objset, dp->dp_bptree_obj)) {
			/* finished; deactivate async destroy feature */
			spa_feature_decr(spa, SPA_FEATURE_ASYNC_DESTROY, tx);
			ASSERT(!spa_feature_is_active(spa,
			    SPA_FEATURE_ASYNC_DESTROY));
			VERIFY0(zap_remove(dp->dp_meta_objset,
			    DMU_POOL_DIRECTORY_OBJECT,
			    DMU_POOL_BPTREE_OBJ, tx));
			VERIFY0(bptree_free(dp->dp_meta_objset,
			    dp->dp_bptree_obj, tx));
			dp->dp_bptree_obj = 0;
			scn->scn_async_destroying = B_FALSE;
		}
	}
	if (scn->scn_visited_this_txg) {
		zfs_dbgmsg("freed %llu blocks in %llums from "
		    "free_bpobj/bptree txg %llu; err=%u",
		    (longlong_t)scn->scn_visited_this_txg,
		    (longlong_t)
		    NSEC2MSEC(gethrtime() - scn->scn_sync_start_time),
		    (longlong_t)tx->tx_txg, err);
		scn->scn_visited_this_txg = 0;

		/*
		 * Write out changes to the DDT that may be required as a
		 * result of the blocks freed.  This ensures that the DDT
		 * is clean when a scrub/resilver runs.
		 */
		ddt_sync(spa, tx->tx_txg);
	}
	if (err != 0)
		return;
	if (!scn->scn_async_destroying && zfs_free_leak_on_eio &&
	    (dp->dp_free_dir->dd_phys->dd_used_bytes != 0 ||
	    dp->dp_free_dir->dd_phys->dd_compressed_bytes != 0 ||
	    dp->dp_free_dir->dd_phys->dd_uncompressed_bytes != 0)) {
		/*
		 * We have finished background destroying, but there is still
		 * some space left in the dp_free_dir. Transfer this leaked
		 * space to the dp_leak_dir.
		 */
		if (dp->dp_leak_dir == NULL) {
			rrw_enter(&dp->dp_config_rwlock, RW_WRITER, FTAG);
			(void) dsl_dir_create_sync(dp, dp->dp_root_dir,
			    LEAK_DIR_NAME, tx);
			VERIFY0(dsl_pool_open_special_dir(dp,
			    LEAK_DIR_NAME, &dp->dp_leak_dir));
			rrw_exit(&dp->dp_config_rwlock, FTAG);
		}
		dsl_dir_diduse_space(dp->dp_leak_dir, DD_USED_HEAD,
		    dp->dp_free_dir->dd_phys->dd_used_bytes,
		    dp->dp_free_dir->dd_phys->dd_compressed_bytes,
		    dp->dp_free_dir->dd_phys->dd_uncompressed_bytes, tx);
		dsl_dir_diduse_space(dp->dp_free_dir, DD_USED_HEAD,
		    -dp->dp_free_dir->dd_phys->dd_used_bytes,
		    -dp->dp_free_dir->dd_phys->dd_compressed_bytes,
		    -dp->dp_free_dir->dd_phys->dd_uncompressed_bytes, tx);
	}
	if (!scn->scn_async_destroying) {
		/* finished; verify that space accounting went to zero */
		ASSERT0(dp->dp_free_dir->dd_phys->dd_used_bytes);
		ASSERT0(dp->dp_free_dir->dd_phys->dd_compressed_bytes);
		ASSERT0(dp->dp_free_dir->dd_phys->dd_uncompressed_bytes);
	}

	if (scn->scn_phys.scn_state != DSS_SCANNING)
		return;

	if (scn->scn_done_txg == tx->tx_txg) {
		ASSERT(!scn->scn_pausing);
		/* finished with scan. */
		zfs_dbgmsg("txg %llu scan complete", tx->tx_txg);
		dsl_scan_done(scn, B_TRUE, tx);
		ASSERT3U(spa->spa_scrub_inflight, ==, 0);
		dsl_scan_sync_state(scn, tx);
		return;
	}

	if (scn->scn_phys.scn_ddt_bookmark.ddb_class <=
	    scn->scn_phys.scn_ddt_class_max) {
		zfs_dbgmsg("doing scan sync txg %llu; "
		    "ddt bm=%llu/%llu/%llu/%llx",
		    (longlong_t)tx->tx_txg,
		    (longlong_t)scn->scn_phys.scn_ddt_bookmark.ddb_class,
		    (longlong_t)scn->scn_phys.scn_ddt_bookmark.ddb_type,
		    (longlong_t)scn->scn_phys.scn_ddt_bookmark.ddb_checksum,
		    (longlong_t)scn->scn_phys.scn_ddt_bookmark.ddb_cursor);
		ASSERT(scn->scn_phys.scn_bookmark.zb_objset == 0);
		ASSERT(scn->scn_phys.scn_bookmark.zb_object == 0);
		ASSERT(scn->scn_phys.scn_bookmark.zb_level == 0);
		ASSERT(scn->scn_phys.scn_bookmark.zb_blkid == 0);
	} else {
		zfs_dbgmsg("doing scan sync txg %llu; bm=%llu/%llu/%llu/%llu",
		    (longlong_t)tx->tx_txg,
		    (longlong_t)scn->scn_phys.scn_bookmark.zb_objset,
		    (longlong_t)scn->scn_phys.scn_bookmark.zb_object,
		    (longlong_t)scn->scn_phys.scn_bookmark.zb_level,
		    (longlong_t)scn->scn_phys.scn_bookmark.zb_blkid);
	}

	scn->scn_zio_root = zio_root(dp->dp_spa, NULL,
	    NULL, ZIO_FLAG_CANFAIL);
	dsl_pool_config_enter(dp, FTAG);
	dsl_scan_visit(scn, tx);
	dsl_pool_config_exit(dp, FTAG);
	(void) zio_wait(scn->scn_zio_root);
	scn->scn_zio_root = NULL;

	zfs_dbgmsg("visited %llu blocks in %llums",
	    (longlong_t)scn->scn_visited_this_txg,
	    (longlong_t)NSEC2MSEC(gethrtime() - scn->scn_sync_start_time));

	if (!scn->scn_pausing) {
		scn->scn_done_txg = tx->tx_txg + 1;
		zfs_dbgmsg("txg %llu traversal complete, waiting till txg %llu",
		    tx->tx_txg, scn->scn_done_txg);
	}

	if (DSL_SCAN_IS_SCRUB_RESILVER(scn)) {
		mutex_enter(&spa->spa_scrub_lock);
		while (spa->spa_scrub_inflight > 0) {
			cv_wait(&spa->spa_scrub_io_cv,
			    &spa->spa_scrub_lock);
		}
		mutex_exit(&spa->spa_scrub_lock);
	}

	dsl_scan_sync_state(scn, tx);
}

/*
 * This will start a new scan, or restart an existing one.
 */
void
dsl_resilver_restart(dsl_pool_t *dp, uint64_t txg)
{
	if (txg == 0) {
		dmu_tx_t *tx;
		tx = dmu_tx_create_dd(dp->dp_mos_dir);
		VERIFY(0 == dmu_tx_assign(tx, TXG_WAIT));

		txg = dmu_tx_get_txg(tx);
		dp->dp_scan->scn_restart_txg = txg;
		dmu_tx_commit(tx);
	} else {
		dp->dp_scan->scn_restart_txg = txg;
	}
	zfs_dbgmsg("restarting resilver txg=%llu", txg);
}

boolean_t
dsl_scan_resilvering(dsl_pool_t *dp)
{
	return (dp->dp_scan->scn_phys.scn_state == DSS_SCANNING &&
	    dp->dp_scan->scn_phys.scn_func == POOL_SCAN_RESILVER);
}

/*
 * scrub consumers
 */

static void
count_block(zfs_all_blkstats_t *zab, const blkptr_t *bp)
{
	int i;

	/*
	 * If we resume after a reboot, zab will be NULL; don't record
	 * incomplete stats in that case.
	 */
	if (zab == NULL)
		return;

	for (i = 0; i < 4; i++) {
		int l = (i < 2) ? BP_GET_LEVEL(bp) : DN_MAX_LEVELS;
		int t = (i & 1) ? BP_GET_TYPE(bp) : DMU_OT_TOTAL;
		if (t & DMU_OT_NEWTYPE)
			t = DMU_OT_OTHER;
		zfs_blkstat_t *zb = &zab->zab_type[l][t];
		int equal;

		zb->zb_count++;
		zb->zb_asize += BP_GET_ASIZE(bp);
		zb->zb_lsize += BP_GET_LSIZE(bp);
		zb->zb_psize += BP_GET_PSIZE(bp);
		zb->zb_gangs += BP_COUNT_GANG(bp);

		switch (BP_GET_NDVAS(bp)) {
		case 2:
			if (DVA_GET_VDEV(&bp->blk_dva[0]) ==
			    DVA_GET_VDEV(&bp->blk_dva[1]))
				zb->zb_ditto_2_of_2_samevdev++;
			break;
		case 3:
			equal = (DVA_GET_VDEV(&bp->blk_dva[0]) ==
			    DVA_GET_VDEV(&bp->blk_dva[1])) +
			    (DVA_GET_VDEV(&bp->blk_dva[0]) ==
			    DVA_GET_VDEV(&bp->blk_dva[2])) +
			    (DVA_GET_VDEV(&bp->blk_dva[1]) ==
			    DVA_GET_VDEV(&bp->blk_dva[2]));
			if (equal == 1)
				zb->zb_ditto_2_of_3_samevdev++;
			else if (equal == 3)
				zb->zb_ditto_3_of_3_samevdev++;
			break;
		}
	}
}

static void
dsl_scan_scrub_done(zio_t *zio)
{
	spa_t *spa = zio->io_spa;

	zio_data_buf_free(zio->io_data, zio->io_size);

	mutex_enter(&spa->spa_scrub_lock);
	spa->spa_scrub_inflight--;
	cv_broadcast(&spa->spa_scrub_io_cv);

	if (zio->io_error && (zio->io_error != ECKSUM ||
	    !(zio->io_flags & ZIO_FLAG_SPECULATIVE))) {
		spa->spa_dsl_pool->dp_scan->scn_phys.scn_errors++;
	}
	mutex_exit(&spa->spa_scrub_lock);
}
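
/*
 * Per-block callback for scrub and resilver.  Updates progress
 * accounting, uses the DTLs to decide whether a resilver actually
 * needs to read this block, throttles against recent user I/O, and
 * issues the scrub/resilver read.
 */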
static int
dsl_scan_scrub_cb(dsl_pool_t *dp,
    const blkptr_t *bp, const zbookmark_phys_t *zb)
{
	dsl_scan_t *scn = dp->dp_scan;
	size_t size = BP_GET_PSIZE(bp);
	spa_t *spa = dp->dp_spa;
	uint64_t phys_birth = BP_PHYSICAL_BIRTH(bp);
	boolean_t needs_io;
	int zio_flags = ZIO_FLAG_SCAN_THREAD | ZIO_FLAG_RAW | ZIO_FLAG_CANFAIL;
	unsigned int scan_delay = 0;

	if (phys_birth <= scn->scn_phys.scn_min_txg ||
	    phys_birth >= scn->scn_phys.scn_max_txg)
		return (0);

	count_block(dp->dp_blkstats, bp);

	if (BP_IS_EMBEDDED(bp))
		return (0);

	ASSERT(DSL_SCAN_IS_SCRUB_RESILVER(scn));
	if (scn->scn_phys.scn_func == POOL_SCAN_SCRUB) {
		zio_flags |= ZIO_FLAG_SCRUB;
		needs_io = B_TRUE;
		scan_delay = zfs_scrub_delay;
	} else {
		ASSERT3U(scn->scn_phys.scn_func, ==, POOL_SCAN_RESILVER);
		zio_flags |= ZIO_FLAG_RESILVER;
		needs_io = B_FALSE;
		scan_delay = zfs_resilver_delay;
	}

	/* If it's an intent log block, failure is expected. */
	if (zb->zb_level == ZB_ZIL_LEVEL)
		zio_flags |= ZIO_FLAG_SPECULATIVE;

	for (int d = 0; d < BP_GET_NDVAS(bp); d++) {
		vdev_t *vd = vdev_lookup_top(spa,
		    DVA_GET_VDEV(&bp->blk_dva[d]));

		/*
		 * Keep track of how much data we've examined so that
		 * zpool(1M) status can make useful progress reports.
		 */
		scn->scn_phys.scn_examined += DVA_GET_ASIZE(&bp->blk_dva[d]);
		spa->spa_scan_pass_exam += DVA_GET_ASIZE(&bp->blk_dva[d]);

		/* if it's a resilver, this may not be in the target range */
		if (!needs_io) {
			if (DVA_GET_GANG(&bp->blk_dva[d])) {
				/*
				 * Gang members may be spread across multiple
				 * vdevs, so the best estimate we have is the
				 * scrub range, which has already been checked.
				 * XXX -- it would be better to change our
				 * allocation policy to ensure that all
				 * gang members reside on the same vdev.
				 */
				needs_io = B_TRUE;
			} else {
				needs_io = vdev_dtl_contains(vd, DTL_PARTIAL,
				    phys_birth, 1);
			}
		}
	}

	if (needs_io && !zfs_no_scrub_io) {
		vdev_t *rvd = spa->spa_root_vdev;
		uint64_t maxinflight = rvd->vdev_children *
		    MAX(zfs_top_maxinflight, 1);
		void *data = zio_data_buf_alloc(size);

		mutex_enter(&spa->spa_scrub_lock);
		while (spa->spa_scrub_inflight >= maxinflight)
			cv_wait(&spa->spa_scrub_io_cv, &spa->spa_scrub_lock);
		spa->spa_scrub_inflight++;
		mutex_exit(&spa->spa_scrub_lock);

		/*
		 * If we're seeing recent (zfs_scan_idle) "important" I/Os
		 * then throttle our workload to limit the impact of a scan.
		 */
		if (ddi_get_lbolt64() - spa->spa_last_io <= zfs_scan_idle)
			delay(MAX((int)scan_delay, 0));

		zio_nowait(zio_read(NULL, spa, bp, data, size,
		    dsl_scan_scrub_done, NULL, ZIO_PRIORITY_SCRUB,
		    zio_flags, zb));
	}

	/* do not relocate this block */
	return (0);
}

int
dsl_scan(dsl_pool_t *dp, pool_scan_func_t func)
{
	spa_t *spa = dp->dp_spa;

	/*
	 * Purge all vdev caches and probe all devices.  We do this here
	 * rather than in sync context because this requires a writer lock
	 * on the spa_config lock, which we can't do from sync context.  The
	 * spa_scrub_reopen flag indicates that vdev_open() should not
	 * attempt to start another scrub.
	 */
	spa_vdev_state_enter(spa, SCL_NONE);
	spa->spa_scrub_reopen = B_TRUE;
	vdev_reopen(spa->spa_root_vdev);
	spa->spa_scrub_reopen = B_FALSE;
	(void) spa_vdev_state_exit(spa, NULL, 0);

	return (dsl_sync_task(spa_name(spa), dsl_scan_setup_check,
	    dsl_scan_setup_sync, &func, 0, ZFS_SPACE_CHECK_NONE));
}