dsl_scan.c revision 297109
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011, 2015 by Delphix. All rights reserved.
 * Copyright 2016 Gary Mills
 */

#include <sys/dsl_scan.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_synctask.h>
#include <sys/dnode.h>
#include <sys/dmu_tx.h>
#include <sys/dmu_objset.h>
#include <sys/arc.h>
#include <sys/zap.h>
#include <sys/zio.h>
#include <sys/zfs_context.h>
#include <sys/fs/zfs.h>
#include <sys/zfs_znode.h>
#include <sys/spa_impl.h>
#include <sys/vdev_impl.h>
#include <sys/zil_impl.h>
#include <sys/zio_checksum.h>
#include <sys/ddt.h>
#include <sys/sa.h>
#include <sys/sa_impl.h>
#include <sys/zfeature.h>
#ifdef _KERNEL
#include <sys/zfs_vfsops.h>
#endif

typedef int (scan_cb_t)(dsl_pool_t *, const blkptr_t *,
    const zbookmark_phys_t *);

static scan_cb_t dsl_scan_scrub_cb;
static void dsl_scan_cancel_sync(void *, dmu_tx_t *);
static void dsl_scan_sync_state(dsl_scan_t *, dmu_tx_t *tx);

unsigned int zfs_top_maxinflight = 32;	/* maximum I/Os per top-level */
unsigned int zfs_resilver_delay = 2;	/* number of ticks to delay resilver */
unsigned int zfs_scrub_delay = 4;	/* number of ticks to delay scrub */
unsigned int zfs_scan_idle = 50;	/* idle window in clock ticks */

unsigned int zfs_scan_min_time_ms = 1000; /* min millisecs to scrub per txg */
unsigned int zfs_free_min_time_ms = 1000; /* min millisecs to free per txg */
unsigned int zfs_resilver_min_time_ms = 3000; /* min millisecs to resilver
						 per txg */
boolean_t zfs_no_scrub_io = B_FALSE; /* set to disable scrub i/o */
boolean_t zfs_no_scrub_prefetch = B_FALSE; /* set to disable scrub prefetch */
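
/*
 * Roughly, these knobs cooperate to throttle scan I/O:
 * dsl_scan_scrub_cb() keeps at most zfs_top_maxinflight scan I/Os in
 * flight per top-level vdev, and when other I/O has been seen within
 * the last zfs_scan_idle ticks it delays each scan I/O by
 * zfs_scrub_delay (or zfs_resilver_delay) ticks.  The *_min_time_ms
 * values are the minimum per-txg time slices enforced by
 * dsl_scan_check_pause() and dsl_scan_free_should_pause().
 */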

SYSCTL_DECL(_vfs_zfs);
TUNABLE_INT("vfs.zfs.top_maxinflight", &zfs_top_maxinflight);
SYSCTL_UINT(_vfs_zfs, OID_AUTO, top_maxinflight, CTLFLAG_RW,
    &zfs_top_maxinflight, 0, "Maximum I/Os per top-level vdev");
TUNABLE_INT("vfs.zfs.resilver_delay", &zfs_resilver_delay);
SYSCTL_UINT(_vfs_zfs, OID_AUTO, resilver_delay, CTLFLAG_RW,
    &zfs_resilver_delay, 0, "Number of ticks to delay resilver");
TUNABLE_INT("vfs.zfs.scrub_delay", &zfs_scrub_delay);
SYSCTL_UINT(_vfs_zfs, OID_AUTO, scrub_delay, CTLFLAG_RW,
    &zfs_scrub_delay, 0, "Number of ticks to delay scrub");
TUNABLE_INT("vfs.zfs.scan_idle", &zfs_scan_idle);
SYSCTL_UINT(_vfs_zfs, OID_AUTO, scan_idle, CTLFLAG_RW,
    &zfs_scan_idle, 0, "Idle scan window in clock ticks");
TUNABLE_INT("vfs.zfs.scan_min_time_ms", &zfs_scan_min_time_ms);
SYSCTL_UINT(_vfs_zfs, OID_AUTO, scan_min_time_ms, CTLFLAG_RW,
    &zfs_scan_min_time_ms, 0, "Min millisecs to scrub per txg");
TUNABLE_INT("vfs.zfs.free_min_time_ms", &zfs_free_min_time_ms);
SYSCTL_UINT(_vfs_zfs, OID_AUTO, free_min_time_ms, CTLFLAG_RW,
    &zfs_free_min_time_ms, 0, "Min millisecs to free per txg");
TUNABLE_INT("vfs.zfs.resilver_min_time_ms", &zfs_resilver_min_time_ms);
SYSCTL_UINT(_vfs_zfs, OID_AUTO, resilver_min_time_ms, CTLFLAG_RW,
    &zfs_resilver_min_time_ms, 0, "Min millisecs to resilver per txg");
TUNABLE_INT("vfs.zfs.no_scrub_io", &zfs_no_scrub_io);
SYSCTL_INT(_vfs_zfs, OID_AUTO, no_scrub_io, CTLFLAG_RW,
    &zfs_no_scrub_io, 0, "Disable scrub I/O");
TUNABLE_INT("vfs.zfs.no_scrub_prefetch", &zfs_no_scrub_prefetch);
SYSCTL_INT(_vfs_zfs, OID_AUTO, no_scrub_prefetch, CTLFLAG_RW,
    &zfs_no_scrub_prefetch, 0, "Disable scrub prefetching");

enum ddt_class zfs_scrub_ddt_class_max = DDT_CLASS_DUPLICATE;
/* max number of blocks to free in a single TXG */
uint64_t zfs_free_max_blocks = UINT64_MAX;
SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, free_max_blocks, CTLFLAG_RWTUN,
    &zfs_free_max_blocks, 0, "Maximum number of blocks to free in one TXG");


#define	DSL_SCAN_IS_SCRUB_RESILVER(scn) \
	((scn)->scn_phys.scn_func == POOL_SCAN_SCRUB || \
	(scn)->scn_phys.scn_func == POOL_SCAN_RESILVER)

extern int zfs_txg_timeout;

/*
 * Enable/disable the processing of the free_bpobj object.
 */
boolean_t zfs_free_bpobj_enabled = B_TRUE;

SYSCTL_INT(_vfs_zfs, OID_AUTO, free_bpobj_enabled, CTLFLAG_RWTUN,
    &zfs_free_bpobj_enabled, 0, "Enable free_bpobj processing");

/* the order has to match pool_scan_type */
static scan_cb_t *scan_funcs[POOL_SCAN_FUNCS] = {
	NULL,
	dsl_scan_scrub_cb,	/* POOL_SCAN_SCRUB */
	dsl_scan_scrub_cb,	/* POOL_SCAN_RESILVER */
};

int
dsl_scan_init(dsl_pool_t *dp, uint64_t txg)
{
	int err;
	dsl_scan_t *scn;
	spa_t *spa = dp->dp_spa;
	uint64_t f;

	scn = dp->dp_scan = kmem_zalloc(sizeof (dsl_scan_t), KM_SLEEP);
	scn->scn_dp = dp;

	/*
	 * It's possible that we're resuming a scan after a reboot so
	 * make sure that the scan_async_destroying flag is initialized
	 * appropriately.
	 */
	ASSERT(!scn->scn_async_destroying);
	scn->scn_async_destroying = spa_feature_is_active(dp->dp_spa,
	    SPA_FEATURE_ASYNC_DESTROY);

	err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    "scrub_func", sizeof (uint64_t), 1, &f);
	if (err == 0) {
		/*
		 * There was an old-style scrub in progress.  Restart a
		 * new-style scrub from the beginning.
		 */
		scn->scn_restart_txg = txg;
		zfs_dbgmsg("old-style scrub was in progress; "
		    "restarting new-style scrub in txg %llu",
		    scn->scn_restart_txg);

		/*
		 * Load the queue obj from the old location so that it
		 * can be freed by dsl_scan_done().
		 */
		(void) zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
		    "scrub_queue", sizeof (uint64_t), 1,
		    &scn->scn_phys.scn_queue_obj);
	} else {
		err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
		    DMU_POOL_SCAN, sizeof (uint64_t), SCAN_PHYS_NUMINTS,
		    &scn->scn_phys);
		if (err == ENOENT)
			return (0);
		else if (err)
			return (err);

		if (scn->scn_phys.scn_state == DSS_SCANNING &&
		    spa_prev_software_version(dp->dp_spa) < SPA_VERSION_SCAN) {
			/*
			 * A new-type scrub was in progress on an old
			 * pool, and the pool was accessed by old
			 * software.  Restart from the beginning, since
			 * the old software may have changed the pool in
			 * the meantime.
			 */
			scn->scn_restart_txg = txg;
			zfs_dbgmsg("new-style scrub was modified "
			    "by old software; restarting in txg %llu",
			    scn->scn_restart_txg);
		}
	}

	spa_scan_stat_init(spa);
	return (0);
}

void
dsl_scan_fini(dsl_pool_t *dp)
{
	if (dp->dp_scan) {
		kmem_free(dp->dp_scan, sizeof (dsl_scan_t));
		dp->dp_scan = NULL;
	}
}

/* ARGSUSED */
static int
dsl_scan_setup_check(void *arg, dmu_tx_t *tx)
{
	dsl_scan_t *scn = dmu_tx_pool(tx)->dp_scan;

	if (scn->scn_phys.scn_state == DSS_SCANNING)
		return (SET_ERROR(EBUSY));

	return (0);
}

static void
dsl_scan_setup_sync(void *arg, dmu_tx_t *tx)
{
	dsl_scan_t *scn = dmu_tx_pool(tx)->dp_scan;
	pool_scan_func_t *funcp = arg;
	dmu_object_type_t ot = 0;
	dsl_pool_t *dp = scn->scn_dp;
	spa_t *spa = dp->dp_spa;

	ASSERT(scn->scn_phys.scn_state != DSS_SCANNING);
	ASSERT(*funcp > POOL_SCAN_NONE && *funcp < POOL_SCAN_FUNCS);
	bzero(&scn->scn_phys, sizeof (scn->scn_phys));
	scn->scn_phys.scn_func = *funcp;
	scn->scn_phys.scn_state = DSS_SCANNING;
	scn->scn_phys.scn_min_txg = 0;
	scn->scn_phys.scn_max_txg = tx->tx_txg;
	scn->scn_phys.scn_ddt_class_max = DDT_CLASSES - 1; /* the entire DDT */
	scn->scn_phys.scn_start_time = gethrestime_sec();
	scn->scn_phys.scn_errors = 0;
	scn->scn_phys.scn_to_examine = spa->spa_root_vdev->vdev_stat.vs_alloc;
	scn->scn_restart_txg = 0;
	scn->scn_done_txg = 0;
	spa_scan_stat_init(spa);

	if (DSL_SCAN_IS_SCRUB_RESILVER(scn)) {
		scn->scn_phys.scn_ddt_class_max = zfs_scrub_ddt_class_max;

		/* rewrite all disk labels */
		vdev_config_dirty(spa->spa_root_vdev);

		if (vdev_resilver_needed(spa->spa_root_vdev,
		    &scn->scn_phys.scn_min_txg, &scn->scn_phys.scn_max_txg)) {
			spa_event_notify(spa, NULL, ESC_ZFS_RESILVER_START);
		} else {
			spa_event_notify(spa, NULL, ESC_ZFS_SCRUB_START);
		}

		spa->spa_scrub_started = B_TRUE;
		/*
		 * If this is an incremental scrub, limit the DDT scrub phase
		 * to just the auto-ditto class (for correctness); the rest
		 * of the scrub should go faster using top-down pruning.
		 */
		if (scn->scn_phys.scn_min_txg > TXG_INITIAL)
			scn->scn_phys.scn_ddt_class_max = DDT_CLASS_DITTO;

	}

	/* back to the generic stuff */

	if (dp->dp_blkstats == NULL) {
		dp->dp_blkstats =
		    kmem_alloc(sizeof (zfs_all_blkstats_t), KM_SLEEP);
	}
	bzero(dp->dp_blkstats, sizeof (zfs_all_blkstats_t));

	if (spa_version(spa) < SPA_VERSION_DSL_SCRUB)
		ot = DMU_OT_ZAP_OTHER;

	scn->scn_phys.scn_queue_obj = zap_create(dp->dp_meta_objset,
	    ot ? ot : DMU_OT_SCAN_QUEUE, DMU_OT_NONE, 0, tx);

	dsl_scan_sync_state(scn, tx);

	spa_history_log_internal(spa, "scan setup", tx,
	    "func=%u mintxg=%llu maxtxg=%llu",
	    *funcp, scn->scn_phys.scn_min_txg, scn->scn_phys.scn_max_txg);
}
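
/*
 * Note that dsl_scan_setup_sync() runs in syncing context: it is
 * normally dispatched, together with dsl_scan_setup_check() above, via
 * dsl_sync_task() from dsl_scan() at the bottom of this file, and
 * dsl_scan_sync() also calls it directly when restarting a scan.
 */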

/* ARGSUSED */
static void
dsl_scan_done(dsl_scan_t *scn, boolean_t complete, dmu_tx_t *tx)
{
	static const char *old_names[] = {
		"scrub_bookmark",
		"scrub_ddt_bookmark",
		"scrub_ddt_class_max",
		"scrub_queue",
		"scrub_min_txg",
		"scrub_max_txg",
		"scrub_func",
		"scrub_errors",
		NULL
	};

	dsl_pool_t *dp = scn->scn_dp;
	spa_t *spa = dp->dp_spa;
	int i;

	/* Remove any remnants of an old-style scrub. */
	for (i = 0; old_names[i]; i++) {
		(void) zap_remove(dp->dp_meta_objset,
		    DMU_POOL_DIRECTORY_OBJECT, old_names[i], tx);
	}

	if (scn->scn_phys.scn_queue_obj != 0) {
		VERIFY(0 == dmu_object_free(dp->dp_meta_objset,
		    scn->scn_phys.scn_queue_obj, tx));
		scn->scn_phys.scn_queue_obj = 0;
	}

	/*
	 * If we were "restarted" from a stopped state, don't bother
	 * with anything else.
	 */
	if (scn->scn_phys.scn_state != DSS_SCANNING)
		return;

	if (complete)
		scn->scn_phys.scn_state = DSS_FINISHED;
	else
		scn->scn_phys.scn_state = DSS_CANCELED;

	spa_history_log_internal(spa, "scan done", tx,
	    "complete=%u", complete);

	if (DSL_SCAN_IS_SCRUB_RESILVER(scn)) {
		mutex_enter(&spa->spa_scrub_lock);
		while (spa->spa_scrub_inflight > 0) {
			cv_wait(&spa->spa_scrub_io_cv,
			    &spa->spa_scrub_lock);
		}
		mutex_exit(&spa->spa_scrub_lock);
		spa->spa_scrub_started = B_FALSE;
		spa->spa_scrub_active = B_FALSE;

		/*
		 * If the scrub/resilver completed, update all DTLs to
		 * reflect this.  Whether it succeeded or not, vacate
		 * all temporary scrub DTLs.
		 */
		vdev_dtl_reassess(spa->spa_root_vdev, tx->tx_txg,
		    complete ? scn->scn_phys.scn_max_txg : 0, B_TRUE);
		if (complete) {
			spa_event_notify(spa, NULL, scn->scn_phys.scn_min_txg ?
			    ESC_ZFS_RESILVER_FINISH : ESC_ZFS_SCRUB_FINISH);
		}
		spa_errlog_rotate(spa);

		/*
		 * We may have finished replacing a device.
		 * Let the async thread assess this and handle the detach.
		 */
		spa_async_request(spa, SPA_ASYNC_RESILVER_DONE);
	}

	scn->scn_phys.scn_end_time = gethrestime_sec();
}

/* ARGSUSED */
static int
dsl_scan_cancel_check(void *arg, dmu_tx_t *tx)
{
	dsl_scan_t *scn = dmu_tx_pool(tx)->dp_scan;

	if (scn->scn_phys.scn_state != DSS_SCANNING)
		return (SET_ERROR(ENOENT));
	return (0);
}

/* ARGSUSED */
static void
dsl_scan_cancel_sync(void *arg, dmu_tx_t *tx)
{
	dsl_scan_t *scn = dmu_tx_pool(tx)->dp_scan;

	dsl_scan_done(scn, B_FALSE, tx);
	dsl_scan_sync_state(scn, tx);
}

int
dsl_scan_cancel(dsl_pool_t *dp)
{
	return (dsl_sync_task(spa_name(dp->dp_spa), dsl_scan_cancel_check,
	    dsl_scan_cancel_sync, NULL, 3, ZFS_SPACE_CHECK_RESERVED));
}

static void dsl_scan_visitbp(blkptr_t *bp, const zbookmark_phys_t *zb,
    dnode_phys_t *dnp, dsl_dataset_t *ds, dsl_scan_t *scn,
    dmu_objset_type_t ostype, dmu_tx_t *tx);
static void dsl_scan_visitdnode(dsl_scan_t *, dsl_dataset_t *ds,
    dmu_objset_type_t ostype,
    dnode_phys_t *dnp, uint64_t object, dmu_tx_t *tx);

void
dsl_free(dsl_pool_t *dp, uint64_t txg, const blkptr_t *bp)
{
	zio_free(dp->dp_spa, txg, bp);
}

void
dsl_free_sync(zio_t *pio, dsl_pool_t *dp, uint64_t txg, const blkptr_t *bpp)
{
	ASSERT(dsl_pool_sync_context(dp));
	zio_nowait(zio_free_sync(pio, dp->dp_spa, txg, bpp, BP_GET_PSIZE(bpp),
	    pio->io_flags));
}

static uint64_t
dsl_scan_ds_maxtxg(dsl_dataset_t *ds)
{
	uint64_t smt = ds->ds_dir->dd_pool->dp_scan->scn_phys.scn_max_txg;
	if (ds->ds_is_snapshot)
		return (MIN(smt, dsl_dataset_phys(ds)->ds_creation_txg));
	return (smt);
}
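
/*
 * E.g., for a snapshot created at txg 1000 while scrubbing with
 * scn_max_txg == 5000, dsl_scan_ds_maxtxg() returns 1000: nothing in
 * the snapshot can have been born after its creation txg, so there is
 * no point scanning it beyond that.
 */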

static void
dsl_scan_sync_state(dsl_scan_t *scn, dmu_tx_t *tx)
{
	VERIFY0(zap_update(scn->scn_dp->dp_meta_objset,
	    DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_SCAN, sizeof (uint64_t), SCAN_PHYS_NUMINTS,
	    &scn->scn_phys, tx));
}

extern int zfs_vdev_async_write_active_min_dirty_percent;

static boolean_t
dsl_scan_check_pause(dsl_scan_t *scn, const zbookmark_phys_t *zb)
{
	/* we never skip user/group accounting objects */
	if (zb && (int64_t)zb->zb_object < 0)
		return (B_FALSE);

	if (scn->scn_pausing)
		return (B_TRUE); /* we're already pausing */

	if (!ZB_IS_ZERO(&scn->scn_phys.scn_bookmark))
		return (B_FALSE); /* we're resuming */

	/* We only know how to resume from level-0 blocks. */
	if (zb && zb->zb_level != 0)
		return (B_FALSE);

	/*
	 * We pause if:
	 *  - we have scanned for the maximum time: an entire txg
	 *    timeout (default 5 sec)
	 *  or
	 *  - we have scanned for at least the minimum time (default 1 sec
	 *    for scrub, 3 sec for resilver), and either we have sufficient
	 *    dirty data that we are starting to write more quickly
	 *    (default 30%), or someone is explicitly waiting for this txg
	 *    to complete.
	 *  or
	 *  - the spa is shutting down because this pool is being exported
	 *    or the machine is rebooting.
	 */
	int mintime = (scn->scn_phys.scn_func == POOL_SCAN_RESILVER) ?
	    zfs_resilver_min_time_ms : zfs_scan_min_time_ms;
	uint64_t elapsed_nanosecs = gethrtime() - scn->scn_sync_start_time;
	int dirty_pct = scn->scn_dp->dp_dirty_total * 100 / zfs_dirty_data_max;
	if (elapsed_nanosecs / NANOSEC >= zfs_txg_timeout ||
	    (NSEC2MSEC(elapsed_nanosecs) > mintime &&
	    (txg_sync_waiting(scn->scn_dp) ||
	    dirty_pct >= zfs_vdev_async_write_active_min_dirty_percent)) ||
	    spa_shutting_down(scn->scn_dp->dp_spa)) {
		if (zb) {
			dprintf("pausing at bookmark %llx/%llx/%llx/%llx\n",
			    (longlong_t)zb->zb_objset,
			    (longlong_t)zb->zb_object,
			    (longlong_t)zb->zb_level,
			    (longlong_t)zb->zb_blkid);
			scn->scn_phys.scn_bookmark = *zb;
		}
		dprintf("pausing at DDT bookmark %llx/%llx/%llx/%llx\n",
		    (longlong_t)scn->scn_phys.scn_ddt_bookmark.ddb_class,
		    (longlong_t)scn->scn_phys.scn_ddt_bookmark.ddb_type,
		    (longlong_t)scn->scn_phys.scn_ddt_bookmark.ddb_checksum,
		    (longlong_t)scn->scn_phys.scn_ddt_bookmark.ddb_cursor);
		scn->scn_pausing = B_TRUE;
		return (B_TRUE);
	}
	return (B_FALSE);
}
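
/*
 * With default tunables the checks above work out to: pause once this
 * sync has run for zfs_txg_timeout (5) seconds; or after
 * zfs_scan_min_time_ms (1000 ms, 3000 ms for resilver) if someone is
 * waiting on the txg or dirty data has reached
 * zfs_vdev_async_write_active_min_dirty_percent (30%) of
 * zfs_dirty_data_max; or immediately if the pool is being exported.
 */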

typedef struct zil_scan_arg {
	dsl_pool_t	*zsa_dp;
	zil_header_t	*zsa_zh;
} zil_scan_arg_t;

/* ARGSUSED */
static int
dsl_scan_zil_block(zilog_t *zilog, blkptr_t *bp, void *arg, uint64_t claim_txg)
{
	zil_scan_arg_t *zsa = arg;
	dsl_pool_t *dp = zsa->zsa_dp;
	dsl_scan_t *scn = dp->dp_scan;
	zil_header_t *zh = zsa->zsa_zh;
	zbookmark_phys_t zb;

	if (BP_IS_HOLE(bp) || bp->blk_birth <= scn->scn_phys.scn_cur_min_txg)
		return (0);

	/*
	 * One block ("stubby") can be allocated a long time ago; we
	 * want to visit that one because it has been allocated
	 * (on-disk) even if it hasn't been claimed (even though for
	 * scrub there's nothing to do to it).
	 */
	if (claim_txg == 0 && bp->blk_birth >= spa_first_txg(dp->dp_spa))
		return (0);

	SET_BOOKMARK(&zb, zh->zh_log.blk_cksum.zc_word[ZIL_ZC_OBJSET],
	    ZB_ZIL_OBJECT, ZB_ZIL_LEVEL, bp->blk_cksum.zc_word[ZIL_ZC_SEQ]);

	VERIFY(0 == scan_funcs[scn->scn_phys.scn_func](dp, bp, &zb));
	return (0);
}

/* ARGSUSED */
static int
dsl_scan_zil_record(zilog_t *zilog, lr_t *lrc, void *arg, uint64_t claim_txg)
{
	if (lrc->lrc_txtype == TX_WRITE) {
		zil_scan_arg_t *zsa = arg;
		dsl_pool_t *dp = zsa->zsa_dp;
		dsl_scan_t *scn = dp->dp_scan;
		zil_header_t *zh = zsa->zsa_zh;
		lr_write_t *lr = (lr_write_t *)lrc;
		blkptr_t *bp = &lr->lr_blkptr;
		zbookmark_phys_t zb;

		if (BP_IS_HOLE(bp) ||
		    bp->blk_birth <= scn->scn_phys.scn_cur_min_txg)
			return (0);

		/*
		 * birth can be < claim_txg if this record's txg is
		 * already txg sync'ed (but this log block contains
		 * other records that are not synced)
		 */
		if (claim_txg == 0 || bp->blk_birth < claim_txg)
			return (0);

		SET_BOOKMARK(&zb, zh->zh_log.blk_cksum.zc_word[ZIL_ZC_OBJSET],
		    lr->lr_foid, ZB_ZIL_LEVEL,
		    lr->lr_offset / BP_GET_LSIZE(bp));

		VERIFY(0 == scan_funcs[scn->scn_phys.scn_func](dp, bp, &zb));
	}
	return (0);
}

static void
dsl_scan_zil(dsl_pool_t *dp, zil_header_t *zh)
{
	uint64_t claim_txg = zh->zh_claim_txg;
	zil_scan_arg_t zsa = { dp, zh };
	zilog_t *zilog;

	/*
	 * We only want to visit blocks that have been claimed but not yet
	 * replayed (or, in read-only mode, blocks that *would* be claimed).
	 */
	if (claim_txg == 0 && spa_writeable(dp->dp_spa))
		return;

	zilog = zil_alloc(dp->dp_meta_objset, zh);

	(void) zil_parse(zilog, dsl_scan_zil_block, dsl_scan_zil_record, &zsa,
	    claim_txg);

	zil_free(zilog);
}

/* ARGSUSED */
static void
dsl_scan_prefetch(dsl_scan_t *scn, arc_buf_t *buf, blkptr_t *bp,
    uint64_t objset, uint64_t object, uint64_t blkid)
{
	zbookmark_phys_t czb;
	arc_flags_t flags = ARC_FLAG_NOWAIT | ARC_FLAG_PREFETCH;

	if (zfs_no_scrub_prefetch)
		return;

	if (BP_IS_HOLE(bp) || bp->blk_birth <= scn->scn_phys.scn_min_txg ||
	    (BP_GET_LEVEL(bp) == 0 && BP_GET_TYPE(bp) != DMU_OT_DNODE))
		return;

	SET_BOOKMARK(&czb, objset, object, BP_GET_LEVEL(bp), blkid);

	(void) arc_read(scn->scn_zio_root, scn->scn_dp->dp_spa, bp,
	    NULL, NULL, ZIO_PRIORITY_ASYNC_READ,
	    ZIO_FLAG_CANFAIL | ZIO_FLAG_SCAN_THREAD, &flags, &czb);
}
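
/*
 * The prefetch above is fire-and-forget: ARC_FLAG_NOWAIT |
 * ARC_FLAG_PREFETCH issues the read with no done callback, so the
 * blocking ARC_FLAG_WAIT reads issued later by dsl_scan_recurse() can
 * be satisfied from the ARC instead of waiting on disk.
 */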

static boolean_t
dsl_scan_check_resume(dsl_scan_t *scn, const dnode_phys_t *dnp,
    const zbookmark_phys_t *zb)
{
	/*
	 * We never skip over user/group accounting objects (obj<0)
	 */
	if (!ZB_IS_ZERO(&scn->scn_phys.scn_bookmark) &&
	    (int64_t)zb->zb_object >= 0) {
		/*
		 * If we already visited this bp & everything below (in
		 * a prior txg sync), don't bother doing it again.
		 */
		if (zbookmark_subtree_completed(dnp, zb,
		    &scn->scn_phys.scn_bookmark))
			return (B_TRUE);

		/*
		 * If we found the block we're trying to resume from, or
		 * we went past it to a different object, zero it out to
		 * indicate that it's OK to start checking for pausing
		 * again.
		 */
		if (bcmp(zb, &scn->scn_phys.scn_bookmark, sizeof (*zb)) == 0 ||
		    zb->zb_object > scn->scn_phys.scn_bookmark.zb_object) {
			dprintf("resuming at %llx/%llx/%llx/%llx\n",
			    (longlong_t)zb->zb_objset,
			    (longlong_t)zb->zb_object,
			    (longlong_t)zb->zb_level,
			    (longlong_t)zb->zb_blkid);
			bzero(&scn->scn_phys.scn_bookmark, sizeof (*zb));
		}
	}
	return (B_FALSE);
}

/*
 * Return nonzero on i/o error.
 */
static int
dsl_scan_recurse(dsl_scan_t *scn, dsl_dataset_t *ds, dmu_objset_type_t ostype,
    dnode_phys_t *dnp, const blkptr_t *bp,
    const zbookmark_phys_t *zb, dmu_tx_t *tx)
{
	dsl_pool_t *dp = scn->scn_dp;
	int zio_flags = ZIO_FLAG_CANFAIL | ZIO_FLAG_SCAN_THREAD;
	int err;

	if (BP_GET_LEVEL(bp) > 0) {
		arc_flags_t flags = ARC_FLAG_WAIT;
		int i;
		blkptr_t *cbp;
		int epb = BP_GET_LSIZE(bp) >> SPA_BLKPTRSHIFT;
		arc_buf_t *buf;

		err = arc_read(NULL, dp->dp_spa, bp, arc_getbuf_func, &buf,
		    ZIO_PRIORITY_ASYNC_READ, zio_flags, &flags, zb);
		if (err) {
			scn->scn_phys.scn_errors++;
			return (err);
		}
		for (i = 0, cbp = buf->b_data; i < epb; i++, cbp++) {
			dsl_scan_prefetch(scn, buf, cbp, zb->zb_objset,
			    zb->zb_object, zb->zb_blkid * epb + i);
		}
		for (i = 0, cbp = buf->b_data; i < epb; i++, cbp++) {
			zbookmark_phys_t czb;

			SET_BOOKMARK(&czb, zb->zb_objset, zb->zb_object,
			    zb->zb_level - 1,
			    zb->zb_blkid * epb + i);
			dsl_scan_visitbp(cbp, &czb, dnp,
			    ds, scn, ostype, tx);
		}
		(void) arc_buf_remove_ref(buf, &buf);
	} else if (BP_GET_TYPE(bp) == DMU_OT_DNODE) {
		arc_flags_t flags = ARC_FLAG_WAIT;
		dnode_phys_t *cdnp;
		int i, j;
		int epb = BP_GET_LSIZE(bp) >> DNODE_SHIFT;
		arc_buf_t *buf;

		err = arc_read(NULL, dp->dp_spa, bp, arc_getbuf_func, &buf,
		    ZIO_PRIORITY_ASYNC_READ, zio_flags, &flags, zb);
		if (err) {
			scn->scn_phys.scn_errors++;
			return (err);
		}
		for (i = 0, cdnp = buf->b_data; i < epb; i++, cdnp++) {
			for (j = 0; j < cdnp->dn_nblkptr; j++) {
				blkptr_t *cbp = &cdnp->dn_blkptr[j];
				dsl_scan_prefetch(scn, buf, cbp,
				    zb->zb_objset, zb->zb_blkid * epb + i, j);
			}
		}
		for (i = 0, cdnp = buf->b_data; i < epb; i++, cdnp++) {
			dsl_scan_visitdnode(scn, ds, ostype,
			    cdnp, zb->zb_blkid * epb + i, tx);
		}

		(void) arc_buf_remove_ref(buf, &buf);
	} else if (BP_GET_TYPE(bp) == DMU_OT_OBJSET) {
		arc_flags_t flags = ARC_FLAG_WAIT;
		objset_phys_t *osp;
		arc_buf_t *buf;

		err = arc_read(NULL, dp->dp_spa, bp, arc_getbuf_func, &buf,
		    ZIO_PRIORITY_ASYNC_READ, zio_flags, &flags, zb);
		if (err) {
			scn->scn_phys.scn_errors++;
			return (err);
		}

		osp = buf->b_data;

		dsl_scan_visitdnode(scn, ds, osp->os_type,
		    &osp->os_meta_dnode, DMU_META_DNODE_OBJECT, tx);

		if (OBJSET_BUF_HAS_USERUSED(buf)) {
			/*
			 * We also always visit user/group accounting
			 * objects, and never skip them, even if we are
			 * pausing.  This is necessary so that the space
			 * deltas from this txg get integrated.
			 */
			dsl_scan_visitdnode(scn, ds, osp->os_type,
			    &osp->os_groupused_dnode,
			    DMU_GROUPUSED_OBJECT, tx);
			dsl_scan_visitdnode(scn, ds, osp->os_type,
			    &osp->os_userused_dnode,
			    DMU_USERUSED_OBJECT, tx);
		}
		(void) arc_buf_remove_ref(buf, &buf);
	}

	return (0);
}
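
/*
 * In dsl_scan_recurse(), epb is the number of entries per indirect or
 * dnode block: e.g. a 16K indirect block holds 16K >> SPA_BLKPTRSHIFT
 * (128-byte blkptrs) = 128 block pointers, and a 16K dnode block holds
 * 16K >> DNODE_SHIFT (512-byte dnodes) = 32 dnodes.
 */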

static void
dsl_scan_visitdnode(dsl_scan_t *scn, dsl_dataset_t *ds,
    dmu_objset_type_t ostype, dnode_phys_t *dnp,
    uint64_t object, dmu_tx_t *tx)
{
	int j;

	for (j = 0; j < dnp->dn_nblkptr; j++) {
		zbookmark_phys_t czb;

		SET_BOOKMARK(&czb, ds ? ds->ds_object : 0, object,
		    dnp->dn_nlevels - 1, j);
		dsl_scan_visitbp(&dnp->dn_blkptr[j],
		    &czb, dnp, ds, scn, ostype, tx);
	}

	if (dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR) {
		zbookmark_phys_t czb;
		SET_BOOKMARK(&czb, ds ? ds->ds_object : 0, object,
		    0, DMU_SPILL_BLKID);
		dsl_scan_visitbp(&dnp->dn_spill,
		    &czb, dnp, ds, scn, ostype, tx);
	}
}

/*
 * The arguments are in this order because mdb can only print the
 * first 5; we want them to be useful.
 */
static void
dsl_scan_visitbp(blkptr_t *bp, const zbookmark_phys_t *zb,
    dnode_phys_t *dnp, dsl_dataset_t *ds, dsl_scan_t *scn,
    dmu_objset_type_t ostype, dmu_tx_t *tx)
{
	dsl_pool_t *dp = scn->scn_dp;
	arc_buf_t *buf = NULL;
	blkptr_t bp_toread = *bp;

	/* ASSERT(pbuf == NULL || arc_released(pbuf)); */

	if (dsl_scan_check_pause(scn, zb))
		return;

	if (dsl_scan_check_resume(scn, dnp, zb))
		return;

	if (BP_IS_HOLE(bp))
		return;

	scn->scn_visited_this_txg++;

	dprintf_bp(bp,
	    "visiting ds=%p/%llu zb=%llx/%llx/%llx/%llx bp=%p",
	    ds, ds ? ds->ds_object : 0,
	    zb->zb_objset, zb->zb_object, zb->zb_level, zb->zb_blkid,
	    bp);

	if (bp->blk_birth <= scn->scn_phys.scn_cur_min_txg)
		return;

	if (dsl_scan_recurse(scn, ds, ostype, dnp, &bp_toread, zb, tx) != 0)
		return;

	/*
	 * If dsl_scan_ddt() has already visited this block, it will have
	 * already done any translations or scrubbing, so don't call the
	 * callback again.
	 */
	if (ddt_class_contains(dp->dp_spa,
	    scn->scn_phys.scn_ddt_class_max, bp)) {
		ASSERT(buf == NULL);
		return;
	}

	/*
	 * If this block is from the future (after cur_max_txg), then we
	 * are doing this on behalf of a deleted snapshot, and we will
	 * revisit the future block on the next pass of this dataset.
	 * Don't scan it now unless we need to because something
	 * under it was modified.
	 */
	if (BP_PHYSICAL_BIRTH(bp) <= scn->scn_phys.scn_cur_max_txg) {
		scan_funcs[scn->scn_phys.scn_func](dp, bp, zb);
	}
}

static void
dsl_scan_visit_rootbp(dsl_scan_t *scn, dsl_dataset_t *ds, blkptr_t *bp,
    dmu_tx_t *tx)
{
	zbookmark_phys_t zb;

	SET_BOOKMARK(&zb, ds ? ds->ds_object : DMU_META_OBJSET,
	    ZB_ROOT_OBJECT, ZB_ROOT_LEVEL, ZB_ROOT_BLKID);
	dsl_scan_visitbp(bp, &zb, NULL,
	    ds, scn, DMU_OST_NONE, tx);

	dprintf_ds(ds, "finished scan%s", "");
}
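
/*
 * The three hooks below (dsl_scan_ds_destroyed(),
 * dsl_scan_ds_snapshotted() and dsl_scan_ds_clone_swapped()) are called
 * from the corresponding dataset operations while a scan is in
 * progress; they patch up scn_bookmark and the scan queue so that
 * neither refers to a dataset object that no longer exists or that now
 * means something else.
 */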

void
dsl_scan_ds_destroyed(dsl_dataset_t *ds, dmu_tx_t *tx)
{
	dsl_pool_t *dp = ds->ds_dir->dd_pool;
	dsl_scan_t *scn = dp->dp_scan;
	uint64_t mintxg;

	if (scn->scn_phys.scn_state != DSS_SCANNING)
		return;

	if (scn->scn_phys.scn_bookmark.zb_objset == ds->ds_object) {
		if (ds->ds_is_snapshot) {
			/* Note, scn_cur_{min,max}_txg stays the same. */
			scn->scn_phys.scn_bookmark.zb_objset =
			    dsl_dataset_phys(ds)->ds_next_snap_obj;
			zfs_dbgmsg("destroying ds %llu; currently traversing; "
			    "reset zb_objset to %llu",
			    (u_longlong_t)ds->ds_object,
			    (u_longlong_t)dsl_dataset_phys(ds)->
			    ds_next_snap_obj);
			scn->scn_phys.scn_flags |= DSF_VISIT_DS_AGAIN;
		} else {
			SET_BOOKMARK(&scn->scn_phys.scn_bookmark,
			    ZB_DESTROYED_OBJSET, 0, 0, 0);
			zfs_dbgmsg("destroying ds %llu; currently traversing; "
			    "reset bookmark to -1,0,0,0",
			    (u_longlong_t)ds->ds_object);
		}
	} else if (zap_lookup_int_key(dp->dp_meta_objset,
	    scn->scn_phys.scn_queue_obj, ds->ds_object, &mintxg) == 0) {
		ASSERT3U(dsl_dataset_phys(ds)->ds_num_children, <=, 1);
		VERIFY3U(0, ==, zap_remove_int(dp->dp_meta_objset,
		    scn->scn_phys.scn_queue_obj, ds->ds_object, tx));
		if (ds->ds_is_snapshot) {
			/*
			 * We keep the same mintxg; it could be >
			 * ds_creation_txg if the previous snapshot was
			 * deleted too.
			 */
			VERIFY(zap_add_int_key(dp->dp_meta_objset,
			    scn->scn_phys.scn_queue_obj,
			    dsl_dataset_phys(ds)->ds_next_snap_obj,
			    mintxg, tx) == 0);
			zfs_dbgmsg("destroying ds %llu; in queue; "
			    "replacing with %llu",
			    (u_longlong_t)ds->ds_object,
			    (u_longlong_t)dsl_dataset_phys(ds)->
			    ds_next_snap_obj);
		} else {
			zfs_dbgmsg("destroying ds %llu; in queue; removing",
			    (u_longlong_t)ds->ds_object);
		}
	} else {
		zfs_dbgmsg("destroying ds %llu; ignoring",
		    (u_longlong_t)ds->ds_object);
	}

	/*
	 * dsl_scan_sync() should be called after this, and should sync
	 * out our changed state, but just to be safe, do it here.
	 */
	dsl_scan_sync_state(scn, tx);
}

void
dsl_scan_ds_snapshotted(dsl_dataset_t *ds, dmu_tx_t *tx)
{
	dsl_pool_t *dp = ds->ds_dir->dd_pool;
	dsl_scan_t *scn = dp->dp_scan;
	uint64_t mintxg;

	if (scn->scn_phys.scn_state != DSS_SCANNING)
		return;

	ASSERT(dsl_dataset_phys(ds)->ds_prev_snap_obj != 0);

	if (scn->scn_phys.scn_bookmark.zb_objset == ds->ds_object) {
		scn->scn_phys.scn_bookmark.zb_objset =
		    dsl_dataset_phys(ds)->ds_prev_snap_obj;
		zfs_dbgmsg("snapshotting ds %llu; currently traversing; "
		    "reset zb_objset to %llu",
		    (u_longlong_t)ds->ds_object,
		    (u_longlong_t)dsl_dataset_phys(ds)->ds_prev_snap_obj);
	} else if (zap_lookup_int_key(dp->dp_meta_objset,
	    scn->scn_phys.scn_queue_obj, ds->ds_object, &mintxg) == 0) {
		VERIFY3U(0, ==, zap_remove_int(dp->dp_meta_objset,
		    scn->scn_phys.scn_queue_obj, ds->ds_object, tx));
		VERIFY(zap_add_int_key(dp->dp_meta_objset,
		    scn->scn_phys.scn_queue_obj,
		    dsl_dataset_phys(ds)->ds_prev_snap_obj, mintxg, tx) == 0);
		zfs_dbgmsg("snapshotting ds %llu; in queue; "
		    "replacing with %llu",
		    (u_longlong_t)ds->ds_object,
		    (u_longlong_t)dsl_dataset_phys(ds)->ds_prev_snap_obj);
	}
	dsl_scan_sync_state(scn, tx);
}

void
dsl_scan_ds_clone_swapped(dsl_dataset_t *ds1, dsl_dataset_t *ds2, dmu_tx_t *tx)
{
	dsl_pool_t *dp = ds1->ds_dir->dd_pool;
	dsl_scan_t *scn = dp->dp_scan;
	uint64_t mintxg;

	if (scn->scn_phys.scn_state != DSS_SCANNING)
		return;

	if (scn->scn_phys.scn_bookmark.zb_objset == ds1->ds_object) {
		scn->scn_phys.scn_bookmark.zb_objset = ds2->ds_object;
		zfs_dbgmsg("clone_swap ds %llu; currently traversing; "
		    "reset zb_objset to %llu",
		    (u_longlong_t)ds1->ds_object,
		    (u_longlong_t)ds2->ds_object);
	} else if (scn->scn_phys.scn_bookmark.zb_objset == ds2->ds_object) {
		scn->scn_phys.scn_bookmark.zb_objset = ds1->ds_object;
		zfs_dbgmsg("clone_swap ds %llu; currently traversing; "
		    "reset zb_objset to %llu",
		    (u_longlong_t)ds2->ds_object,
		    (u_longlong_t)ds1->ds_object);
	}

	if (zap_lookup_int_key(dp->dp_meta_objset, scn->scn_phys.scn_queue_obj,
	    ds1->ds_object, &mintxg) == 0) {
		int err;

		ASSERT3U(mintxg, ==, dsl_dataset_phys(ds1)->ds_prev_snap_txg);
		ASSERT3U(mintxg, ==, dsl_dataset_phys(ds2)->ds_prev_snap_txg);
		VERIFY3U(0, ==, zap_remove_int(dp->dp_meta_objset,
		    scn->scn_phys.scn_queue_obj, ds1->ds_object, tx));
		err = zap_add_int_key(dp->dp_meta_objset,
		    scn->scn_phys.scn_queue_obj, ds2->ds_object, mintxg, tx);
		VERIFY(err == 0 || err == EEXIST);
		if (err == EEXIST) {
			/* Both were there to begin with */
			VERIFY(0 == zap_add_int_key(dp->dp_meta_objset,
			    scn->scn_phys.scn_queue_obj,
			    ds1->ds_object, mintxg, tx));
		}
		zfs_dbgmsg("clone_swap ds %llu; in queue; "
		    "replacing with %llu",
		    (u_longlong_t)ds1->ds_object,
		    (u_longlong_t)ds2->ds_object);
	} else if (zap_lookup_int_key(dp->dp_meta_objset,
	    scn->scn_phys.scn_queue_obj, ds2->ds_object, &mintxg) == 0) {
		ASSERT3U(mintxg, ==, dsl_dataset_phys(ds1)->ds_prev_snap_txg);
		ASSERT3U(mintxg, ==, dsl_dataset_phys(ds2)->ds_prev_snap_txg);
		VERIFY3U(0, ==, zap_remove_int(dp->dp_meta_objset,
		    scn->scn_phys.scn_queue_obj, ds2->ds_object, tx));
		VERIFY(0 == zap_add_int_key(dp->dp_meta_objset,
		    scn->scn_phys.scn_queue_obj, ds1->ds_object, mintxg, tx));
		zfs_dbgmsg("clone_swap ds %llu; in queue; "
		    "replacing with %llu",
		    (u_longlong_t)ds2->ds_object,
		    (u_longlong_t)ds1->ds_object);
	}

	dsl_scan_sync_state(scn, tx);
}

struct enqueue_clones_arg {
	dmu_tx_t *tx;
	uint64_t originobj;
};

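/*
 * enqueue_clones_cb() below is invoked for every dataset in the pool
 * (see the dmu_objset_find_dp() call in dsl_scan_visitds()); for each
 * clone of eca->originobj it walks back through the clone's snapshots
 * and, roughly, enqueues the oldest one whose previous snapshot is the
 * origin itself.
 */
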
/* ARGSUSED */
static int
enqueue_clones_cb(dsl_pool_t *dp, dsl_dataset_t *hds, void *arg)
{
	struct enqueue_clones_arg *eca = arg;
	dsl_dataset_t *ds;
	int err;
	dsl_scan_t *scn = dp->dp_scan;

	if (dsl_dir_phys(hds->ds_dir)->dd_origin_obj != eca->originobj)
		return (0);

	err = dsl_dataset_hold_obj(dp, hds->ds_object, FTAG, &ds);
	if (err)
		return (err);

	while (dsl_dataset_phys(ds)->ds_prev_snap_obj != eca->originobj) {
		dsl_dataset_t *prev;
		err = dsl_dataset_hold_obj(dp,
		    dsl_dataset_phys(ds)->ds_prev_snap_obj, FTAG, &prev);

		dsl_dataset_rele(ds, FTAG);
		if (err)
			return (err);
		ds = prev;
	}
	VERIFY(zap_add_int_key(dp->dp_meta_objset,
	    scn->scn_phys.scn_queue_obj, ds->ds_object,
	    dsl_dataset_phys(ds)->ds_prev_snap_txg, eca->tx) == 0);
	dsl_dataset_rele(ds, FTAG);
	return (0);
}

static void
dsl_scan_visitds(dsl_scan_t *scn, uint64_t dsobj, dmu_tx_t *tx)
{
	dsl_pool_t *dp = scn->scn_dp;
	dsl_dataset_t *ds;
	objset_t *os;

	VERIFY3U(0, ==, dsl_dataset_hold_obj(dp, dsobj, FTAG, &ds));

	if (dmu_objset_from_ds(ds, &os))
		goto out;

	/*
	 * Only the ZIL in the head (non-snapshot) is valid.  Even though
	 * snapshots can have ZIL block pointers (which may be the same
	 * BP as in the head), they must be ignored.  So we traverse the
	 * ZIL here, rather than in scan_recurse(), because the regular
	 * snapshot block-sharing rules don't apply to it.
	 */
	if (DSL_SCAN_IS_SCRUB_RESILVER(scn) && !ds->ds_is_snapshot)
		dsl_scan_zil(dp, &os->os_zil_header);

	/*
	 * Iterate over the bps in this ds.
	 */
	dmu_buf_will_dirty(ds->ds_dbuf, tx);
	dsl_scan_visit_rootbp(scn, ds, &dsl_dataset_phys(ds)->ds_bp, tx);

	char *dsname = kmem_alloc(ZFS_MAXNAMELEN, KM_SLEEP);
	dsl_dataset_name(ds, dsname);
	zfs_dbgmsg("scanned dataset %llu (%s) with min=%llu max=%llu; "
	    "pausing=%u",
	    (longlong_t)dsobj, dsname,
	    (longlong_t)scn->scn_phys.scn_cur_min_txg,
	    (longlong_t)scn->scn_phys.scn_cur_max_txg,
	    (int)scn->scn_pausing);
	kmem_free(dsname, ZFS_MAXNAMELEN);

	if (scn->scn_pausing)
		goto out;

	/*
	 * We've finished this pass over this dataset.
	 */

	/*
	 * If we did not completely visit this dataset, do another pass.
	 */
	if (scn->scn_phys.scn_flags & DSF_VISIT_DS_AGAIN) {
		zfs_dbgmsg("incomplete pass; visiting again");
		scn->scn_phys.scn_flags &= ~DSF_VISIT_DS_AGAIN;
		VERIFY(zap_add_int_key(dp->dp_meta_objset,
		    scn->scn_phys.scn_queue_obj, ds->ds_object,
		    scn->scn_phys.scn_cur_max_txg, tx) == 0);
		goto out;
	}

	/*
	 * Add descendant datasets to work queue.
	 */
	if (dsl_dataset_phys(ds)->ds_next_snap_obj != 0) {
		VERIFY(zap_add_int_key(dp->dp_meta_objset,
		    scn->scn_phys.scn_queue_obj,
		    dsl_dataset_phys(ds)->ds_next_snap_obj,
		    dsl_dataset_phys(ds)->ds_creation_txg, tx) == 0);
	}
	if (dsl_dataset_phys(ds)->ds_num_children > 1) {
		boolean_t usenext = B_FALSE;
		if (dsl_dataset_phys(ds)->ds_next_clones_obj != 0) {
			uint64_t count;
			/*
			 * A bug in a previous version of the code could
			 * cause upgrade_clones_cb() to not set
			 * ds_next_snap_obj when it should, leading to a
			 * missing entry.  Therefore we can only use the
			 * next_clones_obj when its count is correct.
			 */
			int err = zap_count(dp->dp_meta_objset,
			    dsl_dataset_phys(ds)->ds_next_clones_obj, &count);
			if (err == 0 &&
			    count == dsl_dataset_phys(ds)->ds_num_children - 1)
				usenext = B_TRUE;
		}

		if (usenext) {
			VERIFY0(zap_join_key(dp->dp_meta_objset,
			    dsl_dataset_phys(ds)->ds_next_clones_obj,
			    scn->scn_phys.scn_queue_obj,
			    dsl_dataset_phys(ds)->ds_creation_txg, tx));
		} else {
			struct enqueue_clones_arg eca;
			eca.tx = tx;
			eca.originobj = ds->ds_object;

			VERIFY0(dmu_objset_find_dp(dp, dp->dp_root_dir_obj,
			    enqueue_clones_cb, &eca, DS_FIND_CHILDREN));
		}
	}

out:
	dsl_dataset_rele(ds, FTAG);
}
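
/*
 * If the visit above was interrupted, scn_pausing is set and the
 * on-disk bookmark keeps our place in this dataset; only after a
 * complete pass are the next snapshot and any clones enqueued so that
 * later passes pick them up from the queue.
 */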

/* ARGSUSED */
static int
enqueue_cb(dsl_pool_t *dp, dsl_dataset_t *hds, void *arg)
{
	dmu_tx_t *tx = arg;
	dsl_dataset_t *ds;
	int err;
	dsl_scan_t *scn = dp->dp_scan;

	err = dsl_dataset_hold_obj(dp, hds->ds_object, FTAG, &ds);
	if (err)
		return (err);

	while (dsl_dataset_phys(ds)->ds_prev_snap_obj != 0) {
		dsl_dataset_t *prev;
		err = dsl_dataset_hold_obj(dp,
		    dsl_dataset_phys(ds)->ds_prev_snap_obj, FTAG, &prev);
		if (err) {
			dsl_dataset_rele(ds, FTAG);
			return (err);
		}

		/*
		 * If this is a clone, we don't need to worry about it for now.
		 */
		if (dsl_dataset_phys(prev)->ds_next_snap_obj != ds->ds_object) {
			dsl_dataset_rele(ds, FTAG);
			dsl_dataset_rele(prev, FTAG);
			return (0);
		}
		dsl_dataset_rele(ds, FTAG);
		ds = prev;
	}

	VERIFY(zap_add_int_key(dp->dp_meta_objset, scn->scn_phys.scn_queue_obj,
	    ds->ds_object, dsl_dataset_phys(ds)->ds_prev_snap_txg, tx) == 0);
	dsl_dataset_rele(ds, FTAG);
	return (0);
}

/*
 * Scrub/dedup interaction.
 *
 * If there are N references to a deduped block, we don't want to scrub it
 * N times -- ideally, we should scrub it exactly once.
 *
 * We leverage the fact that the dde's replication class (enum ddt_class)
 * is ordered from highest replication class (DDT_CLASS_DITTO) to lowest
 * (DDT_CLASS_UNIQUE) so that we may walk the DDT in that order.
 *
 * To prevent excess scrubbing, the scrub begins by walking the DDT
 * to find all blocks with refcnt > 1, and scrubs each of these once.
 * Since there are two replication classes which contain blocks with
 * refcnt > 1, we scrub the highest replication class (DDT_CLASS_DITTO) first.
 * Finally the top-down scrub begins, only visiting blocks with refcnt == 1.
 *
 * There would be nothing more to say if a block's refcnt couldn't change
 * during a scrub, but of course it can so we must account for changes
 * in a block's replication class.
 *
 * Here's an example of what can occur:
 *
 * If a block has refcnt > 1 during the DDT scrub phase, but has refcnt == 1
 * when visited during the top-down scrub phase, it will be scrubbed twice.
 * This negates our scrub optimization, but is otherwise harmless.
 *
 * If a block has refcnt == 1 during the DDT scrub phase, but has refcnt > 1
 * on each visit during the top-down scrub phase, it will never be scrubbed.
 * To catch this, ddt_sync_entry() notifies the scrub code whenever a block's
 * reference class transitions to a higher level (i.e. DDT_CLASS_UNIQUE to
 * DDT_CLASS_DUPLICATE); if it transitions from refcnt == 1 to refcnt > 1
 * while a scrub is in progress, it scrubs the block right then.
 */
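
/*
 * For example, a block in DDT_CLASS_DUPLICATE (say refcnt == 2) is
 * visited once by dsl_scan_ddt() below; when the top-down traversal
 * later reaches it, the ddt_class_contains() check in
 * dsl_scan_visitbp() sees that its class is <= scn_ddt_class_max and
 * skips the scrub callback.
 */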
static void
dsl_scan_ddt(dsl_scan_t *scn, dmu_tx_t *tx)
{
	ddt_bookmark_t *ddb = &scn->scn_phys.scn_ddt_bookmark;
	ddt_entry_t dde = { 0 };
	int error;
	uint64_t n = 0;

	while ((error = ddt_walk(scn->scn_dp->dp_spa, ddb, &dde)) == 0) {
		ddt_t *ddt;

		if (ddb->ddb_class > scn->scn_phys.scn_ddt_class_max)
			break;
		dprintf("visiting ddb=%llu/%llu/%llu/%llx\n",
		    (longlong_t)ddb->ddb_class,
		    (longlong_t)ddb->ddb_type,
		    (longlong_t)ddb->ddb_checksum,
		    (longlong_t)ddb->ddb_cursor);

		/* There should be no pending changes to the dedup table */
		ddt = scn->scn_dp->dp_spa->spa_ddt[ddb->ddb_checksum];
		ASSERT(avl_first(&ddt->ddt_tree) == NULL);

		dsl_scan_ddt_entry(scn, ddb->ddb_checksum, &dde, tx);
		n++;

		if (dsl_scan_check_pause(scn, NULL))
			break;
	}

	zfs_dbgmsg("scanned %llu ddt entries with class_max = %u; pausing=%u",
	    (longlong_t)n, (int)scn->scn_phys.scn_ddt_class_max,
	    (int)scn->scn_pausing);

	ASSERT(error == 0 || error == ENOENT);
	ASSERT(error != ENOENT ||
	    ddb->ddb_class > scn->scn_phys.scn_ddt_class_max);
}

/* ARGSUSED */
void
dsl_scan_ddt_entry(dsl_scan_t *scn, enum zio_checksum checksum,
    ddt_entry_t *dde, dmu_tx_t *tx)
{
	const ddt_key_t *ddk = &dde->dde_key;
	ddt_phys_t *ddp = dde->dde_phys;
	blkptr_t bp;
	zbookmark_phys_t zb = { 0 };

	if (scn->scn_phys.scn_state != DSS_SCANNING)
		return;

	for (int p = 0; p < DDT_PHYS_TYPES; p++, ddp++) {
		if (ddp->ddp_phys_birth == 0 ||
		    ddp->ddp_phys_birth > scn->scn_phys.scn_max_txg)
			continue;
		ddt_bp_create(checksum, ddk, ddp, &bp);

		scn->scn_visited_this_txg++;
		scan_funcs[scn->scn_phys.scn_func](scn->scn_dp, &bp, &zb);
	}
}
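
/*
 * Note that DDT-driven visits pass a zeroed zbookmark to the scan
 * callback: progress through this phase is tracked by scn_ddt_bookmark
 * (advanced by ddt_walk()) rather than by scn_bookmark.
 */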

static void
dsl_scan_visit(dsl_scan_t *scn, dmu_tx_t *tx)
{
	dsl_pool_t *dp = scn->scn_dp;
	zap_cursor_t zc;
	zap_attribute_t za;

	if (scn->scn_phys.scn_ddt_bookmark.ddb_class <=
	    scn->scn_phys.scn_ddt_class_max) {
		scn->scn_phys.scn_cur_min_txg = scn->scn_phys.scn_min_txg;
		scn->scn_phys.scn_cur_max_txg = scn->scn_phys.scn_max_txg;
		dsl_scan_ddt(scn, tx);
		if (scn->scn_pausing)
			return;
	}

	if (scn->scn_phys.scn_bookmark.zb_objset == DMU_META_OBJSET) {
		/* First do the MOS & ORIGIN */

		scn->scn_phys.scn_cur_min_txg = scn->scn_phys.scn_min_txg;
		scn->scn_phys.scn_cur_max_txg = scn->scn_phys.scn_max_txg;
		dsl_scan_visit_rootbp(scn, NULL,
		    &dp->dp_meta_rootbp, tx);
		spa_set_rootblkptr(dp->dp_spa, &dp->dp_meta_rootbp);
		if (scn->scn_pausing)
			return;

		if (spa_version(dp->dp_spa) < SPA_VERSION_DSL_SCRUB) {
			VERIFY0(dmu_objset_find_dp(dp, dp->dp_root_dir_obj,
			    enqueue_cb, tx, DS_FIND_CHILDREN));
		} else {
			dsl_scan_visitds(scn,
			    dp->dp_origin_snap->ds_object, tx);
		}
		ASSERT(!scn->scn_pausing);
	} else if (scn->scn_phys.scn_bookmark.zb_objset !=
	    ZB_DESTROYED_OBJSET) {
		/*
		 * If we were paused, continue from here.  Note if the
		 * ds we were paused on was deleted, the zb_objset may
		 * be -1, so we will skip this and find a new objset
		 * below.
		 */
		dsl_scan_visitds(scn, scn->scn_phys.scn_bookmark.zb_objset, tx);
		if (scn->scn_pausing)
			return;
	}

	/*
	 * In case we were paused right at the end of the ds, zero the
	 * bookmark so we don't think that we're still trying to resume.
	 */
	bzero(&scn->scn_phys.scn_bookmark, sizeof (zbookmark_phys_t));

	/* keep pulling things out of the zap-object-as-queue */
	while (zap_cursor_init(&zc, dp->dp_meta_objset,
	    scn->scn_phys.scn_queue_obj),
	    zap_cursor_retrieve(&zc, &za) == 0) {
		dsl_dataset_t *ds;
		uint64_t dsobj;

		dsobj = strtonum(za.za_name, NULL);
		VERIFY3U(0, ==, zap_remove_int(dp->dp_meta_objset,
		    scn->scn_phys.scn_queue_obj, dsobj, tx));

		/* Set up min/max txg */
		VERIFY3U(0, ==, dsl_dataset_hold_obj(dp, dsobj, FTAG, &ds));
		if (za.za_first_integer != 0) {
			scn->scn_phys.scn_cur_min_txg =
			    MAX(scn->scn_phys.scn_min_txg,
			    za.za_first_integer);
		} else {
			scn->scn_phys.scn_cur_min_txg =
			    MAX(scn->scn_phys.scn_min_txg,
			    dsl_dataset_phys(ds)->ds_prev_snap_txg);
		}
		scn->scn_phys.scn_cur_max_txg = dsl_scan_ds_maxtxg(ds);
		dsl_dataset_rele(ds, FTAG);

		dsl_scan_visitds(scn, dsobj, tx);
		zap_cursor_fini(&zc);
		if (scn->scn_pausing)
			return;
	}
	zap_cursor_fini(&zc);
}

static boolean_t
dsl_scan_free_should_pause(dsl_scan_t *scn)
{
	uint64_t elapsed_nanosecs;

	if (zfs_recover)
		return (B_FALSE);

	if (scn->scn_visited_this_txg >= zfs_free_max_blocks)
		return (B_TRUE);

	elapsed_nanosecs = gethrtime() - scn->scn_sync_start_time;
	return (elapsed_nanosecs / NANOSEC > zfs_txg_timeout ||
	    (NSEC2MSEC(elapsed_nanosecs) > zfs_free_min_time_ms &&
	    txg_sync_waiting(scn->scn_dp)) ||
	    spa_shutting_down(scn->scn_dp->dp_spa));
}

static int
dsl_scan_free_block_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
{
	dsl_scan_t *scn = arg;

	if (!scn->scn_is_bptree ||
	    (BP_GET_LEVEL(bp) == 0 && BP_GET_TYPE(bp) != DMU_OT_OBJSET)) {
		if (dsl_scan_free_should_pause(scn))
			return (SET_ERROR(ERESTART));
	}

	zio_nowait(zio_free_sync(scn->scn_zio_root, scn->scn_dp->dp_spa,
	    dmu_tx_get_txg(tx), bp, BP_GET_PSIZE(bp), 0));
	dsl_dir_diduse_space(tx->tx_pool->dp_free_dir, DD_USED_HEAD,
	    -bp_get_dsize_sync(scn->scn_dp->dp_spa, bp),
	    -BP_GET_PSIZE(bp), -BP_GET_UCSIZE(bp), tx);
	scn->scn_visited_this_txg++;
	return (0);
}

boolean_t
dsl_scan_active(dsl_scan_t *scn)
{
	spa_t *spa = scn->scn_dp->dp_spa;
	uint64_t used = 0, comp, uncomp;

	if (spa->spa_load_state != SPA_LOAD_NONE)
		return (B_FALSE);
	if (spa_shutting_down(spa))
		return (B_FALSE);
	if (scn->scn_phys.scn_state == DSS_SCANNING ||
	    (scn->scn_async_destroying && !scn->scn_async_stalled))
		return (B_TRUE);

	if (spa_version(scn->scn_dp->dp_spa) >= SPA_VERSION_DEADLISTS) {
		(void) bpobj_space(&scn->scn_dp->dp_free_bpobj,
		    &used, &comp, &uncomp);
	}
	return (used != 0);
}
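
/*
 * dsl_scan_sync() below is the per-txg entry point, called from
 * spa_sync().  Each pass it processes, in order: any pending scan
 * restart, frees from the free_bpobj and the async-destroy bptree,
 * and then (via dsl_scan_visit()) a time slice of the scrub or
 * resilver traversal itself, persisting scn_phys at the end.
 */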

void
dsl_scan_sync(dsl_pool_t *dp, dmu_tx_t *tx)
{
	dsl_scan_t *scn = dp->dp_scan;
	spa_t *spa = dp->dp_spa;
	int err = 0;

	/*
	 * Check for scn_restart_txg before checking spa_load_state, so
	 * that we can restart an old-style scan while the pool is being
	 * imported (see dsl_scan_init).
	 */
	if (scn->scn_restart_txg != 0 &&
	    scn->scn_restart_txg <= tx->tx_txg) {
		pool_scan_func_t func = POOL_SCAN_SCRUB;
		dsl_scan_done(scn, B_FALSE, tx);
		if (vdev_resilver_needed(spa->spa_root_vdev, NULL, NULL))
			func = POOL_SCAN_RESILVER;
		zfs_dbgmsg("restarting scan func=%u txg=%llu",
		    func, tx->tx_txg);
		dsl_scan_setup_sync(&func, tx);
	}

	/*
	 * Only process scans in sync pass 1.
	 */
	if (spa_sync_pass(dp->dp_spa) > 1)
		return;

	/*
	 * If the spa is shutting down, then stop scanning. This will
	 * ensure that the scan does not dirty any new data during the
	 * shutdown phase.
	 */
	if (spa_shutting_down(spa))
		return;

	/*
	 * If the scan is inactive due to a stalled async destroy, try again.
	 */
	if (!scn->scn_async_stalled && !dsl_scan_active(scn))
		return;

	scn->scn_visited_this_txg = 0;
	scn->scn_pausing = B_FALSE;
	scn->scn_sync_start_time = gethrtime();
	spa->spa_scrub_active = B_TRUE;

	/*
	 * First process the async destroys.  If we pause, don't do
	 * any scrubbing or resilvering.  This ensures that there are no
	 * async destroys while we are scanning, so the scan code doesn't
	 * have to worry about traversing it.  It is also faster to free the
	 * blocks than to scrub them.
	 */
	if (zfs_free_bpobj_enabled &&
	    spa_version(dp->dp_spa) >= SPA_VERSION_DEADLISTS) {
		scn->scn_is_bptree = B_FALSE;
		scn->scn_zio_root = zio_root(dp->dp_spa, NULL,
		    NULL, ZIO_FLAG_MUSTSUCCEED);
		err = bpobj_iterate(&dp->dp_free_bpobj,
		    dsl_scan_free_block_cb, scn, tx);
		VERIFY3U(0, ==, zio_wait(scn->scn_zio_root));

		if (err != 0 && err != ERESTART)
			zfs_panic_recover("error %u from bpobj_iterate()", err);
	}

	if (err == 0 && spa_feature_is_active(spa, SPA_FEATURE_ASYNC_DESTROY)) {
		ASSERT(scn->scn_async_destroying);
		scn->scn_is_bptree = B_TRUE;
		scn->scn_zio_root = zio_root(dp->dp_spa, NULL,
		    NULL, ZIO_FLAG_MUSTSUCCEED);
		err = bptree_iterate(dp->dp_meta_objset,
		    dp->dp_bptree_obj, B_TRUE, dsl_scan_free_block_cb, scn, tx);
		VERIFY0(zio_wait(scn->scn_zio_root));

		if (err == EIO || err == ECKSUM) {
			err = 0;
		} else if (err != 0 && err != ERESTART) {
			zfs_panic_recover("error %u from "
			    "traverse_dataset_destroyed()", err);
		}

		if (bptree_is_empty(dp->dp_meta_objset, dp->dp_bptree_obj)) {
			/* finished; deactivate async destroy feature */
			spa_feature_decr(spa, SPA_FEATURE_ASYNC_DESTROY, tx);
			ASSERT(!spa_feature_is_active(spa,
			    SPA_FEATURE_ASYNC_DESTROY));
			VERIFY0(zap_remove(dp->dp_meta_objset,
			    DMU_POOL_DIRECTORY_OBJECT,
			    DMU_POOL_BPTREE_OBJ, tx));
			VERIFY0(bptree_free(dp->dp_meta_objset,
			    dp->dp_bptree_obj, tx));
			dp->dp_bptree_obj = 0;
			scn->scn_async_destroying = B_FALSE;
			scn->scn_async_stalled = B_FALSE;
		} else {
			/*
			 * If we didn't make progress, mark the async
			 * destroy as stalled, so that we will not initiate
			 * a spa_sync() on its behalf.  Note that we only
			 * check this if we are not finished, because if the
			 * bptree had no blocks for us to visit, we can
			 * finish without "making progress".
			 */
			scn->scn_async_stalled =
			    (scn->scn_visited_this_txg == 0);
		}
	}
	if (scn->scn_visited_this_txg) {
		zfs_dbgmsg("freed %llu blocks in %llums from "
		    "free_bpobj/bptree txg %llu; err=%d",
		    (longlong_t)scn->scn_visited_this_txg,
		    (longlong_t)
		    NSEC2MSEC(gethrtime() - scn->scn_sync_start_time),
		    (longlong_t)tx->tx_txg, err);
		scn->scn_visited_this_txg = 0;

		/*
		 * Write out changes to the DDT that may be required as a
		 * result of the blocks freed.  This ensures that the DDT
		 * is clean when a scrub/resilver runs.
		 */
		ddt_sync(spa, tx->tx_txg);
	}
	if (err != 0)
		return;
	if (dp->dp_free_dir != NULL && !scn->scn_async_destroying &&
	    zfs_free_leak_on_eio &&
	    (dsl_dir_phys(dp->dp_free_dir)->dd_used_bytes != 0 ||
	    dsl_dir_phys(dp->dp_free_dir)->dd_compressed_bytes != 0 ||
	    dsl_dir_phys(dp->dp_free_dir)->dd_uncompressed_bytes != 0)) {
		/*
		 * We have finished background destroying, but there is still
		 * some space left in the dp_free_dir. Transfer this leaked
		 * space to the dp_leak_dir.
		 */
		if (dp->dp_leak_dir == NULL) {
			rrw_enter(&dp->dp_config_rwlock, RW_WRITER, FTAG);
			(void) dsl_dir_create_sync(dp, dp->dp_root_dir,
			    LEAK_DIR_NAME, tx);
			VERIFY0(dsl_pool_open_special_dir(dp,
			    LEAK_DIR_NAME, &dp->dp_leak_dir));
			rrw_exit(&dp->dp_config_rwlock, FTAG);
		}
		dsl_dir_diduse_space(dp->dp_leak_dir, DD_USED_HEAD,
		    dsl_dir_phys(dp->dp_free_dir)->dd_used_bytes,
		    dsl_dir_phys(dp->dp_free_dir)->dd_compressed_bytes,
		    dsl_dir_phys(dp->dp_free_dir)->dd_uncompressed_bytes, tx);
		dsl_dir_diduse_space(dp->dp_free_dir, DD_USED_HEAD,
		    -dsl_dir_phys(dp->dp_free_dir)->dd_used_bytes,
		    -dsl_dir_phys(dp->dp_free_dir)->dd_compressed_bytes,
		    -dsl_dir_phys(dp->dp_free_dir)->dd_uncompressed_bytes, tx);
	}
	if (dp->dp_free_dir != NULL && !scn->scn_async_destroying) {
		/* finished; verify that space accounting went to zero */
		ASSERT0(dsl_dir_phys(dp->dp_free_dir)->dd_used_bytes);
		ASSERT0(dsl_dir_phys(dp->dp_free_dir)->dd_compressed_bytes);
		ASSERT0(dsl_dir_phys(dp->dp_free_dir)->dd_uncompressed_bytes);
	}

	if (scn->scn_phys.scn_state != DSS_SCANNING)
		return;

	if (scn->scn_done_txg == tx->tx_txg) {
		ASSERT(!scn->scn_pausing);
		/* finished with scan. */
		zfs_dbgmsg("txg %llu scan complete", tx->tx_txg);
		dsl_scan_done(scn, B_TRUE, tx);
		ASSERT3U(spa->spa_scrub_inflight, ==, 0);
		dsl_scan_sync_state(scn, tx);
		return;
	}

	if (scn->scn_phys.scn_ddt_bookmark.ddb_class <=
	    scn->scn_phys.scn_ddt_class_max) {
		zfs_dbgmsg("doing scan sync txg %llu; "
		    "ddt bm=%llu/%llu/%llu/%llx",
		    (longlong_t)tx->tx_txg,
		    (longlong_t)scn->scn_phys.scn_ddt_bookmark.ddb_class,
		    (longlong_t)scn->scn_phys.scn_ddt_bookmark.ddb_type,
		    (longlong_t)scn->scn_phys.scn_ddt_bookmark.ddb_checksum,
		    (longlong_t)scn->scn_phys.scn_ddt_bookmark.ddb_cursor);
		ASSERT(scn->scn_phys.scn_bookmark.zb_objset == 0);
		ASSERT(scn->scn_phys.scn_bookmark.zb_object == 0);
		ASSERT(scn->scn_phys.scn_bookmark.zb_level == 0);
		ASSERT(scn->scn_phys.scn_bookmark.zb_blkid == 0);
	} else {
		zfs_dbgmsg("doing scan sync txg %llu; bm=%llu/%llu/%llu/%llu",
		    (longlong_t)tx->tx_txg,
		    (longlong_t)scn->scn_phys.scn_bookmark.zb_objset,
		    (longlong_t)scn->scn_phys.scn_bookmark.zb_object,
		    (longlong_t)scn->scn_phys.scn_bookmark.zb_level,
		    (longlong_t)scn->scn_phys.scn_bookmark.zb_blkid);
	}

	scn->scn_zio_root = zio_root(dp->dp_spa, NULL,
	    NULL, ZIO_FLAG_CANFAIL);
	dsl_pool_config_enter(dp, FTAG);
	dsl_scan_visit(scn, tx);
	dsl_pool_config_exit(dp, FTAG);
	(void) zio_wait(scn->scn_zio_root);
	scn->scn_zio_root = NULL;

	zfs_dbgmsg("visited %llu blocks in %llums",
	    (longlong_t)scn->scn_visited_this_txg,
	    (longlong_t)NSEC2MSEC(gethrtime() - scn->scn_sync_start_time));

	if (!scn->scn_pausing) {
		scn->scn_done_txg = tx->tx_txg + 1;
		zfs_dbgmsg("txg %llu traversal complete, waiting till txg %llu",
		    tx->tx_txg, scn->scn_done_txg);
	}

	if (DSL_SCAN_IS_SCRUB_RESILVER(scn)) {
		mutex_enter(&spa->spa_scrub_lock);
		while (spa->spa_scrub_inflight > 0) {
			cv_wait(&spa->spa_scrub_io_cv,
			    &spa->spa_scrub_lock);
		}
		mutex_exit(&spa->spa_scrub_lock);
	}

	dsl_scan_sync_state(scn, tx);
}

/*
 * This will start a new scan, or restart an existing one.
 */
void
dsl_resilver_restart(dsl_pool_t *dp, uint64_t txg)
{
	if (txg == 0) {
		dmu_tx_t *tx;
		tx = dmu_tx_create_dd(dp->dp_mos_dir);
		VERIFY(0 == dmu_tx_assign(tx, TXG_WAIT));

		txg = dmu_tx_get_txg(tx);
		dp->dp_scan->scn_restart_txg = txg;
		dmu_tx_commit(tx);
	} else {
		dp->dp_scan->scn_restart_txg = txg;
	}
	zfs_dbgmsg("restarting resilver txg=%llu", txg);
}

boolean_t
dsl_scan_resilvering(dsl_pool_t *dp)
{
	return (dp->dp_scan->scn_phys.scn_state == DSS_SCANNING &&
	    dp->dp_scan->scn_phys.scn_func == POOL_SCAN_RESILVER);
}

/*
 * scrub consumers
 */

static void
count_block(zfs_all_blkstats_t *zab, const blkptr_t *bp)
{
	int i;

	/*
	 * If we resume after a reboot, zab will be NULL; don't record
	 * incomplete stats in that case.
	 */
	if (zab == NULL)
		return;

	for (i = 0; i < 4; i++) {
		int l = (i < 2) ? BP_GET_LEVEL(bp) : DN_MAX_LEVELS;
		int t = (i & 1) ? BP_GET_TYPE(bp) : DMU_OT_TOTAL;
		if (t & DMU_OT_NEWTYPE)
			t = DMU_OT_OTHER;
		zfs_blkstat_t *zb = &zab->zab_type[l][t];
		int equal;

		zb->zb_count++;
		zb->zb_asize += BP_GET_ASIZE(bp);
		zb->zb_lsize += BP_GET_LSIZE(bp);
		zb->zb_psize += BP_GET_PSIZE(bp);
		zb->zb_gangs += BP_COUNT_GANG(bp);

		switch (BP_GET_NDVAS(bp)) {
		case 2:
			if (DVA_GET_VDEV(&bp->blk_dva[0]) ==
			    DVA_GET_VDEV(&bp->blk_dva[1]))
				zb->zb_ditto_2_of_2_samevdev++;
			break;
		case 3:
			equal = (DVA_GET_VDEV(&bp->blk_dva[0]) ==
			    DVA_GET_VDEV(&bp->blk_dva[1])) +
			    (DVA_GET_VDEV(&bp->blk_dva[0]) ==
			    DVA_GET_VDEV(&bp->blk_dva[2])) +
			    (DVA_GET_VDEV(&bp->blk_dva[1]) ==
			    DVA_GET_VDEV(&bp->blk_dva[2]));
			if (equal == 1)
				zb->zb_ditto_2_of_3_samevdev++;
			else if (equal == 3)
				zb->zb_ditto_3_of_3_samevdev++;
			break;
		}
	}
}
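
/*
 * The loop in count_block() fills four histogram buckets per block:
 * i == 0/1 index by the block's level and i == 2/3 by the
 * DN_MAX_LEVELS "total" row, while odd i indexes by the block's type
 * and even i by the DMU_OT_TOTAL column.  Each block is thus counted
 * per (level, type), per level, per type, and once in the grand total.
 */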

static void
dsl_scan_scrub_done(zio_t *zio)
{
	spa_t *spa = zio->io_spa;

	zio_data_buf_free(zio->io_data, zio->io_size);

	mutex_enter(&spa->spa_scrub_lock);
	spa->spa_scrub_inflight--;
	cv_broadcast(&spa->spa_scrub_io_cv);

	if (zio->io_error && (zio->io_error != ECKSUM ||
	    !(zio->io_flags & ZIO_FLAG_SPECULATIVE))) {
		spa->spa_dsl_pool->dp_scan->scn_phys.scn_errors++;
	}
	mutex_exit(&spa->spa_scrub_lock);
}

static int
dsl_scan_scrub_cb(dsl_pool_t *dp,
    const blkptr_t *bp, const zbookmark_phys_t *zb)
{
	dsl_scan_t *scn = dp->dp_scan;
	size_t size = BP_GET_PSIZE(bp);
	spa_t *spa = dp->dp_spa;
	uint64_t phys_birth = BP_PHYSICAL_BIRTH(bp);
	boolean_t needs_io;
	int zio_flags = ZIO_FLAG_SCAN_THREAD | ZIO_FLAG_RAW | ZIO_FLAG_CANFAIL;
	unsigned int scan_delay = 0;

	if (phys_birth <= scn->scn_phys.scn_min_txg ||
	    phys_birth >= scn->scn_phys.scn_max_txg)
		return (0);

	count_block(dp->dp_blkstats, bp);

	if (BP_IS_EMBEDDED(bp))
		return (0);

	ASSERT(DSL_SCAN_IS_SCRUB_RESILVER(scn));
	if (scn->scn_phys.scn_func == POOL_SCAN_SCRUB) {
		zio_flags |= ZIO_FLAG_SCRUB;
		needs_io = B_TRUE;
		scan_delay = zfs_scrub_delay;
	} else {
		ASSERT3U(scn->scn_phys.scn_func, ==, POOL_SCAN_RESILVER);
		zio_flags |= ZIO_FLAG_RESILVER;
		needs_io = B_FALSE;
		scan_delay = zfs_resilver_delay;
	}

	/* If it's an intent log block, failure is expected. */
	if (zb->zb_level == ZB_ZIL_LEVEL)
		zio_flags |= ZIO_FLAG_SPECULATIVE;

	for (int d = 0; d < BP_GET_NDVAS(bp); d++) {
		vdev_t *vd = vdev_lookup_top(spa,
		    DVA_GET_VDEV(&bp->blk_dva[d]));

		/*
		 * Keep track of how much data we've examined so that
		 * zpool(1M) status can make useful progress reports.
		 */
		scn->scn_phys.scn_examined += DVA_GET_ASIZE(&bp->blk_dva[d]);
		spa->spa_scan_pass_exam += DVA_GET_ASIZE(&bp->blk_dva[d]);

		/* if it's a resilver, this may not be in the target range */
		if (!needs_io) {
			if (DVA_GET_GANG(&bp->blk_dva[d])) {
				/*
				 * Gang members may be spread across multiple
				 * vdevs, so the best estimate we have is the
				 * scrub range, which has already been checked.
				 * XXX -- it would be better to change our
				 * allocation policy to ensure that all
				 * gang members reside on the same vdev.
				 */
				needs_io = B_TRUE;
			} else {
				needs_io = vdev_dtl_contains(vd, DTL_PARTIAL,
				    phys_birth, 1);
			}
		}
	}

	if (needs_io && !zfs_no_scrub_io) {
		vdev_t *rvd = spa->spa_root_vdev;
		uint64_t maxinflight = rvd->vdev_children *
		    MAX(zfs_top_maxinflight, 1);
		void *data = zio_data_buf_alloc(size);

		mutex_enter(&spa->spa_scrub_lock);
		while (spa->spa_scrub_inflight >= maxinflight)
			cv_wait(&spa->spa_scrub_io_cv, &spa->spa_scrub_lock);
		spa->spa_scrub_inflight++;
		mutex_exit(&spa->spa_scrub_lock);

		/*
		 * If we're seeing recent (zfs_scan_idle) "important" I/Os
		 * then throttle our workload to limit the impact of a scan.
		 */
		if (ddi_get_lbolt64() - spa->spa_last_io <= zfs_scan_idle)
			delay(MAX((int)scan_delay, 0));

		zio_nowait(zio_read(NULL, spa, bp, data, size,
		    dsl_scan_scrub_done, NULL, ZIO_PRIORITY_SCRUB,
		    zio_flags, zb));
	}

	/* do not relocate this block */
	return (0);
}
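
/*
 * With default tunables and, say, a pool of 4 top-level vdevs, the
 * throttle above allows up to 4 * 32 = 128 scrub reads in flight
 * pool-wide, and when non-scan I/O has been seen within the last
 * zfs_scan_idle (50) ticks it inserts a zfs_scrub_delay (4) or
 * zfs_resilver_delay (2) tick pause before issuing each read.
 */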

int
dsl_scan(dsl_pool_t *dp, pool_scan_func_t func)
{
	spa_t *spa = dp->dp_spa;

	/*
	 * Purge all vdev caches and probe all devices.  We do this here
	 * rather than in sync context because this requires a writer lock
	 * on the spa_config lock, which we can't do from sync context.  The
	 * spa_scrub_reopen flag indicates that vdev_open() should not
	 * attempt to start another scrub.
	 */
	spa_vdev_state_enter(spa, SCL_NONE);
	spa->spa_scrub_reopen = B_TRUE;
	vdev_reopen(spa->spa_root_vdev);
	spa->spa_scrub_reopen = B_FALSE;
	(void) spa_vdev_state_exit(spa, NULL, 0);

	return (dsl_sync_task(spa_name(spa), dsl_scan_setup_check,
	    dsl_scan_setup_sync, &func, 0, ZFS_SPACE_CHECK_NONE));
}