dsl_scan.c revision 263397
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2013 by Delphix. All rights reserved.
 */

#include <sys/dsl_scan.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_synctask.h>
#include <sys/dnode.h>
#include <sys/dmu_tx.h>
#include <sys/dmu_objset.h>
#include <sys/arc.h>
#include <sys/zap.h>
#include <sys/zio.h>
#include <sys/zfs_context.h>
#include <sys/fs/zfs.h>
#include <sys/zfs_znode.h>
#include <sys/spa_impl.h>
#include <sys/vdev_impl.h>
#include <sys/zil_impl.h>
#include <sys/zio_checksum.h>
#include <sys/ddt.h>
#include <sys/sa.h>
#include <sys/sa_impl.h>
#include <sys/zfeature.h>
#ifdef _KERNEL
#include <sys/zfs_vfsops.h>
#endif

typedef int (scan_cb_t)(dsl_pool_t *, const blkptr_t *, const zbookmark_t *);

static scan_cb_t dsl_scan_defrag_cb;
static scan_cb_t dsl_scan_scrub_cb;
static scan_cb_t dsl_scan_remove_cb;
static void dsl_scan_cancel_sync(void *, dmu_tx_t *);
static void dsl_scan_sync_state(dsl_scan_t *, dmu_tx_t *tx);

unsigned int zfs_top_maxinflight = 32;	/* maximum I/Os per top-level */
unsigned int zfs_resilver_delay = 2;	/* number of ticks to delay resilver */
unsigned int zfs_scrub_delay = 4;	/* number of ticks to delay scrub */
unsigned int zfs_scan_idle = 50;	/* idle window in clock ticks */

unsigned int zfs_scan_min_time_ms = 1000; /* min millisecs to scrub per txg */
unsigned int zfs_free_min_time_ms = 1000; /* min millisecs to free per txg */
unsigned int zfs_resilver_min_time_ms = 3000; /* min millisecs to resilver
						 per txg */
boolean_t zfs_no_scrub_io = B_FALSE; /* set to disable scrub i/o */
boolean_t zfs_no_scrub_prefetch = B_FALSE; /* set to disable scrub prefetching */

SYSCTL_DECL(_vfs_zfs);
TUNABLE_INT("vfs.zfs.top_maxinflight", &zfs_top_maxinflight);
SYSCTL_UINT(_vfs_zfs, OID_AUTO, top_maxinflight, CTLFLAG_RW,
    &zfs_top_maxinflight, 0, "Maximum I/Os per top-level vdev");
TUNABLE_INT("vfs.zfs.resilver_delay", &zfs_resilver_delay);
SYSCTL_UINT(_vfs_zfs, OID_AUTO, resilver_delay, CTLFLAG_RW,
    &zfs_resilver_delay, 0, "Number of ticks to delay resilver");
TUNABLE_INT("vfs.zfs.scrub_delay", &zfs_scrub_delay);
SYSCTL_UINT(_vfs_zfs, OID_AUTO, scrub_delay, CTLFLAG_RW,
    &zfs_scrub_delay, 0, "Number of ticks to delay scrub");
TUNABLE_INT("vfs.zfs.scan_idle", &zfs_scan_idle);
SYSCTL_UINT(_vfs_zfs, OID_AUTO, scan_idle, CTLFLAG_RW,
    &zfs_scan_idle, 0, "Idle scan window in clock ticks");
TUNABLE_INT("vfs.zfs.scan_min_time_ms", &zfs_scan_min_time_ms);
SYSCTL_UINT(_vfs_zfs, OID_AUTO, scan_min_time_ms, CTLFLAG_RW,
    &zfs_scan_min_time_ms, 0, "Min millisecs to scrub per txg");
TUNABLE_INT("vfs.zfs.free_min_time_ms", &zfs_free_min_time_ms);
SYSCTL_UINT(_vfs_zfs, OID_AUTO, free_min_time_ms, CTLFLAG_RW,
    &zfs_free_min_time_ms, 0, "Min millisecs to free per txg");
TUNABLE_INT("vfs.zfs.resilver_min_time_ms", &zfs_resilver_min_time_ms);
SYSCTL_UINT(_vfs_zfs, OID_AUTO, resilver_min_time_ms, CTLFLAG_RW,
    &zfs_resilver_min_time_ms, 0, "Min millisecs to resilver per txg");
TUNABLE_INT("vfs.zfs.no_scrub_io", &zfs_no_scrub_io);
SYSCTL_INT(_vfs_zfs, OID_AUTO, no_scrub_io, CTLFLAG_RW,
    &zfs_no_scrub_io, 0, "Disable scrub I/O");
TUNABLE_INT("vfs.zfs.no_scrub_prefetch", &zfs_no_scrub_prefetch);
SYSCTL_INT(_vfs_zfs, OID_AUTO, no_scrub_prefetch, CTLFLAG_RW,
    &zfs_no_scrub_prefetch, 0, "Disable scrub prefetching");

enum ddt_class zfs_scrub_ddt_class_max = DDT_CLASS_DUPLICATE;

#define	DSL_SCAN_IS_SCRUB_RESILVER(scn) \
	((scn)->scn_phys.scn_func == POOL_SCAN_SCRUB || \
	(scn)->scn_phys.scn_func == POOL_SCAN_RESILVER)

extern int zfs_txg_timeout;

/* the order has to match pool_scan_type */
static scan_cb_t *scan_funcs[POOL_SCAN_FUNCS] = {
	NULL,
	dsl_scan_scrub_cb,	/* POOL_SCAN_SCRUB */
	dsl_scan_scrub_cb,	/* POOL_SCAN_RESILVER */
};

int
dsl_scan_init(dsl_pool_t *dp, uint64_t txg)
{
	int err;
	dsl_scan_t *scn;
	spa_t *spa = dp->dp_spa;
	uint64_t f;

	scn = dp->dp_scan = kmem_zalloc(sizeof (dsl_scan_t), KM_SLEEP);
	scn->scn_dp = dp;

	/*
	 * It's possible that we're resuming a scan after a reboot so
	 * make sure that the scan_async_destroying flag is initialized
	 * appropriately.
	 */
	ASSERT(!scn->scn_async_destroying);
	scn->scn_async_destroying = spa_feature_is_active(dp->dp_spa,
	    SPA_FEATURE_ASYNC_DESTROY);

	err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    "scrub_func", sizeof (uint64_t), 1, &f);
	if (err == 0) {
		/*
		 * There was an old-style scrub in progress.  Restart a
		 * new-style scrub from the beginning.
		 */
		scn->scn_restart_txg = txg;
		zfs_dbgmsg("old-style scrub was in progress; "
		    "restarting new-style scrub in txg %llu",
		    scn->scn_restart_txg);

		/*
		 * Load the queue obj from the old location so that it
		 * can be freed by dsl_scan_done().
		 */
		(void) zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
		    "scrub_queue", sizeof (uint64_t), 1,
		    &scn->scn_phys.scn_queue_obj);
	} else {
		err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
		    DMU_POOL_SCAN, sizeof (uint64_t), SCAN_PHYS_NUMINTS,
		    &scn->scn_phys);
		if (err == ENOENT)
			return (0);
		else if (err)
			return (err);

		if (scn->scn_phys.scn_state == DSS_SCANNING &&
		    spa_prev_software_version(dp->dp_spa) < SPA_VERSION_SCAN) {
			/*
			 * A new-type scrub was in progress on an old
			 * pool, and the pool was accessed by old
			 * software.  Restart from the beginning, since
			 * the old software may have changed the pool in
			 * the meantime.
			 */
			scn->scn_restart_txg = txg;
			zfs_dbgmsg("new-style scrub was modified "
			    "by old software; restarting in txg %llu",
			    scn->scn_restart_txg);
		}
	}

	spa_scan_stat_init(spa);
	return (0);
}

void
dsl_scan_fini(dsl_pool_t *dp)
{
	if (dp->dp_scan) {
		kmem_free(dp->dp_scan, sizeof (dsl_scan_t));
		dp->dp_scan = NULL;
	}
}

/* ARGSUSED */
static int
dsl_scan_setup_check(void *arg, dmu_tx_t *tx)
{
	dsl_scan_t *scn = dmu_tx_pool(tx)->dp_scan;

	if (scn->scn_phys.scn_state == DSS_SCANNING)
		return (SET_ERROR(EBUSY));

	return (0);
}

static void
dsl_scan_setup_sync(void *arg, dmu_tx_t *tx)
{
	dsl_scan_t *scn = dmu_tx_pool(tx)->dp_scan;
	pool_scan_func_t *funcp = arg;
	dmu_object_type_t ot = 0;
	dsl_pool_t *dp = scn->scn_dp;
	spa_t *spa = dp->dp_spa;

	ASSERT(scn->scn_phys.scn_state != DSS_SCANNING);
	ASSERT(*funcp > POOL_SCAN_NONE && *funcp < POOL_SCAN_FUNCS);
	bzero(&scn->scn_phys, sizeof (scn->scn_phys));
	scn->scn_phys.scn_func = *funcp;
	scn->scn_phys.scn_state = DSS_SCANNING;
	scn->scn_phys.scn_min_txg = 0;
	scn->scn_phys.scn_max_txg = tx->tx_txg;
	scn->scn_phys.scn_ddt_class_max = DDT_CLASSES - 1; /* the entire DDT */
	scn->scn_phys.scn_start_time = gethrestime_sec();
	scn->scn_phys.scn_errors = 0;
	scn->scn_phys.scn_to_examine = spa->spa_root_vdev->vdev_stat.vs_alloc;
	scn->scn_restart_txg = 0;
	scn->scn_done_txg = 0;
	spa_scan_stat_init(spa);

	if (DSL_SCAN_IS_SCRUB_RESILVER(scn)) {
		scn->scn_phys.scn_ddt_class_max = zfs_scrub_ddt_class_max;

		/* rewrite all disk labels */
		vdev_config_dirty(spa->spa_root_vdev);

		if (vdev_resilver_needed(spa->spa_root_vdev,
		    &scn->scn_phys.scn_min_txg, &scn->scn_phys.scn_max_txg)) {
			spa_event_notify(spa, NULL, ESC_ZFS_RESILVER_START);
		} else {
			spa_event_notify(spa, NULL, ESC_ZFS_SCRUB_START);
		}

		spa->spa_scrub_started = B_TRUE;
		/*
		 * If this is an incremental scrub, limit the DDT scrub phase
		 * to just the auto-ditto class (for correctness); the rest
		 * of the scrub should go faster using top-down pruning.
		 */
		if (scn->scn_phys.scn_min_txg > TXG_INITIAL)
			scn->scn_phys.scn_ddt_class_max = DDT_CLASS_DITTO;

	}

	/* back to the generic stuff */

	if (dp->dp_blkstats == NULL) {
		dp->dp_blkstats =
		    kmem_alloc(sizeof (zfs_all_blkstats_t), KM_SLEEP);
	}
	bzero(dp->dp_blkstats, sizeof (zfs_all_blkstats_t));

	if (spa_version(spa) < SPA_VERSION_DSL_SCRUB)
		ot = DMU_OT_ZAP_OTHER;

	scn->scn_phys.scn_queue_obj = zap_create(dp->dp_meta_objset,
	    ot ? ot : DMU_OT_SCAN_QUEUE, DMU_OT_NONE, 0, tx);

	dsl_scan_sync_state(scn, tx);

	spa_history_log_internal(spa, "scan setup", tx,
	    "func=%u mintxg=%llu maxtxg=%llu",
	    *funcp, scn->scn_phys.scn_min_txg, scn->scn_phys.scn_max_txg);
}

/* ARGSUSED */
static void
dsl_scan_done(dsl_scan_t *scn, boolean_t complete, dmu_tx_t *tx)
{
	static const char *old_names[] = {
		"scrub_bookmark",
		"scrub_ddt_bookmark",
		"scrub_ddt_class_max",
		"scrub_queue",
		"scrub_min_txg",
		"scrub_max_txg",
		"scrub_func",
		"scrub_errors",
		NULL
	};

	dsl_pool_t *dp = scn->scn_dp;
	spa_t *spa = dp->dp_spa;
	int i;

	/* Remove any remnants of an old-style scrub. */
	for (i = 0; old_names[i]; i++) {
		(void) zap_remove(dp->dp_meta_objset,
		    DMU_POOL_DIRECTORY_OBJECT, old_names[i], tx);
	}

	if (scn->scn_phys.scn_queue_obj != 0) {
		VERIFY(0 == dmu_object_free(dp->dp_meta_objset,
		    scn->scn_phys.scn_queue_obj, tx));
		scn->scn_phys.scn_queue_obj = 0;
	}

	/*
	 * If we were "restarted" from a stopped state, don't bother
	 * with anything else.
	 */
	if (scn->scn_phys.scn_state != DSS_SCANNING)
		return;

	if (complete)
		scn->scn_phys.scn_state = DSS_FINISHED;
	else
		scn->scn_phys.scn_state = DSS_CANCELED;

	spa_history_log_internal(spa, "scan done", tx,
	    "complete=%u", complete);

	if (DSL_SCAN_IS_SCRUB_RESILVER(scn)) {
		mutex_enter(&spa->spa_scrub_lock);
		while (spa->spa_scrub_inflight > 0) {
			cv_wait(&spa->spa_scrub_io_cv,
			    &spa->spa_scrub_lock);
		}
		mutex_exit(&spa->spa_scrub_lock);
		spa->spa_scrub_started = B_FALSE;
		spa->spa_scrub_active = B_FALSE;

		/*
		 * If the scrub/resilver completed, update all DTLs to
		 * reflect this.  Whether it succeeded or not, vacate
		 * all temporary scrub DTLs.
		 */
		vdev_dtl_reassess(spa->spa_root_vdev, tx->tx_txg,
		    complete ? scn->scn_phys.scn_max_txg : 0, B_TRUE);
		if (complete) {
			spa_event_notify(spa, NULL, scn->scn_phys.scn_min_txg ?
			    ESC_ZFS_RESILVER_FINISH : ESC_ZFS_SCRUB_FINISH);
		}
		spa_errlog_rotate(spa);

		/*
		 * We may have finished replacing a device.
		 * Let the async thread assess this and handle the detach.
		 */
		spa_async_request(spa, SPA_ASYNC_RESILVER_DONE);
	}

	scn->scn_phys.scn_end_time = gethrestime_sec();
}

/* ARGSUSED */
static int
dsl_scan_cancel_check(void *arg, dmu_tx_t *tx)
{
	dsl_scan_t *scn = dmu_tx_pool(tx)->dp_scan;

	if (scn->scn_phys.scn_state != DSS_SCANNING)
		return (SET_ERROR(ENOENT));
	return (0);
}

/* ARGSUSED */
static void
dsl_scan_cancel_sync(void *arg, dmu_tx_t *tx)
{
	dsl_scan_t *scn = dmu_tx_pool(tx)->dp_scan;

	dsl_scan_done(scn, B_FALSE, tx);
	dsl_scan_sync_state(scn, tx);
}

int
dsl_scan_cancel(dsl_pool_t *dp)
{
	return (dsl_sync_task(spa_name(dp->dp_spa), dsl_scan_cancel_check,
	    dsl_scan_cancel_sync, NULL, 3));
}

static void dsl_scan_visitbp(blkptr_t *bp,
    const zbookmark_t *zb, dnode_phys_t *dnp, arc_buf_t *pbuf,
    dsl_dataset_t *ds, dsl_scan_t *scn, dmu_objset_type_t ostype,
    dmu_tx_t *tx);
static void dsl_scan_visitdnode(dsl_scan_t *, dsl_dataset_t *ds,
    dmu_objset_type_t ostype,
    dnode_phys_t *dnp, arc_buf_t *buf, uint64_t object, dmu_tx_t *tx);

void
dsl_free(dsl_pool_t *dp, uint64_t txg, const blkptr_t *bp)
{
	zio_free(dp->dp_spa, txg, bp);
}

void
dsl_free_sync(zio_t *pio, dsl_pool_t *dp, uint64_t txg, const blkptr_t *bpp)
{
	ASSERT(dsl_pool_sync_context(dp));
	zio_nowait(zio_free_sync(pio, dp->dp_spa, txg, bpp, BP_GET_PSIZE(bpp),
	    pio->io_flags));
}

static uint64_t
dsl_scan_ds_maxtxg(dsl_dataset_t *ds)
{
	uint64_t smt = ds->ds_dir->dd_pool->dp_scan->scn_phys.scn_max_txg;
	if (dsl_dataset_is_snapshot(ds))
		return (MIN(smt, ds->ds_phys->ds_creation_txg));
	return (smt);
}

static void
dsl_scan_sync_state(dsl_scan_t *scn, dmu_tx_t *tx)
{
	VERIFY0(zap_update(scn->scn_dp->dp_meta_objset,
	    DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_SCAN, sizeof (uint64_t), SCAN_PHYS_NUMINTS,
	    &scn->scn_phys, tx));
}

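/*
 * The scan runs in syncing context, one bounded slice per txg.
 * dsl_scan_check_pause() decides when the current slice has gone on
 * long enough: zfs_txg_timeout has been exceeded, another txg sync is
 * waiting (once the per-txg minimum scan time has elapsed), or the
 * pool is shutting down.  When it pauses, it records the current
 * zbookmark in scn_phys.scn_bookmark so that a later txg can pick up
 * where this one left off (see dsl_scan_check_resume()).
 */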
static boolean_t
dsl_scan_check_pause(dsl_scan_t *scn, const zbookmark_t *zb)
{
	uint64_t elapsed_nanosecs;
	unsigned int mintime;

	/* we never skip user/group accounting objects */
	if (zb && (int64_t)zb->zb_object < 0)
		return (B_FALSE);

	if (scn->scn_pausing)
		return (B_TRUE); /* we're already pausing */

	if (!ZB_IS_ZERO(&scn->scn_phys.scn_bookmark))
		return (B_FALSE); /* we're resuming */

	/* We only know how to resume from level-0 blocks. */
	if (zb && zb->zb_level != 0)
		return (B_FALSE);

	mintime = (scn->scn_phys.scn_func == POOL_SCAN_RESILVER) ?
	    zfs_resilver_min_time_ms : zfs_scan_min_time_ms;
	elapsed_nanosecs = gethrtime() - scn->scn_sync_start_time;
	if (elapsed_nanosecs / NANOSEC > zfs_txg_timeout ||
	    (NSEC2MSEC(elapsed_nanosecs) > mintime &&
	    txg_sync_waiting(scn->scn_dp)) ||
	    spa_shutting_down(scn->scn_dp->dp_spa)) {
		if (zb) {
			dprintf("pausing at bookmark %llx/%llx/%llx/%llx\n",
			    (longlong_t)zb->zb_objset,
			    (longlong_t)zb->zb_object,
			    (longlong_t)zb->zb_level,
			    (longlong_t)zb->zb_blkid);
			scn->scn_phys.scn_bookmark = *zb;
		}
		dprintf("pausing at DDT bookmark %llx/%llx/%llx/%llx\n",
		    (longlong_t)scn->scn_phys.scn_ddt_bookmark.ddb_class,
		    (longlong_t)scn->scn_phys.scn_ddt_bookmark.ddb_type,
		    (longlong_t)scn->scn_phys.scn_ddt_bookmark.ddb_checksum,
		    (longlong_t)scn->scn_phys.scn_ddt_bookmark.ddb_cursor);
		scn->scn_pausing = B_TRUE;
		return (B_TRUE);
	}
	return (B_FALSE);
}

typedef struct zil_scan_arg {
	dsl_pool_t	*zsa_dp;
	zil_header_t	*zsa_zh;
} zil_scan_arg_t;

/* ARGSUSED */
static int
dsl_scan_zil_block(zilog_t *zilog, blkptr_t *bp, void *arg, uint64_t claim_txg)
{
	zil_scan_arg_t *zsa = arg;
	dsl_pool_t *dp = zsa->zsa_dp;
	dsl_scan_t *scn = dp->dp_scan;
	zil_header_t *zh = zsa->zsa_zh;
	zbookmark_t zb;

	if (BP_IS_HOLE(bp) || bp->blk_birth <= scn->scn_phys.scn_cur_min_txg)
		return (0);

	/*
	 * One block ("stubby") can be allocated a long time ago; we
	 * want to visit that one because it has been allocated
	 * (on-disk) even if it hasn't been claimed (even though for
	 * scrub there's nothing to do to it).
	 */
	if (claim_txg == 0 && bp->blk_birth >= spa_first_txg(dp->dp_spa))
		return (0);

	SET_BOOKMARK(&zb, zh->zh_log.blk_cksum.zc_word[ZIL_ZC_OBJSET],
	    ZB_ZIL_OBJECT, ZB_ZIL_LEVEL, bp->blk_cksum.zc_word[ZIL_ZC_SEQ]);

	VERIFY(0 == scan_funcs[scn->scn_phys.scn_func](dp, bp, &zb));
	return (0);
}

/* ARGSUSED */
static int
dsl_scan_zil_record(zilog_t *zilog, lr_t *lrc, void *arg, uint64_t claim_txg)
{
	if (lrc->lrc_txtype == TX_WRITE) {
		zil_scan_arg_t *zsa = arg;
		dsl_pool_t *dp = zsa->zsa_dp;
		dsl_scan_t *scn = dp->dp_scan;
		zil_header_t *zh = zsa->zsa_zh;
		lr_write_t *lr = (lr_write_t *)lrc;
		blkptr_t *bp = &lr->lr_blkptr;
		zbookmark_t zb;

		if (BP_IS_HOLE(bp) ||
		    bp->blk_birth <= scn->scn_phys.scn_cur_min_txg)
			return (0);

		/*
		 * birth can be < claim_txg if this record's txg is
		 * already txg sync'ed (but this log block contains
		 * other records that are not synced)
		 */
		if (claim_txg == 0 || bp->blk_birth < claim_txg)
			return (0);

		SET_BOOKMARK(&zb, zh->zh_log.blk_cksum.zc_word[ZIL_ZC_OBJSET],
		    lr->lr_foid, ZB_ZIL_LEVEL,
		    lr->lr_offset / BP_GET_LSIZE(bp));

		VERIFY(0 == scan_funcs[scn->scn_phys.scn_func](dp, bp, &zb));
	}
	return (0);
}

static void
dsl_scan_zil(dsl_pool_t *dp, zil_header_t *zh)
{
	uint64_t claim_txg = zh->zh_claim_txg;
	zil_scan_arg_t zsa = { dp, zh };
	zilog_t *zilog;

	/*
	 * We only want to visit blocks that have been claimed but not yet
	 * replayed (or, in read-only mode, blocks that *would* be claimed).
	 */
	if (claim_txg == 0 && spa_writeable(dp->dp_spa))
		return;

	zilog = zil_alloc(dp->dp_meta_objset, zh);

	(void) zil_parse(zilog, dsl_scan_zil_block, dsl_scan_zil_record, &zsa,
	    claim_txg);

	zil_free(zilog);
}

/* ARGSUSED */
static void
dsl_scan_prefetch(dsl_scan_t *scn, arc_buf_t *buf, blkptr_t *bp,
    uint64_t objset, uint64_t object, uint64_t blkid)
{
	zbookmark_t czb;
	uint32_t flags = ARC_NOWAIT | ARC_PREFETCH;

	if (zfs_no_scrub_prefetch)
		return;

	if (BP_IS_HOLE(bp) || bp->blk_birth <= scn->scn_phys.scn_min_txg ||
	    (BP_GET_LEVEL(bp) == 0 && BP_GET_TYPE(bp) != DMU_OT_DNODE))
		return;

	SET_BOOKMARK(&czb, objset, object, BP_GET_LEVEL(bp), blkid);

	(void) arc_read(scn->scn_zio_root, scn->scn_dp->dp_spa, bp,
	    NULL, NULL, ZIO_PRIORITY_ASYNC_READ,
	    ZIO_FLAG_CANFAIL | ZIO_FLAG_SCAN_THREAD, &flags, &czb);
}

static boolean_t
dsl_scan_check_resume(dsl_scan_t *scn, const dnode_phys_t *dnp,
    const zbookmark_t *zb)
{
	/*
	 * We never skip over user/group accounting objects (obj<0)
	 */
	if (!ZB_IS_ZERO(&scn->scn_phys.scn_bookmark) &&
	    (int64_t)zb->zb_object >= 0) {
		/*
		 * If we already visited this bp & everything below (in
		 * a prior txg sync), don't bother doing it again.
		 */
		if (zbookmark_is_before(dnp, zb, &scn->scn_phys.scn_bookmark))
			return (B_TRUE);

		/*
		 * If we found the block we're trying to resume from, or
		 * we went past it to a different object, zero it out to
		 * indicate that it's OK to start checking for pausing
		 * again.
		 */
		if (bcmp(zb, &scn->scn_phys.scn_bookmark, sizeof (*zb)) == 0 ||
		    zb->zb_object > scn->scn_phys.scn_bookmark.zb_object) {
			dprintf("resuming at %llx/%llx/%llx/%llx\n",
			    (longlong_t)zb->zb_objset,
			    (longlong_t)zb->zb_object,
			    (longlong_t)zb->zb_level,
			    (longlong_t)zb->zb_blkid);
			bzero(&scn->scn_phys.scn_bookmark, sizeof (*zb));
		}
	}
	return (B_FALSE);
}

/*
 * Return nonzero on i/o error.
 * Return new buf to write out in *bufp.
 */
static int
dsl_scan_recurse(dsl_scan_t *scn, dsl_dataset_t *ds, dmu_objset_type_t ostype,
    dnode_phys_t *dnp, const blkptr_t *bp,
    const zbookmark_t *zb, dmu_tx_t *tx, arc_buf_t **bufp)
{
	dsl_pool_t *dp = scn->scn_dp;
	int zio_flags = ZIO_FLAG_CANFAIL | ZIO_FLAG_SCAN_THREAD;
	int err;

	if (BP_GET_LEVEL(bp) > 0) {
		uint32_t flags = ARC_WAIT;
		int i;
		blkptr_t *cbp;
		int epb = BP_GET_LSIZE(bp) >> SPA_BLKPTRSHIFT;

		err = arc_read(NULL, dp->dp_spa, bp, arc_getbuf_func, bufp,
		    ZIO_PRIORITY_ASYNC_READ, zio_flags, &flags, zb);
		if (err) {
			scn->scn_phys.scn_errors++;
			return (err);
		}
		for (i = 0, cbp = (*bufp)->b_data; i < epb; i++, cbp++) {
			dsl_scan_prefetch(scn, *bufp, cbp, zb->zb_objset,
			    zb->zb_object, zb->zb_blkid * epb + i);
		}
		for (i = 0, cbp = (*bufp)->b_data; i < epb; i++, cbp++) {
			zbookmark_t czb;

			SET_BOOKMARK(&czb, zb->zb_objset, zb->zb_object,
			    zb->zb_level - 1,
			    zb->zb_blkid * epb + i);
			dsl_scan_visitbp(cbp, &czb, dnp,
			    *bufp, ds, scn, ostype, tx);
		}
	} else if (BP_GET_TYPE(bp) == DMU_OT_USERGROUP_USED) {
		uint32_t flags = ARC_WAIT;

		err = arc_read(NULL, dp->dp_spa, bp, arc_getbuf_func, bufp,
		    ZIO_PRIORITY_ASYNC_READ, zio_flags, &flags, zb);
		if (err) {
			scn->scn_phys.scn_errors++;
			return (err);
		}
	} else if (BP_GET_TYPE(bp) == DMU_OT_DNODE) {
		uint32_t flags = ARC_WAIT;
		dnode_phys_t *cdnp;
		int i, j;
		int epb = BP_GET_LSIZE(bp) >> DNODE_SHIFT;

		err = arc_read(NULL, dp->dp_spa, bp, arc_getbuf_func, bufp,
		    ZIO_PRIORITY_ASYNC_READ, zio_flags, &flags, zb);
		if (err) {
			scn->scn_phys.scn_errors++;
			return (err);
		}
		for (i = 0, cdnp = (*bufp)->b_data; i < epb; i++, cdnp++) {
			for (j = 0; j < cdnp->dn_nblkptr; j++) {
				blkptr_t *cbp = &cdnp->dn_blkptr[j];
				dsl_scan_prefetch(scn, *bufp, cbp,
				    zb->zb_objset, zb->zb_blkid * epb + i, j);
			}
		}
		for (i = 0, cdnp = (*bufp)->b_data; i < epb; i++, cdnp++) {
			dsl_scan_visitdnode(scn, ds, ostype,
			    cdnp, *bufp, zb->zb_blkid * epb + i, tx);
		}

	} else if (BP_GET_TYPE(bp) == DMU_OT_OBJSET) {
		uint32_t flags = ARC_WAIT;
		objset_phys_t *osp;

		err = arc_read(NULL, dp->dp_spa, bp, arc_getbuf_func, bufp,
		    ZIO_PRIORITY_ASYNC_READ, zio_flags, &flags, zb);
		if (err) {
			scn->scn_phys.scn_errors++;
			return (err);
		}

		osp = (*bufp)->b_data;

		dsl_scan_visitdnode(scn, ds, osp->os_type,
		    &osp->os_meta_dnode, *bufp, DMU_META_DNODE_OBJECT, tx);

		if (OBJSET_BUF_HAS_USERUSED(*bufp)) {
			/*
			 * We also always visit user/group accounting
			 * objects, and never skip them, even if we are
			 * pausing.  This is necessary so that the space
			 * deltas from this txg get integrated.
			 */
			dsl_scan_visitdnode(scn, ds, osp->os_type,
			    &osp->os_groupused_dnode, *bufp,
			    DMU_GROUPUSED_OBJECT, tx);
			dsl_scan_visitdnode(scn, ds, osp->os_type,
			    &osp->os_userused_dnode, *bufp,
			    DMU_USERUSED_OBJECT, tx);
		}
	}

	return (0);
}

static void
dsl_scan_visitdnode(dsl_scan_t *scn, dsl_dataset_t *ds,
    dmu_objset_type_t ostype, dnode_phys_t *dnp, arc_buf_t *buf,
    uint64_t object, dmu_tx_t *tx)
{
	int j;

	for (j = 0; j < dnp->dn_nblkptr; j++) {
		zbookmark_t czb;

		SET_BOOKMARK(&czb, ds ? ds->ds_object : 0, object,
		    dnp->dn_nlevels - 1, j);
		dsl_scan_visitbp(&dnp->dn_blkptr[j],
		    &czb, dnp, buf, ds, scn, ostype, tx);
	}

	if (dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR) {
		zbookmark_t czb;
		SET_BOOKMARK(&czb, ds ? ds->ds_object : 0, object,
		    0, DMU_SPILL_BLKID);
		dsl_scan_visitbp(&dnp->dn_spill,
		    &czb, dnp, buf, ds, scn, ostype, tx);
	}
}

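/*
 * dsl_scan_visitbp() is the core of the traversal.  It checks whether
 * this txg's time slice is up (dsl_scan_check_pause()) and whether the
 * block was already covered by an earlier slice (dsl_scan_check_resume()),
 * recurses through indirect blocks, dnodes, and objsets via
 * dsl_scan_recurse(), and then passes the block itself to the scan
 * callback, unless the DDT phase already visited it or it was born
 * after the current pass (scn_cur_max_txg).
 */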
/*
 * The arguments are in this order because mdb can only print the
 * first 5; we want them to be useful.
 */
static void
dsl_scan_visitbp(blkptr_t *bp, const zbookmark_t *zb,
    dnode_phys_t *dnp, arc_buf_t *pbuf,
    dsl_dataset_t *ds, dsl_scan_t *scn, dmu_objset_type_t ostype,
    dmu_tx_t *tx)
{
	dsl_pool_t *dp = scn->scn_dp;
	arc_buf_t *buf = NULL;
	blkptr_t bp_toread = *bp;

	/* ASSERT(pbuf == NULL || arc_released(pbuf)); */

	if (dsl_scan_check_pause(scn, zb))
		return;

	if (dsl_scan_check_resume(scn, dnp, zb))
		return;

	if (BP_IS_HOLE(bp))
		return;

	scn->scn_visited_this_txg++;

	dprintf_bp(bp,
	    "visiting ds=%p/%llu zb=%llx/%llx/%llx/%llx buf=%p bp=%p",
	    ds, ds ? ds->ds_object : 0,
	    zb->zb_objset, zb->zb_object, zb->zb_level, zb->zb_blkid,
	    pbuf, bp);

	if (bp->blk_birth <= scn->scn_phys.scn_cur_min_txg)
		return;

	if (dsl_scan_recurse(scn, ds, ostype, dnp, &bp_toread, zb, tx,
	    &buf) != 0)
		return;

	/*
	 * If dsl_scan_ddt() has already visited this block, it will have
	 * already done any translations or scrubbing, so don't call the
	 * callback again.
	 */
	if (ddt_class_contains(dp->dp_spa,
	    scn->scn_phys.scn_ddt_class_max, bp)) {
		ASSERT(buf == NULL);
		return;
	}

	/*
	 * If this block is from the future (after cur_max_txg), then we
	 * are doing this on behalf of a deleted snapshot, and we will
	 * revisit the future block on the next pass of this dataset.
	 * Don't scan it now unless we need to because something
	 * under it was modified.
	 */
	if (BP_PHYSICAL_BIRTH(bp) <= scn->scn_phys.scn_cur_max_txg) {
		scan_funcs[scn->scn_phys.scn_func](dp, bp, zb);
	}
	if (buf)
		(void) arc_buf_remove_ref(buf, &buf);
}

static void
dsl_scan_visit_rootbp(dsl_scan_t *scn, dsl_dataset_t *ds, blkptr_t *bp,
    dmu_tx_t *tx)
{
	zbookmark_t zb;

	SET_BOOKMARK(&zb, ds ? ds->ds_object : DMU_META_OBJSET,
	    ZB_ROOT_OBJECT, ZB_ROOT_LEVEL, ZB_ROOT_BLKID);
	dsl_scan_visitbp(bp, &zb, NULL, NULL,
	    ds, scn, DMU_OST_NONE, tx);

	dprintf_ds(ds, "finished scan%s", "");
}

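/*
 * The dataset namespace can change while a scan is in progress.  The
 * hooks below (dataset destroy, snapshot, clone swap) are called from
 * the corresponding sync tasks to keep the in-progress bookmark and
 * the scan's dataset work queue pointing at the right objects.
 */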
void
dsl_scan_ds_destroyed(dsl_dataset_t *ds, dmu_tx_t *tx)
{
	dsl_pool_t *dp = ds->ds_dir->dd_pool;
	dsl_scan_t *scn = dp->dp_scan;
	uint64_t mintxg;

	if (scn->scn_phys.scn_state != DSS_SCANNING)
		return;

	if (scn->scn_phys.scn_bookmark.zb_objset == ds->ds_object) {
		if (dsl_dataset_is_snapshot(ds)) {
			/* Note, scn_cur_{min,max}_txg stays the same. */
			scn->scn_phys.scn_bookmark.zb_objset =
			    ds->ds_phys->ds_next_snap_obj;
			zfs_dbgmsg("destroying ds %llu; currently traversing; "
			    "reset zb_objset to %llu",
			    (u_longlong_t)ds->ds_object,
			    (u_longlong_t)ds->ds_phys->ds_next_snap_obj);
			scn->scn_phys.scn_flags |= DSF_VISIT_DS_AGAIN;
		} else {
			SET_BOOKMARK(&scn->scn_phys.scn_bookmark,
			    ZB_DESTROYED_OBJSET, 0, 0, 0);
			zfs_dbgmsg("destroying ds %llu; currently traversing; "
			    "reset bookmark to -1,0,0,0",
			    (u_longlong_t)ds->ds_object);
		}
	} else if (zap_lookup_int_key(dp->dp_meta_objset,
	    scn->scn_phys.scn_queue_obj, ds->ds_object, &mintxg) == 0) {
		ASSERT3U(ds->ds_phys->ds_num_children, <=, 1);
		VERIFY3U(0, ==, zap_remove_int(dp->dp_meta_objset,
		    scn->scn_phys.scn_queue_obj, ds->ds_object, tx));
		if (dsl_dataset_is_snapshot(ds)) {
			/*
			 * We keep the same mintxg; it could be >
			 * ds_creation_txg if the previous snapshot was
			 * deleted too.
			 */
			VERIFY(zap_add_int_key(dp->dp_meta_objset,
			    scn->scn_phys.scn_queue_obj,
			    ds->ds_phys->ds_next_snap_obj, mintxg, tx) == 0);
			zfs_dbgmsg("destroying ds %llu; in queue; "
			    "replacing with %llu",
			    (u_longlong_t)ds->ds_object,
			    (u_longlong_t)ds->ds_phys->ds_next_snap_obj);
		} else {
			zfs_dbgmsg("destroying ds %llu; in queue; removing",
			    (u_longlong_t)ds->ds_object);
		}
	} else {
		zfs_dbgmsg("destroying ds %llu; ignoring",
		    (u_longlong_t)ds->ds_object);
	}

	/*
	 * dsl_scan_sync() should be called after this, and should sync
	 * out our changed state, but just to be safe, do it here.
	 */
	dsl_scan_sync_state(scn, tx);
}

void
dsl_scan_ds_snapshotted(dsl_dataset_t *ds, dmu_tx_t *tx)
{
	dsl_pool_t *dp = ds->ds_dir->dd_pool;
	dsl_scan_t *scn = dp->dp_scan;
	uint64_t mintxg;

	if (scn->scn_phys.scn_state != DSS_SCANNING)
		return;

	ASSERT(ds->ds_phys->ds_prev_snap_obj != 0);

	if (scn->scn_phys.scn_bookmark.zb_objset == ds->ds_object) {
		scn->scn_phys.scn_bookmark.zb_objset =
		    ds->ds_phys->ds_prev_snap_obj;
		zfs_dbgmsg("snapshotting ds %llu; currently traversing; "
		    "reset zb_objset to %llu",
		    (u_longlong_t)ds->ds_object,
		    (u_longlong_t)ds->ds_phys->ds_prev_snap_obj);
	} else if (zap_lookup_int_key(dp->dp_meta_objset,
	    scn->scn_phys.scn_queue_obj, ds->ds_object, &mintxg) == 0) {
		VERIFY3U(0, ==, zap_remove_int(dp->dp_meta_objset,
		    scn->scn_phys.scn_queue_obj, ds->ds_object, tx));
		VERIFY(zap_add_int_key(dp->dp_meta_objset,
		    scn->scn_phys.scn_queue_obj,
		    ds->ds_phys->ds_prev_snap_obj, mintxg, tx) == 0);
		zfs_dbgmsg("snapshotting ds %llu; in queue; "
		    "replacing with %llu",
		    (u_longlong_t)ds->ds_object,
		    (u_longlong_t)ds->ds_phys->ds_prev_snap_obj);
	}
	dsl_scan_sync_state(scn, tx);
}

void
dsl_scan_ds_clone_swapped(dsl_dataset_t *ds1, dsl_dataset_t *ds2, dmu_tx_t *tx)
{
	dsl_pool_t *dp = ds1->ds_dir->dd_pool;
	dsl_scan_t *scn = dp->dp_scan;
	uint64_t mintxg;

	if (scn->scn_phys.scn_state != DSS_SCANNING)
		return;

	if (scn->scn_phys.scn_bookmark.zb_objset == ds1->ds_object) {
		scn->scn_phys.scn_bookmark.zb_objset = ds2->ds_object;
		zfs_dbgmsg("clone_swap ds %llu; currently traversing; "
		    "reset zb_objset to %llu",
		    (u_longlong_t)ds1->ds_object,
		    (u_longlong_t)ds2->ds_object);
	} else if (scn->scn_phys.scn_bookmark.zb_objset == ds2->ds_object) {
		scn->scn_phys.scn_bookmark.zb_objset = ds1->ds_object;
		zfs_dbgmsg("clone_swap ds %llu; currently traversing; "
		    "reset zb_objset to %llu",
		    (u_longlong_t)ds2->ds_object,
		    (u_longlong_t)ds1->ds_object);
	}

	if (zap_lookup_int_key(dp->dp_meta_objset, scn->scn_phys.scn_queue_obj,
	    ds1->ds_object, &mintxg) == 0) {
		int err;

		ASSERT3U(mintxg, ==, ds1->ds_phys->ds_prev_snap_txg);
		ASSERT3U(mintxg, ==, ds2->ds_phys->ds_prev_snap_txg);
		VERIFY3U(0, ==, zap_remove_int(dp->dp_meta_objset,
		    scn->scn_phys.scn_queue_obj, ds1->ds_object, tx));
		err = zap_add_int_key(dp->dp_meta_objset,
		    scn->scn_phys.scn_queue_obj, ds2->ds_object, mintxg, tx);
		VERIFY(err == 0 || err == EEXIST);
		if (err == EEXIST) {
			/* Both were there to begin with */
			VERIFY(0 == zap_add_int_key(dp->dp_meta_objset,
			    scn->scn_phys.scn_queue_obj,
			    ds1->ds_object, mintxg, tx));
		}
		zfs_dbgmsg("clone_swap ds %llu; in queue; "
		    "replacing with %llu",
		    (u_longlong_t)ds1->ds_object,
		    (u_longlong_t)ds2->ds_object);
	} else if (zap_lookup_int_key(dp->dp_meta_objset,
	    scn->scn_phys.scn_queue_obj, ds2->ds_object, &mintxg) == 0) {
		ASSERT3U(mintxg, ==, ds1->ds_phys->ds_prev_snap_txg);
		ASSERT3U(mintxg, ==, ds2->ds_phys->ds_prev_snap_txg);
		VERIFY3U(0, ==, zap_remove_int(dp->dp_meta_objset,
		    scn->scn_phys.scn_queue_obj, ds2->ds_object, tx));
		VERIFY(0 == zap_add_int_key(dp->dp_meta_objset,
		    scn->scn_phys.scn_queue_obj, ds1->ds_object, mintxg, tx));
		zfs_dbgmsg("clone_swap ds %llu; in queue; "
		    "replacing with %llu",
		    (u_longlong_t)ds2->ds_object,
		    (u_longlong_t)ds1->ds_object);
	}

	dsl_scan_sync_state(scn, tx);
}

struct enqueue_clones_arg {
	dmu_tx_t *tx;
	uint64_t originobj;
};

/* ARGSUSED */
static int
enqueue_clones_cb(dsl_pool_t *dp, dsl_dataset_t *hds, void *arg)
{
	struct enqueue_clones_arg *eca = arg;
	dsl_dataset_t *ds;
	int err;
	dsl_scan_t *scn = dp->dp_scan;

	if (hds->ds_dir->dd_phys->dd_origin_obj != eca->originobj)
		return (0);

	err = dsl_dataset_hold_obj(dp, hds->ds_object, FTAG, &ds);
	if (err)
		return (err);

	while (ds->ds_phys->ds_prev_snap_obj != eca->originobj) {
		dsl_dataset_t *prev;
		err = dsl_dataset_hold_obj(dp,
		    ds->ds_phys->ds_prev_snap_obj, FTAG, &prev);

		dsl_dataset_rele(ds, FTAG);
		if (err)
			return (err);
		ds = prev;
	}
	VERIFY(zap_add_int_key(dp->dp_meta_objset,
	    scn->scn_phys.scn_queue_obj, ds->ds_object,
	    ds->ds_phys->ds_prev_snap_txg, eca->tx) == 0);
	dsl_dataset_rele(ds, FTAG);
	return (0);
}

static void
dsl_scan_visitds(dsl_scan_t *scn, uint64_t dsobj, dmu_tx_t *tx)
{
	dsl_pool_t *dp = scn->scn_dp;
	dsl_dataset_t *ds;
	objset_t *os;

	VERIFY3U(0, ==, dsl_dataset_hold_obj(dp, dsobj, FTAG, &ds));

	if (dmu_objset_from_ds(ds, &os))
		goto out;

	/*
	 * Only the ZIL in the head (non-snapshot) is valid.  Even though
	 * snapshots can have ZIL block pointers (which may be the same
	 * BP as in the head), they must be ignored.  So we traverse the
	 * ZIL here, rather than in scan_recurse(), because the regular
	 * snapshot block-sharing rules don't apply to it.
	 */
	if (DSL_SCAN_IS_SCRUB_RESILVER(scn) && !dsl_dataset_is_snapshot(ds))
		dsl_scan_zil(dp, &os->os_zil_header);

	/*
	 * Iterate over the bps in this ds.
	 */
	dmu_buf_will_dirty(ds->ds_dbuf, tx);
	dsl_scan_visit_rootbp(scn, ds, &ds->ds_phys->ds_bp, tx);

	char *dsname = kmem_alloc(ZFS_MAXNAMELEN, KM_SLEEP);
	dsl_dataset_name(ds, dsname);
	zfs_dbgmsg("scanned dataset %llu (%s) with min=%llu max=%llu; "
	    "pausing=%u",
	    (longlong_t)dsobj, dsname,
	    (longlong_t)scn->scn_phys.scn_cur_min_txg,
	    (longlong_t)scn->scn_phys.scn_cur_max_txg,
	    (int)scn->scn_pausing);
	kmem_free(dsname, ZFS_MAXNAMELEN);

	if (scn->scn_pausing)
		goto out;

	/*
	 * We've finished this pass over this dataset.
	 */

	/*
	 * If we did not completely visit this dataset, do another pass.
	 */
	if (scn->scn_phys.scn_flags & DSF_VISIT_DS_AGAIN) {
		zfs_dbgmsg("incomplete pass; visiting again");
		scn->scn_phys.scn_flags &= ~DSF_VISIT_DS_AGAIN;
		VERIFY(zap_add_int_key(dp->dp_meta_objset,
		    scn->scn_phys.scn_queue_obj, ds->ds_object,
		    scn->scn_phys.scn_cur_max_txg, tx) == 0);
		goto out;
	}

	/*
	 * Add descendant datasets to work queue.
	 */
	if (ds->ds_phys->ds_next_snap_obj != 0) {
		VERIFY(zap_add_int_key(dp->dp_meta_objset,
		    scn->scn_phys.scn_queue_obj, ds->ds_phys->ds_next_snap_obj,
		    ds->ds_phys->ds_creation_txg, tx) == 0);
	}
	if (ds->ds_phys->ds_num_children > 1) {
		boolean_t usenext = B_FALSE;
		if (ds->ds_phys->ds_next_clones_obj != 0) {
			uint64_t count;
			/*
			 * A bug in a previous version of the code could
			 * cause upgrade_clones_cb() to not set
			 * ds_next_snap_obj when it should, leading to a
			 * missing entry.  Therefore we can only use the
			 * next_clones_obj when its count is correct.
			 */
			int err = zap_count(dp->dp_meta_objset,
			    ds->ds_phys->ds_next_clones_obj, &count);
			if (err == 0 &&
			    count == ds->ds_phys->ds_num_children - 1)
				usenext = B_TRUE;
		}

		if (usenext) {
			VERIFY0(zap_join_key(dp->dp_meta_objset,
			    ds->ds_phys->ds_next_clones_obj,
			    scn->scn_phys.scn_queue_obj,
			    ds->ds_phys->ds_creation_txg, tx));
		} else {
			struct enqueue_clones_arg eca;
			eca.tx = tx;
			eca.originobj = ds->ds_object;

			VERIFY0(dmu_objset_find_dp(dp, dp->dp_root_dir_obj,
			    enqueue_clones_cb, &eca, DS_FIND_CHILDREN));
		}
	}

out:
	dsl_dataset_rele(ds, FTAG);
}

/* ARGSUSED */
static int
enqueue_cb(dsl_pool_t *dp, dsl_dataset_t *hds, void *arg)
{
	dmu_tx_t *tx = arg;
	dsl_dataset_t *ds;
	int err;
	dsl_scan_t *scn = dp->dp_scan;

	err = dsl_dataset_hold_obj(dp, hds->ds_object, FTAG, &ds);
	if (err)
		return (err);

	while (ds->ds_phys->ds_prev_snap_obj != 0) {
		dsl_dataset_t *prev;
		err = dsl_dataset_hold_obj(dp, ds->ds_phys->ds_prev_snap_obj,
		    FTAG, &prev);
		if (err) {
			dsl_dataset_rele(ds, FTAG);
			return (err);
		}

		/*
		 * If this is a clone, we don't need to worry about it for now.
		 */
		if (prev->ds_phys->ds_next_snap_obj != ds->ds_object) {
			dsl_dataset_rele(ds, FTAG);
			dsl_dataset_rele(prev, FTAG);
			return (0);
		}
		dsl_dataset_rele(ds, FTAG);
		ds = prev;
	}

	VERIFY(zap_add_int_key(dp->dp_meta_objset, scn->scn_phys.scn_queue_obj,
	    ds->ds_object, ds->ds_phys->ds_prev_snap_txg, tx) == 0);
	dsl_dataset_rele(ds, FTAG);
	return (0);
}

/*
 * Scrub/dedup interaction.
 *
 * If there are N references to a deduped block, we don't want to scrub it
 * N times -- ideally, we should scrub it exactly once.
 *
 * We leverage the fact that the dde's replication class (enum ddt_class)
 * is ordered from highest replication class (DDT_CLASS_DITTO) to lowest
 * (DDT_CLASS_UNIQUE) so that we may walk the DDT in that order.
 *
 * To prevent excess scrubbing, the scrub begins by walking the DDT
 * to find all blocks with refcnt > 1, and scrubs each of these once.
 * Since there are two replication classes which contain blocks with
 * refcnt > 1, we scrub the highest replication class (DDT_CLASS_DITTO) first.
 * Finally the top-down scrub begins, only visiting blocks with refcnt == 1.
 *
 * There would be nothing more to say if a block's refcnt couldn't change
 * during a scrub, but of course it can so we must account for changes
 * in a block's replication class.
 *
 * Here's an example of what can occur:
 *
 * If a block has refcnt > 1 during the DDT scrub phase, but has refcnt == 1
 * when visited during the top-down scrub phase, it will be scrubbed twice.
 * This negates our scrub optimization, but is otherwise harmless.
 *
 * If a block has refcnt == 1 during the DDT scrub phase, but has refcnt > 1
 * on each visit during the top-down scrub phase, it will never be scrubbed.
 * To catch this, ddt_sync_entry() notifies the scrub code whenever a block's
 * reference class transitions to a higher level (i.e. DDT_CLASS_UNIQUE to
 * DDT_CLASS_DUPLICATE); if it transitions from refcnt == 1 to refcnt > 1
 * while a scrub is in progress, it scrubs the block right then.
 */
static void
dsl_scan_ddt(dsl_scan_t *scn, dmu_tx_t *tx)
{
	ddt_bookmark_t *ddb = &scn->scn_phys.scn_ddt_bookmark;
	ddt_entry_t dde = { 0 };
	int error;
	uint64_t n = 0;

	while ((error = ddt_walk(scn->scn_dp->dp_spa, ddb, &dde)) == 0) {
		ddt_t *ddt;

		if (ddb->ddb_class > scn->scn_phys.scn_ddt_class_max)
			break;
		dprintf("visiting ddb=%llu/%llu/%llu/%llx\n",
		    (longlong_t)ddb->ddb_class,
		    (longlong_t)ddb->ddb_type,
		    (longlong_t)ddb->ddb_checksum,
		    (longlong_t)ddb->ddb_cursor);

		/* There should be no pending changes to the dedup table */
		ddt = scn->scn_dp->dp_spa->spa_ddt[ddb->ddb_checksum];
		ASSERT(avl_first(&ddt->ddt_tree) == NULL);

		dsl_scan_ddt_entry(scn, ddb->ddb_checksum, &dde, tx);
		n++;

		if (dsl_scan_check_pause(scn, NULL))
			break;
	}

	zfs_dbgmsg("scanned %llu ddt entries with class_max = %u; pausing=%u",
	    (longlong_t)n, (int)scn->scn_phys.scn_ddt_class_max,
	    (int)scn->scn_pausing);

	ASSERT(error == 0 || error == ENOENT);
	ASSERT(error != ENOENT ||
	    ddb->ddb_class > scn->scn_phys.scn_ddt_class_max);
}

/* ARGSUSED */
void
dsl_scan_ddt_entry(dsl_scan_t *scn, enum zio_checksum checksum,
    ddt_entry_t *dde, dmu_tx_t *tx)
{
	const ddt_key_t *ddk = &dde->dde_key;
	ddt_phys_t *ddp = dde->dde_phys;
	blkptr_t bp;
	zbookmark_t zb = { 0 };

	if (scn->scn_phys.scn_state != DSS_SCANNING)
		return;

	for (int p = 0; p < DDT_PHYS_TYPES; p++, ddp++) {
		if (ddp->ddp_phys_birth == 0 ||
		    ddp->ddp_phys_birth > scn->scn_phys.scn_max_txg)
			continue;
		ddt_bp_create(checksum, ddk, ddp, &bp);

		scn->scn_visited_this_txg++;
		scan_funcs[scn->scn_phys.scn_func](scn->scn_dp, &bp, &zb);
	}
}

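/*
 * dsl_scan_visit() drives one txg's worth of scanning: first the DDT
 * phase, then (on a fresh scan) the MOS and the $ORIGIN snapshot, or
 * (on resume) the dataset recorded in the bookmark, and finally the
 * dataset work queue.  The queue is a ZAP (scn_queue_obj) that maps
 * dataset object number -> the min txg from which to scan that
 * dataset; entries are added as the snapshots and clones descending
 * from a completed dataset are discovered.
 */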
static void
dsl_scan_visit(dsl_scan_t *scn, dmu_tx_t *tx)
{
	dsl_pool_t *dp = scn->scn_dp;
	zap_cursor_t zc;
	zap_attribute_t za;

	if (scn->scn_phys.scn_ddt_bookmark.ddb_class <=
	    scn->scn_phys.scn_ddt_class_max) {
		scn->scn_phys.scn_cur_min_txg = scn->scn_phys.scn_min_txg;
		scn->scn_phys.scn_cur_max_txg = scn->scn_phys.scn_max_txg;
		dsl_scan_ddt(scn, tx);
		if (scn->scn_pausing)
			return;
	}

	if (scn->scn_phys.scn_bookmark.zb_objset == DMU_META_OBJSET) {
		/* First do the MOS & ORIGIN */

		scn->scn_phys.scn_cur_min_txg = scn->scn_phys.scn_min_txg;
		scn->scn_phys.scn_cur_max_txg = scn->scn_phys.scn_max_txg;
		dsl_scan_visit_rootbp(scn, NULL,
		    &dp->dp_meta_rootbp, tx);
		spa_set_rootblkptr(dp->dp_spa, &dp->dp_meta_rootbp);
		if (scn->scn_pausing)
			return;

		if (spa_version(dp->dp_spa) < SPA_VERSION_DSL_SCRUB) {
			VERIFY0(dmu_objset_find_dp(dp, dp->dp_root_dir_obj,
			    enqueue_cb, tx, DS_FIND_CHILDREN));
		} else {
			dsl_scan_visitds(scn,
			    dp->dp_origin_snap->ds_object, tx);
		}
		ASSERT(!scn->scn_pausing);
	} else if (scn->scn_phys.scn_bookmark.zb_objset !=
	    ZB_DESTROYED_OBJSET) {
		/*
		 * If we were paused, continue from here.  Note if the
		 * ds we were paused on was deleted, the zb_objset may
		 * be -1, so we will skip this and find a new objset
		 * below.
		 */
		dsl_scan_visitds(scn, scn->scn_phys.scn_bookmark.zb_objset, tx);
		if (scn->scn_pausing)
			return;
	}

	/*
	 * In case we were paused right at the end of the ds, zero the
	 * bookmark so we don't think that we're still trying to resume.
	 */
	bzero(&scn->scn_phys.scn_bookmark, sizeof (zbookmark_t));

	/* keep pulling things out of the zap-object-as-queue */
	while (zap_cursor_init(&zc, dp->dp_meta_objset,
	    scn->scn_phys.scn_queue_obj),
	    zap_cursor_retrieve(&zc, &za) == 0) {
		dsl_dataset_t *ds;
		uint64_t dsobj;

		dsobj = strtonum(za.za_name, NULL);
		VERIFY3U(0, ==, zap_remove_int(dp->dp_meta_objset,
		    scn->scn_phys.scn_queue_obj, dsobj, tx));

		/* Set up min/max txg */
		VERIFY3U(0, ==, dsl_dataset_hold_obj(dp, dsobj, FTAG, &ds));
		if (za.za_first_integer != 0) {
			scn->scn_phys.scn_cur_min_txg =
			    MAX(scn->scn_phys.scn_min_txg,
			    za.za_first_integer);
		} else {
			scn->scn_phys.scn_cur_min_txg =
			    MAX(scn->scn_phys.scn_min_txg,
			    ds->ds_phys->ds_prev_snap_txg);
		}
		scn->scn_phys.scn_cur_max_txg = dsl_scan_ds_maxtxg(ds);
		dsl_dataset_rele(ds, FTAG);

		dsl_scan_visitds(scn, dsobj, tx);
		zap_cursor_fini(&zc);
		if (scn->scn_pausing)
			return;
	}
	zap_cursor_fini(&zc);
}

static boolean_t
dsl_scan_free_should_pause(dsl_scan_t *scn)
{
	uint64_t elapsed_nanosecs;

	if (zfs_recover)
		return (B_FALSE);

	elapsed_nanosecs = gethrtime() - scn->scn_sync_start_time;
	return (elapsed_nanosecs / NANOSEC > zfs_txg_timeout ||
	    (NSEC2MSEC(elapsed_nanosecs) > zfs_free_min_time_ms &&
	    txg_sync_waiting(scn->scn_dp)) ||
	    spa_shutting_down(scn->scn_dp->dp_spa));
}

static int
dsl_scan_free_block_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
{
	dsl_scan_t *scn = arg;

	if (!scn->scn_is_bptree ||
	    (BP_GET_LEVEL(bp) == 0 && BP_GET_TYPE(bp) != DMU_OT_OBJSET)) {
		if (dsl_scan_free_should_pause(scn))
			return (SET_ERROR(ERESTART));
	}

	zio_nowait(zio_free_sync(scn->scn_zio_root, scn->scn_dp->dp_spa,
	    dmu_tx_get_txg(tx), bp, BP_GET_PSIZE(bp), 0));
	dsl_dir_diduse_space(tx->tx_pool->dp_free_dir, DD_USED_HEAD,
	    -bp_get_dsize_sync(scn->scn_dp->dp_spa, bp),
	    -BP_GET_PSIZE(bp), -BP_GET_UCSIZE(bp), tx);
	scn->scn_visited_this_txg++;
	return (0);
}

boolean_t
dsl_scan_active(dsl_scan_t *scn)
{
	spa_t *spa = scn->scn_dp->dp_spa;
	uint64_t used = 0, comp, uncomp;

	if (spa->spa_load_state != SPA_LOAD_NONE)
		return (B_FALSE);
	if (spa_shutting_down(spa))
		return (B_FALSE);
	if (scn->scn_phys.scn_state == DSS_SCANNING ||
	    scn->scn_async_destroying)
		return (B_TRUE);

	if (spa_version(scn->scn_dp->dp_spa) >= SPA_VERSION_DEADLISTS) {
		(void) bpobj_space(&scn->scn_dp->dp_free_bpobj,
		    &used, &comp, &uncomp);
	}
	return (used != 0);
}

void
dsl_scan_sync(dsl_pool_t *dp, dmu_tx_t *tx)
{
	dsl_scan_t *scn = dp->dp_scan;
	spa_t *spa = dp->dp_spa;
	int err;

	/*
	 * Check for scn_restart_txg before checking spa_load_state, so
	 * that we can restart an old-style scan while the pool is being
	 * imported (see dsl_scan_init).
	 */
	if (scn->scn_restart_txg != 0 &&
	    scn->scn_restart_txg <= tx->tx_txg) {
		pool_scan_func_t func = POOL_SCAN_SCRUB;
		dsl_scan_done(scn, B_FALSE, tx);
		if (vdev_resilver_needed(spa->spa_root_vdev, NULL, NULL))
			func = POOL_SCAN_RESILVER;
		zfs_dbgmsg("restarting scan func=%u txg=%llu",
		    func, tx->tx_txg);
		dsl_scan_setup_sync(&func, tx);
	}

	if (!dsl_scan_active(scn) ||
	    spa_sync_pass(dp->dp_spa) > 1)
		return;

	scn->scn_visited_this_txg = 0;
	scn->scn_pausing = B_FALSE;
	scn->scn_sync_start_time = gethrtime();
	spa->spa_scrub_active = B_TRUE;

	/*
	 * First process the free list.  If we pause the free, don't do
	 * any scanning.  This ensures that there is no free list when
	 * we are scanning, so the scan code doesn't have to worry about
	 * traversing it.
	 */
	if (spa_version(dp->dp_spa) >= SPA_VERSION_DEADLISTS) {
		scn->scn_is_bptree = B_FALSE;
		scn->scn_zio_root = zio_root(dp->dp_spa, NULL,
		    NULL, ZIO_FLAG_MUSTSUCCEED);
		err = bpobj_iterate(&dp->dp_free_bpobj,
		    dsl_scan_free_block_cb, scn, tx);
		VERIFY3U(0, ==, zio_wait(scn->scn_zio_root));

		if (err == 0 && spa_feature_is_active(spa,
		    SPA_FEATURE_ASYNC_DESTROY)) {
			ASSERT(scn->scn_async_destroying);
			scn->scn_is_bptree = B_TRUE;
			scn->scn_zio_root = zio_root(dp->dp_spa, NULL,
			    NULL, ZIO_FLAG_MUSTSUCCEED);
			err = bptree_iterate(dp->dp_meta_objset,
			    dp->dp_bptree_obj, B_TRUE, dsl_scan_free_block_cb,
			    scn, tx);
			VERIFY0(zio_wait(scn->scn_zio_root));

			if (err == 0) {
				/* finished; deactivate async destroy feature */
				spa_feature_decr(spa, SPA_FEATURE_ASYNC_DESTROY,
				    tx);
				ASSERT(!spa_feature_is_active(spa,
				    SPA_FEATURE_ASYNC_DESTROY));
				VERIFY0(zap_remove(dp->dp_meta_objset,
				    DMU_POOL_DIRECTORY_OBJECT,
				    DMU_POOL_BPTREE_OBJ, tx));
				VERIFY0(bptree_free(dp->dp_meta_objset,
				    dp->dp_bptree_obj, tx));
				dp->dp_bptree_obj = 0;
				scn->scn_async_destroying = B_FALSE;
			}
		}
		if (scn->scn_visited_this_txg) {
			zfs_dbgmsg("freed %llu blocks in %llums from "
			    "free_bpobj/bptree txg %llu",
			    (longlong_t)scn->scn_visited_this_txg,
			    (longlong_t)
			    NSEC2MSEC(gethrtime() - scn->scn_sync_start_time),
			    (longlong_t)tx->tx_txg);
			scn->scn_visited_this_txg = 0;
			/*
			 * Re-sync the ddt so that we can further modify
			 * it when doing bprewrite.
			 */
			ddt_sync(spa, tx->tx_txg);
		}
		if (err == ERESTART)
			return;
	}

	if (scn->scn_phys.scn_state != DSS_SCANNING)
		return;

	if (scn->scn_done_txg == tx->tx_txg) {
		ASSERT(!scn->scn_pausing);
		/* finished with scan. */
		zfs_dbgmsg("txg %llu scan complete", tx->tx_txg);
		dsl_scan_done(scn, B_TRUE, tx);
		ASSERT3U(spa->spa_scrub_inflight, ==, 0);
		dsl_scan_sync_state(scn, tx);
		return;
	}

	if (scn->scn_phys.scn_ddt_bookmark.ddb_class <=
	    scn->scn_phys.scn_ddt_class_max) {
		zfs_dbgmsg("doing scan sync txg %llu; "
		    "ddt bm=%llu/%llu/%llu/%llx",
		    (longlong_t)tx->tx_txg,
		    (longlong_t)scn->scn_phys.scn_ddt_bookmark.ddb_class,
		    (longlong_t)scn->scn_phys.scn_ddt_bookmark.ddb_type,
		    (longlong_t)scn->scn_phys.scn_ddt_bookmark.ddb_checksum,
		    (longlong_t)scn->scn_phys.scn_ddt_bookmark.ddb_cursor);
		ASSERT(scn->scn_phys.scn_bookmark.zb_objset == 0);
		ASSERT(scn->scn_phys.scn_bookmark.zb_object == 0);
		ASSERT(scn->scn_phys.scn_bookmark.zb_level == 0);
		ASSERT(scn->scn_phys.scn_bookmark.zb_blkid == 0);
	} else {
		zfs_dbgmsg("doing scan sync txg %llu; bm=%llu/%llu/%llu/%llu",
		    (longlong_t)tx->tx_txg,
		    (longlong_t)scn->scn_phys.scn_bookmark.zb_objset,
		    (longlong_t)scn->scn_phys.scn_bookmark.zb_object,
		    (longlong_t)scn->scn_phys.scn_bookmark.zb_level,
		    (longlong_t)scn->scn_phys.scn_bookmark.zb_blkid);
	}

	scn->scn_zio_root = zio_root(dp->dp_spa, NULL,
	    NULL, ZIO_FLAG_CANFAIL);
	dsl_pool_config_enter(dp, FTAG);
	dsl_scan_visit(scn, tx);
	dsl_pool_config_exit(dp, FTAG);
	(void) zio_wait(scn->scn_zio_root);
	scn->scn_zio_root = NULL;

	zfs_dbgmsg("visited %llu blocks in %llums",
	    (longlong_t)scn->scn_visited_this_txg,
	    (longlong_t)NSEC2MSEC(gethrtime() - scn->scn_sync_start_time));

	if (!scn->scn_pausing) {
		scn->scn_done_txg = tx->tx_txg + 1;
		zfs_dbgmsg("txg %llu traversal complete, waiting till txg %llu",
		    tx->tx_txg, scn->scn_done_txg);
	}

	if (DSL_SCAN_IS_SCRUB_RESILVER(scn)) {
		mutex_enter(&spa->spa_scrub_lock);
		while (spa->spa_scrub_inflight > 0) {
			cv_wait(&spa->spa_scrub_io_cv,
			    &spa->spa_scrub_lock);
		}
		mutex_exit(&spa->spa_scrub_lock);
	}

	dsl_scan_sync_state(scn, tx);
}

/*
 * This will start a new scan, or restart an existing one.
 */
void
dsl_resilver_restart(dsl_pool_t *dp, uint64_t txg)
{
	if (txg == 0) {
		dmu_tx_t *tx;
		tx = dmu_tx_create_dd(dp->dp_mos_dir);
		VERIFY(0 == dmu_tx_assign(tx, TXG_WAIT));

		txg = dmu_tx_get_txg(tx);
		dp->dp_scan->scn_restart_txg = txg;
		dmu_tx_commit(tx);
	} else {
		dp->dp_scan->scn_restart_txg = txg;
	}
	zfs_dbgmsg("restarting resilver txg=%llu", txg);
}

boolean_t
dsl_scan_resilvering(dsl_pool_t *dp)
{
	return (dp->dp_scan->scn_phys.scn_state == DSS_SCANNING &&
	    dp->dp_scan->scn_phys.scn_func == POOL_SCAN_RESILVER);
}

/*
 * scrub consumers
 */

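/*
 * count_block() tallies each block into four histogram buckets: its
 * own (level, type) slot, plus the (level, DMU_OT_TOTAL),
 * (DN_MAX_LEVELS, type), and (DN_MAX_LEVELS, DMU_OT_TOTAL) roll-up
 * slots, so per-level and per-type totals fall out of the same table.
 */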
static void
count_block(zfs_all_blkstats_t *zab, const blkptr_t *bp)
{
	int i;

	/*
	 * If we resume after a reboot, zab will be NULL; don't record
	 * incomplete stats in that case.
	 */
	if (zab == NULL)
		return;

	for (i = 0; i < 4; i++) {
		int l = (i < 2) ? BP_GET_LEVEL(bp) : DN_MAX_LEVELS;
		int t = (i & 1) ? BP_GET_TYPE(bp) : DMU_OT_TOTAL;
		if (t & DMU_OT_NEWTYPE)
			t = DMU_OT_OTHER;
		zfs_blkstat_t *zb = &zab->zab_type[l][t];
		int equal;

		zb->zb_count++;
		zb->zb_asize += BP_GET_ASIZE(bp);
		zb->zb_lsize += BP_GET_LSIZE(bp);
		zb->zb_psize += BP_GET_PSIZE(bp);
		zb->zb_gangs += BP_COUNT_GANG(bp);

		switch (BP_GET_NDVAS(bp)) {
		case 2:
			if (DVA_GET_VDEV(&bp->blk_dva[0]) ==
			    DVA_GET_VDEV(&bp->blk_dva[1]))
				zb->zb_ditto_2_of_2_samevdev++;
			break;
		case 3:
			equal = (DVA_GET_VDEV(&bp->blk_dva[0]) ==
			    DVA_GET_VDEV(&bp->blk_dva[1])) +
			    (DVA_GET_VDEV(&bp->blk_dva[0]) ==
			    DVA_GET_VDEV(&bp->blk_dva[2])) +
			    (DVA_GET_VDEV(&bp->blk_dva[1]) ==
			    DVA_GET_VDEV(&bp->blk_dva[2]));
			if (equal == 1)
				zb->zb_ditto_2_of_3_samevdev++;
			else if (equal == 3)
				zb->zb_ditto_3_of_3_samevdev++;
			break;
		}
	}
}

static void
dsl_scan_scrub_done(zio_t *zio)
{
	spa_t *spa = zio->io_spa;

	zio_data_buf_free(zio->io_data, zio->io_size);

	mutex_enter(&spa->spa_scrub_lock);
	spa->spa_scrub_inflight--;
	cv_broadcast(&spa->spa_scrub_io_cv);

	if (zio->io_error && (zio->io_error != ECKSUM ||
	    !(zio->io_flags & ZIO_FLAG_SPECULATIVE))) {
		spa->spa_dsl_pool->dp_scan->scn_phys.scn_errors++;
	}
	mutex_exit(&spa->spa_scrub_lock);
}

static int
dsl_scan_scrub_cb(dsl_pool_t *dp,
    const blkptr_t *bp, const zbookmark_t *zb)
{
	dsl_scan_t *scn = dp->dp_scan;
	size_t size = BP_GET_PSIZE(bp);
	spa_t *spa = dp->dp_spa;
	uint64_t phys_birth = BP_PHYSICAL_BIRTH(bp);
	boolean_t needs_io;
	int zio_flags = ZIO_FLAG_SCAN_THREAD | ZIO_FLAG_RAW | ZIO_FLAG_CANFAIL;
	unsigned int scan_delay = 0;

	if (phys_birth <= scn->scn_phys.scn_min_txg ||
	    phys_birth >= scn->scn_phys.scn_max_txg)
		return (0);

	count_block(dp->dp_blkstats, bp);

	ASSERT(DSL_SCAN_IS_SCRUB_RESILVER(scn));
	if (scn->scn_phys.scn_func == POOL_SCAN_SCRUB) {
		zio_flags |= ZIO_FLAG_SCRUB;
		needs_io = B_TRUE;
		scan_delay = zfs_scrub_delay;
	} else {
		ASSERT3U(scn->scn_phys.scn_func, ==, POOL_SCAN_RESILVER);
		zio_flags |= ZIO_FLAG_RESILVER;
		needs_io = B_FALSE;
		scan_delay = zfs_resilver_delay;
	}

	/* If it's an intent log block, failure is expected. */
	if (zb->zb_level == ZB_ZIL_LEVEL)
		zio_flags |= ZIO_FLAG_SPECULATIVE;

	for (int d = 0; d < BP_GET_NDVAS(bp); d++) {
		vdev_t *vd = vdev_lookup_top(spa,
		    DVA_GET_VDEV(&bp->blk_dva[d]));

		/*
		 * Keep track of how much data we've examined so that
		 * zpool(1M) status can make useful progress reports.
		 */
		scn->scn_phys.scn_examined += DVA_GET_ASIZE(&bp->blk_dva[d]);
		spa->spa_scan_pass_exam += DVA_GET_ASIZE(&bp->blk_dva[d]);

		/* if it's a resilver, this may not be in the target range */
		if (!needs_io) {
			if (DVA_GET_GANG(&bp->blk_dva[d])) {
				/*
				 * Gang members may be spread across multiple
				 * vdevs, so the best estimate we have is the
				 * scrub range, which has already been checked.
				 * XXX -- it would be better to change our
				 * allocation policy to ensure that all
				 * gang members reside on the same vdev.
				 */
				needs_io = B_TRUE;
			} else {
				needs_io = vdev_dtl_contains(vd, DTL_PARTIAL,
				    phys_birth, 1);
			}
		}
	}

	if (needs_io && !zfs_no_scrub_io) {
		vdev_t *rvd = spa->spa_root_vdev;
		uint64_t maxinflight = rvd->vdev_children *
		    MAX(zfs_top_maxinflight, 1);
		void *data = zio_data_buf_alloc(size);

		mutex_enter(&spa->spa_scrub_lock);
		while (spa->spa_scrub_inflight >= maxinflight)
			cv_wait(&spa->spa_scrub_io_cv, &spa->spa_scrub_lock);
		spa->spa_scrub_inflight++;
		mutex_exit(&spa->spa_scrub_lock);

		/*
		 * If we're seeing recent (zfs_scan_idle) "important" I/Os
		 * then throttle our workload to limit the impact of a scan.
		 */
		if (ddi_get_lbolt64() - spa->spa_last_io <= zfs_scan_idle)
			delay(MAX((int)scan_delay, 0));

		zio_nowait(zio_read(NULL, spa, bp, data, size,
		    dsl_scan_scrub_done, NULL, ZIO_PRIORITY_SCRUB,
		    zio_flags, zb));
	}

	/* do not relocate this block */
	return (0);
}

int
dsl_scan(dsl_pool_t *dp, pool_scan_func_t func)
{
	spa_t *spa = dp->dp_spa;

	/*
	 * Purge all vdev caches and probe all devices.  We do this here
	 * rather than in sync context because this requires a writer lock
	 * on the spa_config lock, which we can't do from sync context.  The
	 * spa_scrub_reopen flag indicates that vdev_open() should not
	 * attempt to start another scrub.
	 */
	spa_vdev_state_enter(spa, SCL_NONE);
	spa->spa_scrub_reopen = B_TRUE;
	vdev_reopen(spa->spa_root_vdev);
	spa->spa_scrub_reopen = B_FALSE;
	(void) spa_vdev_state_exit(spa, NULL, 0);

	return (dsl_sync_task(spa_name(spa), dsl_scan_setup_check,
	    dsl_scan_setup_sync, &func, 0));
}