spa_misc.c revision 262093
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2013 by Delphix. All rights reserved.
 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
 * Copyright 2013 Martin Matuska <mm@FreeBSD.org>. All rights reserved.
 */

#include <sys/zfs_context.h>
#include <sys/spa_impl.h>
#include <sys/spa_boot.h>
#include <sys/zio.h>
#include <sys/zio_checksum.h>
#include <sys/zio_compress.h>
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/zap.h>
#include <sys/zil.h>
#include <sys/vdev_impl.h>
#include <sys/metaslab.h>
#include <sys/uberblock_impl.h>
#include <sys/txg.h>
#include <sys/avl.h>
#include <sys/unique.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_scan.h>
#include <sys/fs/zfs.h>
#include <sys/metaslab_impl.h>
#include <sys/arc.h>
#include <sys/ddt.h>
#include "zfs_prop.h"
#include "zfeature_common.h"

/*
 * SPA locking
 *
 * There are three basic locks for managing spa_t structures:
 *
 * spa_namespace_lock (global mutex)
 *
 *	This lock must be acquired to do any of the following:
 *
 *		- Lookup a spa_t by name
 *		- Add or remove a spa_t from the namespace
 *		- Increase spa_refcount from zero
 *		- Check if spa_refcount is zero
 *		- Rename a spa_t
 *		- add/remove/attach/detach devices
 *		- Held for the duration of create/destroy/import/export
 *
 *	It does not need to handle recursion.  A create or destroy may
 *	reference objects (files or zvols) in other pools, but by
 *	definition they must have an existing reference, and will never need
 *	to lookup a spa_t by name.
 *
 * spa_refcount (per-spa refcount_t protected by mutex)
 *
 *	This reference count keeps track of any active users of the spa_t.
 *	The spa_t cannot be destroyed or freed while this is non-zero.
 *	Internally, the refcount is never really 'zero' - opening a pool
 *	implicitly keeps some references in the DMU.  Internally we check
 *	against spa_minref, but present the image of a zero/non-zero value
 *	to consumers.
 *
 * spa_config_lock[] (per-spa array of rwlocks)
 *
 *	This protects the spa_t from config changes, and must be held in
 *	the following circumstances:
 *
 *		- RW_READER to perform I/O to the spa
 *		- RW_WRITER to change the vdev config
 *
 * The locking order is fairly straightforward:
 *
 *		spa_namespace_lock	->	spa_refcount
 *
 *	The namespace lock must be acquired to increase the refcount from 0
 *	or to check if it is zero.
 *
 *		spa_refcount		->	spa_config_lock[]
 *
 *	There must be at least one valid reference on the spa_t to acquire
 *	the config lock.
 *
 *		spa_namespace_lock	->	spa_config_lock[]
 *
 *	The namespace lock must always be taken before the config lock.
 *
 *
 * The spa_namespace_lock can be acquired directly and is globally visible.
 *
 * The namespace is manipulated using the following functions, all of which
 * require the spa_namespace_lock to be held.
 *
 *	spa_lookup()		Lookup a spa_t by name.
 *
 *	spa_add()		Create a new spa_t in the namespace.
 *
 *	spa_remove()		Remove a spa_t from the namespace.  This also
 *				frees up any memory associated with the spa_t.
 *
 *	spa_next()		Returns the next spa_t in the system, or the
 *				first if NULL is passed.
 *
 *	spa_evict_all()		Shut down and remove all spa_t structures in
 *				the system.
 *
 *	spa_guid_exists()	Determine whether a pool/device guid exists.
 *
 * The spa_refcount is manipulated using the following functions:
 *
 *	spa_open_ref()		Adds a reference to the given spa_t.  Must be
 *				called with spa_namespace_lock held if the
 *				refcount is currently zero.
 *
 *	spa_close()		Remove a reference from the spa_t.  This will
 *				not free the spa_t or remove it from the
 *				namespace.  No locking is required.
 *
 *	spa_refcount_zero()	Returns true if the refcount is currently
 *				zero.  Must be called with spa_namespace_lock
 *				held.
 *
 * The spa_config_lock[] is an array of rwlocks, ordered as follows:
 * SCL_CONFIG > SCL_STATE > SCL_ALLOC > SCL_ZIO > SCL_FREE > SCL_VDEV.
 * spa_config_lock[] is manipulated with spa_config_{enter,exit,held}().
 *
 * To read the configuration, it suffices to hold one of these locks as reader.
 * To modify the configuration, you must hold all locks as writer.  To modify
 * vdev state without altering the vdev tree's topology (e.g. online/offline),
 * you must hold SCL_STATE and SCL_ZIO as writer.
 *
 * We use these distinct config locks to avoid recursive lock entry.
 * For example, spa_sync() (which holds SCL_CONFIG as reader) induces
 * block allocations (SCL_ALLOC), which may require reading space maps
 * from disk (dmu_read() -> zio_read() -> SCL_ZIO).
 *
 * The spa config locks cannot be normal rwlocks because we need the
 * ability to hand off ownership.  For example, SCL_ZIO is acquired
 * by the issuing thread and later released by an interrupt thread.
 * They do, however, obey the usual write-wanted semantics to prevent
 * writer (i.e. system administrator) starvation.
 *
 * The lock acquisition rules are as follows:
 *
 * SCL_CONFIG
 *	Protects changes to the vdev tree topology, such as vdev
 *	add/remove/attach/detach.  Protects the dirty config list
 *	(spa_config_dirty_list) and the set of spares and l2arc devices.
 *
 * SCL_STATE
 *	Protects changes to pool state and vdev state, such as vdev
 *	online/offline/fault/degrade/clear.  Protects the dirty state list
 *	(spa_state_dirty_list) and global pool state (spa_state).
 *
 * SCL_ALLOC
 *	Protects changes to metaslab groups and classes.
 *	Held as reader by metaslab_alloc() and metaslab_claim().
 *
 * SCL_ZIO
 *	Held by bp-level zios (those which have no io_vd upon entry)
 *	to prevent changes to the vdev tree.  The bp-level zio implicitly
 *	protects all of its vdev child zios, which do not hold SCL_ZIO.
 *
 * SCL_FREE
 *	Protects changes to metaslab groups and classes.
 *	Held as reader by metaslab_free().  SCL_FREE is distinct from
 *	SCL_ALLOC, and lower than SCL_ZIO, so that we can safely free
 *	blocks in zio_done() while another i/o that holds either
 *	SCL_ALLOC or SCL_ZIO is waiting for this i/o to complete.
 *
 * SCL_VDEV
 *	Held as reader to prevent changes to the vdev tree during trivial
 *	inquiries such as bp_get_dsize().  SCL_VDEV is distinct from the
 *	other locks, and lower than all of them, to ensure that it's safe
 *	to acquire regardless of caller context.
 *
 * In addition, the following rules apply:
 *
 * (a)	spa_props_lock protects pool properties, spa_config and spa_config_list.
 *	The lock ordering is SCL_CONFIG > spa_props_lock.
 *
 * (b)	I/O operations on leaf vdevs.  For any zio operation that takes
 *	an explicit vdev_t argument -- such as zio_ioctl(), zio_read_phys(),
 *	or zio_write_phys() -- the caller must ensure that the config cannot
 *	change in the interim, and that the vdev cannot be reopened.
 *	SCL_STATE as reader suffices for both.
 *
 * The vdev configuration is protected by spa_vdev_enter() / spa_vdev_exit().
 *
 *	spa_vdev_enter()	Acquire the namespace lock and the config lock
 *				for writing.
 *
 *	spa_vdev_exit()		Release the config lock, wait for all I/O
 *				to complete, sync the updated configs to the
 *				cache, and release the namespace lock.
 *
 * vdev state is protected by spa_vdev_state_enter() / spa_vdev_state_exit().
 * Like spa_vdev_enter/exit, these are convenience wrappers -- the actual
 * locking is, always, based on spa_namespace_lock and spa_config_lock[].
 *
 * spa_rename() is also implemented within this file since it requires
 * manipulation of the namespace.
 */
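
/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * the canonical pattern for a read-only inquiry under the config lock,
 * following the rules above.  Any stable pointer may serve as the tag;
 * FTAG is the customary choice.  Compiled out via #if 0.
 */
#if 0
static uint64_t
example_count_top_level_vdevs(spa_t *spa)
{
	uint64_t children;

	spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
	children = spa->spa_root_vdev->vdev_children;
	spa_config_exit(spa, SCL_VDEV, FTAG);

	return (children);
}
#endif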

static avl_tree_t spa_namespace_avl;
kmutex_t spa_namespace_lock;
static kcondvar_t spa_namespace_cv;
static int spa_active_count;
int spa_max_replication_override = SPA_DVAS_PER_BP;

static kmutex_t spa_spare_lock;
static avl_tree_t spa_spare_avl;
static kmutex_t spa_l2cache_lock;
static avl_tree_t spa_l2cache_avl;

kmem_cache_t *spa_buffer_pool;
int spa_mode_global;

#ifdef ZFS_DEBUG
/* Everything except dprintf and spa is on by default in debug builds */
int zfs_flags = ~(ZFS_DEBUG_DPRINTF | ZFS_DEBUG_SPA);
#else
int zfs_flags = 0;
#endif
SYSCTL_DECL(_debug);
TUNABLE_INT("debug.zfs_flags", &zfs_flags);
SYSCTL_INT(_debug, OID_AUTO, zfs_flags, CTLFLAG_RWTUN, &zfs_flags, 0,
    "ZFS debug flags.");

/*
 * zfs_recover can be set to nonzero to attempt to recover from
 * otherwise-fatal errors, typically caused by on-disk corruption.  When
 * set, calls to zfs_panic_recover() will turn into warning messages.
 */
int zfs_recover = 0;
SYSCTL_DECL(_vfs_zfs);
TUNABLE_INT("vfs.zfs.recover", &zfs_recover);
SYSCTL_INT(_vfs_zfs, OID_AUTO, recover, CTLFLAG_RDTUN, &zfs_recover, 0,
    "Try to recover from otherwise-fatal errors.");

/*
 * Expiration time in milliseconds.  This value has two meanings.  First,
 * it is used to determine when the spa_deadman() logic should fire.  By
 * default spa_deadman() will fire if spa_sync() has not completed in
 * 1000 seconds.  Second, the value determines if an I/O is considered
 * "hung".  Any I/O that has not completed in zfs_deadman_synctime_ms is
 * considered "hung", resulting in a system panic.
 */
uint64_t zfs_deadman_synctime_ms = 1000000ULL;
TUNABLE_QUAD("vfs.zfs.deadman_synctime_ms", &zfs_deadman_synctime_ms);
SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, deadman_synctime_ms, CTLFLAG_RDTUN,
    &zfs_deadman_synctime_ms, 0,
    "Stalled ZFS I/O expiration time in milliseconds");

/*
 * Check time in milliseconds.  This defines the frequency at which we check
 * for hung I/O.
 */
uint64_t zfs_deadman_checktime_ms = 5000ULL;
TUNABLE_QUAD("vfs.zfs.deadman_checktime_ms", &zfs_deadman_checktime_ms);
SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, deadman_checktime_ms, CTLFLAG_RDTUN,
    &zfs_deadman_checktime_ms, 0,
    "Period of checks for stalled ZFS I/O in milliseconds");

/*
 * The default value of -1 for zfs_deadman_enabled is resolved in
 * zfs_deadman_init().
 */
int zfs_deadman_enabled = -1;
TUNABLE_INT("vfs.zfs.deadman_enabled", &zfs_deadman_enabled);
SYSCTL_INT(_vfs_zfs, OID_AUTO, deadman_enabled, CTLFLAG_RDTUN,
    &zfs_deadman_enabled, 0, "Kernel panic on stalled ZFS I/O");

/*
 * The worst case is single-sector max-parity RAID-Z blocks, in which
 * case the space requirement is exactly (VDEV_RAIDZ_MAXPARITY + 1)
 * times the size; so just assume that.  Add to this the fact that
 * we can have up to 3 DVAs per bp, and one more factor of 2 because
 * the block may be dittoed with up to 3 DVAs by ddt_sync().  All together,
 * the worst case is:
 *	(VDEV_RAIDZ_MAXPARITY + 1) * SPA_DVAS_PER_BP * 2 == 24
 */
int spa_asize_inflation = 24;

#ifndef illumos
#ifdef _KERNEL
static void
zfs_deadman_init()
{
	/*
	 * If we are not on i386 or amd64, or if we are running in a
	 * virtual machine, disable the ZFS deadman thread by default.
	 */
	if (zfs_deadman_enabled == -1) {
#if defined(__amd64__) || defined(__i386__)
		zfs_deadman_enabled = (vm_guest == VM_GUEST_NO) ? 1 : 0;
#else
		zfs_deadman_enabled = 0;
#endif
	}
}
#endif	/* _KERNEL */
#endif	/* !illumos */

/*
 * ==========================================================================
 * SPA config locking
 * ==========================================================================
 */
static void
spa_config_lock_init(spa_t *spa)
{
	for (int i = 0; i < SCL_LOCKS; i++) {
		spa_config_lock_t *scl = &spa->spa_config_lock[i];
		mutex_init(&scl->scl_lock, NULL, MUTEX_DEFAULT, NULL);
		cv_init(&scl->scl_cv, NULL, CV_DEFAULT, NULL);
		refcount_create_untracked(&scl->scl_count);
		scl->scl_writer = NULL;
		scl->scl_write_wanted = 0;
	}
}

static void
spa_config_lock_destroy(spa_t *spa)
{
	for (int i = 0; i < SCL_LOCKS; i++) {
		spa_config_lock_t *scl = &spa->spa_config_lock[i];
		mutex_destroy(&scl->scl_lock);
		cv_destroy(&scl->scl_cv);
		refcount_destroy(&scl->scl_count);
		ASSERT(scl->scl_writer == NULL);
		ASSERT(scl->scl_write_wanted == 0);
	}
}

int
spa_config_tryenter(spa_t *spa, int locks, void *tag, krw_t rw)
{
	for (int i = 0; i < SCL_LOCKS; i++) {
		spa_config_lock_t *scl = &spa->spa_config_lock[i];
		if (!(locks & (1 << i)))
			continue;
		mutex_enter(&scl->scl_lock);
		if (rw == RW_READER) {
			if (scl->scl_writer || scl->scl_write_wanted) {
				mutex_exit(&scl->scl_lock);
				spa_config_exit(spa, locks ^ (1 << i), tag);
				return (0);
			}
		} else {
			ASSERT(scl->scl_writer != curthread);
			if (!refcount_is_zero(&scl->scl_count)) {
				mutex_exit(&scl->scl_lock);
				spa_config_exit(spa, locks ^ (1 << i), tag);
				return (0);
			}
			scl->scl_writer = curthread;
		}
		(void) refcount_add(&scl->scl_count, tag);
		mutex_exit(&scl->scl_lock);
	}
	return (1);
}

void
spa_config_enter(spa_t *spa, int locks, void *tag, krw_t rw)
{
	int wlocks_held = 0;

	ASSERT3U(SCL_LOCKS, <, sizeof (wlocks_held) * NBBY);

	for (int i = 0; i < SCL_LOCKS; i++) {
		spa_config_lock_t *scl = &spa->spa_config_lock[i];
		if (scl->scl_writer == curthread)
			wlocks_held |= (1 << i);
		if (!(locks & (1 << i)))
			continue;
		mutex_enter(&scl->scl_lock);
		if (rw == RW_READER) {
			while (scl->scl_writer || scl->scl_write_wanted) {
				cv_wait(&scl->scl_cv, &scl->scl_lock);
			}
		} else {
			ASSERT(scl->scl_writer != curthread);
			while (!refcount_is_zero(&scl->scl_count)) {
				scl->scl_write_wanted++;
				cv_wait(&scl->scl_cv, &scl->scl_lock);
				scl->scl_write_wanted--;
			}
			scl->scl_writer = curthread;
		}
		(void) refcount_add(&scl->scl_count, tag);
		mutex_exit(&scl->scl_lock);
	}
	ASSERT(wlocks_held <= locks);
}

void
spa_config_exit(spa_t *spa, int locks, void *tag)
{
	for (int i = SCL_LOCKS - 1; i >= 0; i--) {
		spa_config_lock_t *scl = &spa->spa_config_lock[i];
		if (!(locks & (1 << i)))
			continue;
		mutex_enter(&scl->scl_lock);
		ASSERT(!refcount_is_zero(&scl->scl_count));
		if (refcount_remove(&scl->scl_count, tag) == 0) {
			ASSERT(scl->scl_writer == NULL ||
			    scl->scl_writer == curthread);
			scl->scl_writer = NULL;	/* OK in either case */
			cv_broadcast(&scl->scl_cv);
		}
		mutex_exit(&scl->scl_lock);
	}
}
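
/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * if spa_config_tryenter() cannot get every requested lock it returns 0
 * with no locks held, so a caller may fall back to the blocking
 * spa_config_enter().  Compiled out via #if 0.
 */
#if 0
static void
example_tryenter_fallback(spa_t *spa)
{
	if (!spa_config_tryenter(spa, SCL_VDEV, FTAG, RW_READER))
		spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
	ASSERT(spa_config_held(spa, SCL_VDEV, RW_READER));
	/* ... read the vdev tree ... */
	spa_config_exit(spa, SCL_VDEV, FTAG);
}
#endif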

int
spa_config_held(spa_t *spa, int locks, krw_t rw)
{
	int locks_held = 0;

	for (int i = 0; i < SCL_LOCKS; i++) {
		spa_config_lock_t *scl = &spa->spa_config_lock[i];
		if (!(locks & (1 << i)))
			continue;
		if ((rw == RW_READER && !refcount_is_zero(&scl->scl_count)) ||
		    (rw == RW_WRITER && scl->scl_writer == curthread))
			locks_held |= 1 << i;
	}

	return (locks_held);
}

/*
 * ==========================================================================
 * SPA namespace functions
 * ==========================================================================
 */

/*
 * Lookup the named spa_t in the AVL tree.  The spa_namespace_lock must be held.
 * Returns NULL if no matching spa_t is found.
 */
spa_t *
spa_lookup(const char *name)
{
	static spa_t search;	/* spa_t is large; don't allocate on stack */
	spa_t *spa;
	avl_index_t where;
	char *cp;

	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	(void) strlcpy(search.spa_name, name, sizeof (search.spa_name));

	/*
	 * If it's a full dataset name, figure out the pool name and
	 * just use that.
	 */
	cp = strpbrk(search.spa_name, "/@");
	if (cp != NULL)
		*cp = '\0';

	spa = avl_find(&spa_namespace_avl, &search, &where);

	return (spa);
}

/*
 * Fires when spa_sync has not completed within zfs_deadman_synctime_ms.
 * If the zfs_deadman_enabled flag is set then it inspects all vdev queues
 * looking for potentially hung I/Os.
 */
void
spa_deadman(void *arg)
{
	spa_t *spa = arg;

	/*
	 * Disable the deadman timer if the pool is suspended.
	 */
	if (spa_suspended(spa)) {
#ifdef illumos
		VERIFY(cyclic_reprogram(spa->spa_deadman_cycid, CY_INFINITY));
#else
		/* Nothing.  Just don't schedule any future callouts. */
#endif
		return;
	}

	zfs_dbgmsg("slow spa_sync: started %llu seconds ago, calls %llu",
	    (gethrtime() - spa->spa_sync_starttime) / NANOSEC,
	    ++spa->spa_deadman_calls);
	if (zfs_deadman_enabled)
		vdev_deadman(spa->spa_root_vdev);
}
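
/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * spa_lookup() accepts a full dataset name and truncates it to the pool
 * name, so both calls below return the same spa_t (or NULL if the
 * hypothetical pool "tank" is not imported).  The namespace lock must be
 * held.  Compiled out via #if 0.
 */
#if 0
static spa_t *
example_lookup(void)
{
	spa_t *spa;

	ASSERT(MUTEX_HELD(&spa_namespace_lock));
	spa = spa_lookup("tank/home@yesterday");
	ASSERT3P(spa, ==, spa_lookup("tank"));
	return (spa);
}
#endif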

/*
 * Create an uninitialized spa_t with the given name.  Requires
 * spa_namespace_lock.  The caller must ensure that the spa_t doesn't already
 * exist by calling spa_lookup() first.
 */
spa_t *
spa_add(const char *name, nvlist_t *config, const char *altroot)
{
	spa_t *spa;
	spa_config_dirent_t *dp;
#ifdef illumos
	cyc_handler_t hdlr;
	cyc_time_t when;
#endif

	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	spa = kmem_zalloc(sizeof (spa_t), KM_SLEEP);

	mutex_init(&spa->spa_async_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_errlist_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_errlog_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_history_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_proc_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_props_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_scrub_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_suspend_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_vdev_top_lock, NULL, MUTEX_DEFAULT, NULL);

	cv_init(&spa->spa_async_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&spa->spa_proc_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&spa->spa_scrub_io_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&spa->spa_suspend_cv, NULL, CV_DEFAULT, NULL);

	for (int t = 0; t < TXG_SIZE; t++)
		bplist_create(&spa->spa_free_bplist[t]);

	(void) strlcpy(spa->spa_name, name, sizeof (spa->spa_name));
	spa->spa_state = POOL_STATE_UNINITIALIZED;
	spa->spa_freeze_txg = UINT64_MAX;
	spa->spa_final_txg = UINT64_MAX;
	spa->spa_load_max_txg = UINT64_MAX;
	spa->spa_proc = &p0;
	spa->spa_proc_state = SPA_PROC_NONE;

#ifdef illumos
	hdlr.cyh_func = spa_deadman;
	hdlr.cyh_arg = spa;
	hdlr.cyh_level = CY_LOW_LEVEL;
#endif

	spa->spa_deadman_synctime = MSEC2NSEC(zfs_deadman_synctime_ms);

#ifdef illumos
	/*
	 * This determines how often we need to check for hung I/Os after
	 * the cyclic has already fired.  Since checking for hung I/Os is
	 * an expensive operation we don't want to check too frequently.
	 * Instead wait for 5 seconds before checking again.
	 */
	when.cyt_interval = MSEC2NSEC(zfs_deadman_checktime_ms);
	when.cyt_when = CY_INFINITY;
	mutex_enter(&cpu_lock);
	spa->spa_deadman_cycid = cyclic_add(&hdlr, &when);
	mutex_exit(&cpu_lock);
#else	/* !illumos */
#ifdef _KERNEL
	callout_init(&spa->spa_deadman_cycid, CALLOUT_MPSAFE);
#endif
#endif
	refcount_create(&spa->spa_refcount);
	spa_config_lock_init(spa);

	avl_add(&spa_namespace_avl, spa);

	/*
	 * Set the alternate root, if there is one.
	 */
	if (altroot) {
		spa->spa_root = spa_strdup(altroot);
		spa_active_count++;
	}

	/*
	 * Every pool starts with the default cachefile
	 */
	list_create(&spa->spa_config_list, sizeof (spa_config_dirent_t),
	    offsetof(spa_config_dirent_t, scd_link));

	dp = kmem_zalloc(sizeof (spa_config_dirent_t), KM_SLEEP);
	dp->scd_path = altroot ? NULL : spa_strdup(spa_config_path);
	list_insert_head(&spa->spa_config_list, dp);

	VERIFY(nvlist_alloc(&spa->spa_load_info, NV_UNIQUE_NAME,
	    KM_SLEEP) == 0);

	if (config != NULL) {
		nvlist_t *features;

		if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_FEATURES_FOR_READ,
		    &features) == 0) {
			VERIFY(nvlist_dup(features, &spa->spa_label_features,
			    0) == 0);
		}

		VERIFY(nvlist_dup(config, &spa->spa_config, 0) == 0);
	}

	if (spa->spa_label_features == NULL) {
		VERIFY(nvlist_alloc(&spa->spa_label_features, NV_UNIQUE_NAME,
		    KM_SLEEP) == 0);
	}

	spa->spa_debug = ((zfs_flags & ZFS_DEBUG_SPA) != 0);

	return (spa);
}

/*
 * Removes a spa_t from the namespace, freeing up any memory used.  Requires
 * spa_namespace_lock.  This is called only after the spa_t has been closed and
 * deactivated.
 */
void
spa_remove(spa_t *spa)
{
	spa_config_dirent_t *dp;

	ASSERT(MUTEX_HELD(&spa_namespace_lock));
	ASSERT(spa->spa_state == POOL_STATE_UNINITIALIZED);

	nvlist_free(spa->spa_config_splitting);

	avl_remove(&spa_namespace_avl, spa);
	cv_broadcast(&spa_namespace_cv);

	if (spa->spa_root) {
		spa_strfree(spa->spa_root);
		spa_active_count--;
	}

	while ((dp = list_head(&spa->spa_config_list)) != NULL) {
		list_remove(&spa->spa_config_list, dp);
		if (dp->scd_path != NULL)
			spa_strfree(dp->scd_path);
		kmem_free(dp, sizeof (spa_config_dirent_t));
	}

	list_destroy(&spa->spa_config_list);

	nvlist_free(spa->spa_label_features);
	nvlist_free(spa->spa_load_info);
	spa_config_set(spa, NULL);

#ifdef illumos
	mutex_enter(&cpu_lock);
	if (spa->spa_deadman_cycid != CYCLIC_NONE)
		cyclic_remove(spa->spa_deadman_cycid);
	mutex_exit(&cpu_lock);
	spa->spa_deadman_cycid = CYCLIC_NONE;
#else	/* !illumos */
#ifdef _KERNEL
	callout_drain(&spa->spa_deadman_cycid);
#endif
#endif

	refcount_destroy(&spa->spa_refcount);

	spa_config_lock_destroy(spa);

	for (int t = 0; t < TXG_SIZE; t++)
		bplist_destroy(&spa->spa_free_bplist[t]);

	cv_destroy(&spa->spa_async_cv);
	cv_destroy(&spa->spa_proc_cv);
	cv_destroy(&spa->spa_scrub_io_cv);
	cv_destroy(&spa->spa_suspend_cv);

	mutex_destroy(&spa->spa_async_lock);
	mutex_destroy(&spa->spa_errlist_lock);
	mutex_destroy(&spa->spa_errlog_lock);
	mutex_destroy(&spa->spa_history_lock);
	mutex_destroy(&spa->spa_proc_lock);
	mutex_destroy(&spa->spa_props_lock);
	mutex_destroy(&spa->spa_scrub_lock);
	mutex_destroy(&spa->spa_suspend_lock);
	mutex_destroy(&spa->spa_vdev_top_lock);

	kmem_free(spa, sizeof (spa_t));
}

/*
 * Given a pool, return the next pool in the namespace, or NULL if there is
 * none.  If 'prev' is NULL, return the first pool.
 */
spa_t *
spa_next(spa_t *prev)
{
	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	if (prev)
		return (AVL_NEXT(&spa_namespace_avl, prev));
	else
		return (avl_first(&spa_namespace_avl));
}
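
/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * walking every imported pool with spa_next() under the namespace lock.
 * Compiled out via #if 0.
 */
#if 0
static void
example_walk_pools(void)
{
	spa_t *spa = NULL;

	mutex_enter(&spa_namespace_lock);
	while ((spa = spa_next(spa)) != NULL)
		zfs_dbgmsg("pool: %s", spa_name(spa));
	mutex_exit(&spa_namespace_lock);
}
#endif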

/*
 * ==========================================================================
 * SPA refcount functions
 * ==========================================================================
 */

/*
 * Add a reference to the given spa_t.  Must have at least one reference, or
 * have the namespace lock held.
 */
void
spa_open_ref(spa_t *spa, void *tag)
{
	ASSERT(refcount_count(&spa->spa_refcount) >= spa->spa_minref ||
	    MUTEX_HELD(&spa_namespace_lock));
	(void) refcount_add(&spa->spa_refcount, tag);
}

/*
 * Remove a reference to the given spa_t.  Must have at least one reference, or
 * have the namespace lock held.
 */
void
spa_close(spa_t *spa, void *tag)
{
	ASSERT(refcount_count(&spa->spa_refcount) > spa->spa_minref ||
	    MUTEX_HELD(&spa_namespace_lock));
	(void) refcount_remove(&spa->spa_refcount, tag);
}

/*
 * Check to see if the spa refcount is zero.  Must be called with
 * spa_namespace_lock held.  We really compare against spa_minref, which is
 * the number of references acquired when opening a pool.
 */
boolean_t
spa_refcount_zero(spa_t *spa)
{
	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	return (refcount_count(&spa->spa_refcount) == spa->spa_minref);
}

/*
 * ==========================================================================
 * SPA spare and l2cache tracking
 * ==========================================================================
 */

/*
 * Hot spares and cache devices are tracked using the same code below,
 * for 'auxiliary' devices.
 */

typedef struct spa_aux {
	uint64_t	aux_guid;
	uint64_t	aux_pool;
	avl_node_t	aux_avl;
	int		aux_count;
} spa_aux_t;

static int
spa_aux_compare(const void *a, const void *b)
{
	const spa_aux_t *sa = a;
	const spa_aux_t *sb = b;

	if (sa->aux_guid < sb->aux_guid)
		return (-1);
	else if (sa->aux_guid > sb->aux_guid)
		return (1);
	else
		return (0);
}

void
spa_aux_add(vdev_t *vd, avl_tree_t *avl)
{
	avl_index_t where;
	spa_aux_t search;
	spa_aux_t *aux;

	search.aux_guid = vd->vdev_guid;
	if ((aux = avl_find(avl, &search, &where)) != NULL) {
		aux->aux_count++;
	} else {
		aux = kmem_zalloc(sizeof (spa_aux_t), KM_SLEEP);
		aux->aux_guid = vd->vdev_guid;
		aux->aux_count = 1;
		avl_insert(avl, aux, where);
	}
}

void
spa_aux_remove(vdev_t *vd, avl_tree_t *avl)
{
	spa_aux_t search;
	spa_aux_t *aux;
	avl_index_t where;

	search.aux_guid = vd->vdev_guid;
	aux = avl_find(avl, &search, &where);

	ASSERT(aux != NULL);

	if (--aux->aux_count == 0) {
		avl_remove(avl, aux);
		kmem_free(aux, sizeof (spa_aux_t));
	} else if (aux->aux_pool == spa_guid(vd->vdev_spa)) {
		aux->aux_pool = 0ULL;
	}
}

boolean_t
spa_aux_exists(uint64_t guid, uint64_t *pool, int *refcnt, avl_tree_t *avl)
{
	spa_aux_t search, *found;

	search.aux_guid = guid;
	found = avl_find(avl, &search, NULL);

	if (pool) {
		if (found)
			*pool = found->aux_pool;
		else
			*pool = 0ULL;
	}

	if (refcnt) {
		if (found)
			*refcnt = found->aux_count;
		else
			*refcnt = 0;
	}

	return (found != NULL);
}

void
spa_aux_activate(vdev_t *vd, avl_tree_t *avl)
{
	spa_aux_t search, *found;
	avl_index_t where;

	search.aux_guid = vd->vdev_guid;
	found = avl_find(avl, &search, &where);
	ASSERT(found != NULL);
	ASSERT(found->aux_pool == 0ULL);

	found->aux_pool = spa_guid(vd->vdev_spa);
}

/*
 * Spares are tracked globally due to the following constraints:
 *
 *	- A spare may be part of multiple pools.
 *	- A spare may be added to a pool even if it's actively in use within
 *	  another pool.
 *	- A spare in use in any pool can only be the source of a replacement if
 *	  the target is a spare in the same pool.
 *
 * We keep track of all spares on the system through the use of a reference
 * counted AVL tree.  When a vdev is added as a spare, or used as a replacement
 * spare, then we bump the reference count in the AVL tree.  In addition, we
 * set the 'vdev_isspare' member to indicate that the device is a spare (active
 * or inactive).  When a spare is made active (used to replace a device in the
 * pool), we also keep track of which pool it's been made a part of.
 *
 * The 'spa_spare_lock' protects the AVL tree.  These functions are normally
 * called under the spa_namespace lock as part of vdev reconfiguration.  The
 * separate spare lock exists for the status query path, which does not need to
 * be completely consistent with respect to other vdev configuration changes.
 */

static int
spa_spare_compare(const void *a, const void *b)
{
	return (spa_aux_compare(a, b));
}

void
spa_spare_add(vdev_t *vd)
{
	mutex_enter(&spa_spare_lock);
	ASSERT(!vd->vdev_isspare);
	spa_aux_add(vd, &spa_spare_avl);
	vd->vdev_isspare = B_TRUE;
	mutex_exit(&spa_spare_lock);
}

void
spa_spare_remove(vdev_t *vd)
{
	mutex_enter(&spa_spare_lock);
	ASSERT(vd->vdev_isspare);
	spa_aux_remove(vd, &spa_spare_avl);
	vd->vdev_isspare = B_FALSE;
	mutex_exit(&spa_spare_lock);
}

boolean_t
spa_spare_exists(uint64_t guid, uint64_t *pool, int *refcnt)
{
	boolean_t found;

	mutex_enter(&spa_spare_lock);
	found = spa_aux_exists(guid, pool, refcnt, &spa_spare_avl);
	mutex_exit(&spa_spare_lock);

	return (found);
}

void
spa_spare_activate(vdev_t *vd)
{
	mutex_enter(&spa_spare_lock);
	ASSERT(vd->vdev_isspare);
	spa_aux_activate(vd, &spa_spare_avl);
	mutex_exit(&spa_spare_lock);
}
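
/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * querying the global spare tracking.  A guid referenced by two pools
 * reports a refcnt of 2; 'pool' is nonzero only while the spare is
 * active somewhere.  Compiled out via #if 0.
 */
#if 0
static void
example_spare_query(uint64_t guid)
{
	uint64_t pool;
	int refcnt;

	if (spa_spare_exists(guid, &pool, &refcnt))
		zfs_dbgmsg("spare %llu: pool %llu, refcnt %d",
		    (u_longlong_t)guid, (u_longlong_t)pool, refcnt);
}
#endif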

/*
 * Level 2 ARC devices are tracked globally for the same reasons as spares.
 * Cache devices currently only support one pool per cache device, and so
 * for these devices the aux reference count is currently unused beyond 1.
 */

static int
spa_l2cache_compare(const void *a, const void *b)
{
	return (spa_aux_compare(a, b));
}

void
spa_l2cache_add(vdev_t *vd)
{
	mutex_enter(&spa_l2cache_lock);
	ASSERT(!vd->vdev_isl2cache);
	spa_aux_add(vd, &spa_l2cache_avl);
	vd->vdev_isl2cache = B_TRUE;
	mutex_exit(&spa_l2cache_lock);
}

void
spa_l2cache_remove(vdev_t *vd)
{
	mutex_enter(&spa_l2cache_lock);
	ASSERT(vd->vdev_isl2cache);
	spa_aux_remove(vd, &spa_l2cache_avl);
	vd->vdev_isl2cache = B_FALSE;
	mutex_exit(&spa_l2cache_lock);
}

boolean_t
spa_l2cache_exists(uint64_t guid, uint64_t *pool)
{
	boolean_t found;

	mutex_enter(&spa_l2cache_lock);
	found = spa_aux_exists(guid, pool, NULL, &spa_l2cache_avl);
	mutex_exit(&spa_l2cache_lock);

	return (found);
}

void
spa_l2cache_activate(vdev_t *vd)
{
	mutex_enter(&spa_l2cache_lock);
	ASSERT(vd->vdev_isl2cache);
	spa_aux_activate(vd, &spa_l2cache_avl);
	mutex_exit(&spa_l2cache_lock);
}

/*
 * ==========================================================================
 * SPA vdev locking
 * ==========================================================================
 */

/*
 * Lock the given spa_t for the purpose of adding or removing a vdev.
 * Grabs the global spa_namespace_lock plus the spa config lock for writing.
 * It returns the next transaction group for the spa_t.
 */
uint64_t
spa_vdev_enter(spa_t *spa)
{
	mutex_enter(&spa->spa_vdev_top_lock);
	mutex_enter(&spa_namespace_lock);
	return (spa_vdev_config_enter(spa));
}

/*
 * Internal implementation for spa_vdev_enter().  Used when a vdev
 * operation requires multiple syncs (e.g. removing a device) while
 * keeping the spa_namespace_lock held.
 */
uint64_t
spa_vdev_config_enter(spa_t *spa)
{
	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	spa_config_enter(spa, SCL_ALL, spa, RW_WRITER);

	return (spa_last_synced_txg(spa) + 1);
}

/*
 * Used in combination with spa_vdev_config_enter() to allow the syncing
 * of multiple transactions without releasing the spa_namespace_lock.
 */
void
spa_vdev_config_exit(spa_t *spa, vdev_t *vd, uint64_t txg, int error, char *tag)
{
	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	int config_changed = B_FALSE;

	ASSERT(txg > spa_last_synced_txg(spa));

	spa->spa_pending_vdev = NULL;

	/*
	 * Reassess the DTLs.
	 */
	vdev_dtl_reassess(spa->spa_root_vdev, 0, 0, B_FALSE);

	if (error == 0 && !list_is_empty(&spa->spa_config_dirty_list)) {
		config_changed = B_TRUE;
		spa->spa_config_generation++;
	}

	/*
	 * Verify the metaslab classes.
	 */
	ASSERT(metaslab_class_validate(spa_normal_class(spa)) == 0);
	ASSERT(metaslab_class_validate(spa_log_class(spa)) == 0);

	spa_config_exit(spa, SCL_ALL, spa);

	/*
	 * Panic the system if the specified tag requires it.  This
	 * is useful for ensuring that configurations are updated
	 * transactionally.
	 */
	if (zio_injection_enabled)
		zio_handle_panic_injection(spa, tag, 0);

	/*
	 * Note: this txg_wait_synced() is important because it ensures
	 * that there won't be more than one config change per txg.
	 * This allows us to use the txg as the generation number.
	 */
	if (error == 0)
		txg_wait_synced(spa->spa_dsl_pool, txg);

	if (vd != NULL) {
		ASSERT(!vd->vdev_detached || vd->vdev_dtl_sm == NULL);
		spa_config_enter(spa, SCL_ALL, spa, RW_WRITER);
		vdev_free(vd);
		spa_config_exit(spa, SCL_ALL, spa);
	}

	/*
	 * If the config changed, update the config cache.
	 */
	if (config_changed)
		spa_config_sync(spa, B_FALSE, B_TRUE);
}

/*
 * Unlock the spa_t after adding or removing a vdev.  Besides undoing the
 * locking of spa_vdev_enter(), we also want to make sure the transactions
 * have synced to disk, and then update the global configuration cache with
 * the new information.
 */
int
spa_vdev_exit(spa_t *spa, vdev_t *vd, uint64_t txg, int error)
{
	spa_vdev_config_exit(spa, vd, txg, error, FTAG);
	mutex_exit(&spa_namespace_lock);
	mutex_exit(&spa->spa_vdev_top_lock);

	return (error);
}
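
/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * the usage pattern these wrappers are designed for.  All paths funnel
 * through spa_vdev_exit(), which also waits for the txg to sync.
 * Compiled out via #if 0.
 */
#if 0
static int
example_vdev_change(spa_t *spa)
{
	uint64_t txg;

	txg = spa_vdev_enter(spa);	/* namespace lock + SCL_ALL writer */

	/* ... add, remove, attach, or detach devices here ... */

	return (spa_vdev_exit(spa, NULL, txg, 0));
}
#endif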

/*
 * Lock the given spa_t for the purpose of changing vdev state.
 */
void
spa_vdev_state_enter(spa_t *spa, int oplocks)
{
	int locks = SCL_STATE_ALL | oplocks;

	/*
	 * Root pools may need to read from the underlying devfs filesystem
	 * when opening up a vdev.  Unfortunately if we're holding the
	 * SCL_ZIO lock it will result in a deadlock when we try to issue
	 * the read from the root filesystem.  Instead we "prefetch"
	 * the associated vnodes that we need prior to opening the
	 * underlying devices and cache them so that we can prevent
	 * any I/O when we are doing the actual open.
	 */
	if (spa_is_root(spa)) {
		int low = locks & ~(SCL_ZIO - 1);
		int high = locks & ~low;

		spa_config_enter(spa, high, spa, RW_WRITER);
		vdev_hold(spa->spa_root_vdev);
		spa_config_enter(spa, low, spa, RW_WRITER);
	} else {
		spa_config_enter(spa, locks, spa, RW_WRITER);
	}
	spa->spa_vdev_locks = locks;
}

int
spa_vdev_state_exit(spa_t *spa, vdev_t *vd, int error)
{
	boolean_t config_changed = B_FALSE;

	if (vd != NULL || error == 0)
		vdev_dtl_reassess(vd ? vd->vdev_top : spa->spa_root_vdev,
		    0, 0, B_FALSE);

	if (vd != NULL) {
		vdev_state_dirty(vd->vdev_top);
		config_changed = B_TRUE;
		spa->spa_config_generation++;
	}

	if (spa_is_root(spa))
		vdev_rele(spa->spa_root_vdev);

	ASSERT3U(spa->spa_vdev_locks, >=, SCL_STATE_ALL);
	spa_config_exit(spa, spa->spa_vdev_locks, spa);

	/*
	 * If anything changed, wait for it to sync.  This ensures that,
	 * from the system administrator's perspective, zpool(1M) commands
	 * are synchronous.  This is important for things like zpool offline:
	 * when the command completes, you expect no further I/O from ZFS.
	 */
	if (vd != NULL)
		txg_wait_synced(spa->spa_dsl_pool, 0);

	/*
	 * If the config changed, update the config cache.
	 */
	if (config_changed) {
		mutex_enter(&spa_namespace_lock);
		spa_config_sync(spa, B_FALSE, B_TRUE);
		mutex_exit(&spa_namespace_lock);
	}

	return (error);
}

/*
 * ==========================================================================
 * Miscellaneous functions
 * ==========================================================================
 */

void
spa_activate_mos_feature(spa_t *spa, const char *feature)
{
	(void) nvlist_add_boolean(spa->spa_label_features, feature);
	vdev_config_dirty(spa->spa_root_vdev);
}

void
spa_deactivate_mos_feature(spa_t *spa, const char *feature)
{
	(void) nvlist_remove_all(spa->spa_label_features, feature);
	vdev_config_dirty(spa->spa_root_vdev);
}

/*
 * Rename a spa_t.
 */
int
spa_rename(const char *name, const char *newname)
{
	spa_t *spa;
	int err;

	/*
	 * Lookup the spa_t and grab the config lock for writing.  We need to
	 * actually open the pool so that we can sync out the necessary labels.
	 * It's OK to call spa_open() with the namespace lock held because we
	 * allow recursive calls for other reasons.
	 */
	mutex_enter(&spa_namespace_lock);
	if ((err = spa_open(name, &spa, FTAG)) != 0) {
		mutex_exit(&spa_namespace_lock);
		return (err);
	}

	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);

	avl_remove(&spa_namespace_avl, spa);
	(void) strlcpy(spa->spa_name, newname, sizeof (spa->spa_name));
	avl_add(&spa_namespace_avl, spa);

	/*
	 * Sync all labels to disk with the new names by marking the root vdev
	 * dirty and waiting for it to sync.  It will pick up the new pool name
	 * during the sync.
	 */
	vdev_config_dirty(spa->spa_root_vdev);

	spa_config_exit(spa, SCL_ALL, FTAG);

	txg_wait_synced(spa->spa_dsl_pool, 0);

	/*
	 * Sync the updated config cache.
	 */
	spa_config_sync(spa, B_FALSE, B_TRUE);

	spa_close(spa, FTAG);

	mutex_exit(&spa_namespace_lock);

	return (0);
}

/*
 * Return the spa_t associated with given pool_guid, if it exists.  If
 * device_guid is non-zero, determine whether the pool exists *and* contains
 * a device with the specified device_guid.
 */
spa_t *
spa_by_guid(uint64_t pool_guid, uint64_t device_guid)
{
	spa_t *spa;
	avl_tree_t *t = &spa_namespace_avl;

	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	for (spa = avl_first(t); spa != NULL; spa = AVL_NEXT(t, spa)) {
		if (spa->spa_state == POOL_STATE_UNINITIALIZED)
			continue;
		if (spa->spa_root_vdev == NULL)
			continue;
		if (spa_guid(spa) == pool_guid) {
			if (device_guid == 0)
				break;

			if (vdev_lookup_by_guid(spa->spa_root_vdev,
			    device_guid) != NULL)
				break;

			/*
			 * Check any devices we may be in the process of adding.
			 */
			if (spa->spa_pending_vdev) {
				if (vdev_lookup_by_guid(spa->spa_pending_vdev,
				    device_guid) != NULL)
					break;
			}
		}
	}

	return (spa);
}

/*
 * Determine whether a pool with the given pool_guid exists.
 */
boolean_t
spa_guid_exists(uint64_t pool_guid, uint64_t device_guid)
{
	return (spa_by_guid(pool_guid, device_guid) != NULL);
}

char *
spa_strdup(const char *s)
{
	size_t len;
	char *new;

	len = strlen(s);
	new = kmem_alloc(len + 1, KM_SLEEP);
	bcopy(s, new, len);
	new[len] = '\0';

	return (new);
}

void
spa_strfree(char *s)
{
	kmem_free(s, strlen(s) + 1);
}

uint64_t
spa_get_random(uint64_t range)
{
	uint64_t r;

	ASSERT(range != 0);

	(void) random_get_pseudo_bytes((void *)&r, sizeof (uint64_t));

	return (r % range);
}

uint64_t
spa_generate_guid(spa_t *spa)
{
	uint64_t guid = spa_get_random(-1ULL);

	if (spa != NULL) {
		while (guid == 0 || spa_guid_exists(spa_guid(spa), guid))
			guid = spa_get_random(-1ULL);
	} else {
		while (guid == 0 || spa_guid_exists(guid, 0))
			guid = spa_get_random(-1ULL);
	}

	return (guid);
}

void
sprintf_blkptr(char *buf, const blkptr_t *bp)
{
	char type[256];
	char *checksum = NULL;
	char *compress = NULL;

	if (bp != NULL) {
		if (BP_GET_TYPE(bp) & DMU_OT_NEWTYPE) {
			dmu_object_byteswap_t bswap =
			    DMU_OT_BYTESWAP(BP_GET_TYPE(bp));
			(void) snprintf(type, sizeof (type), "bswap %s %s",
			    DMU_OT_IS_METADATA(BP_GET_TYPE(bp)) ?
			    "metadata" : "data",
			    dmu_ot_byteswap[bswap].ob_name);
		} else {
			(void) strlcpy(type, dmu_ot[BP_GET_TYPE(bp)].ot_name,
			    sizeof (type));
		}
		checksum = zio_checksum_table[BP_GET_CHECKSUM(bp)].ci_name;
		compress = zio_compress_table[BP_GET_COMPRESS(bp)].ci_name;
	}

	SPRINTF_BLKPTR(snprintf, ' ', buf, bp, type, checksum, compress);
}

void
spa_freeze(spa_t *spa)
{
	uint64_t freeze_txg = 0;

	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
	if (spa->spa_freeze_txg == UINT64_MAX) {
		freeze_txg = spa_last_synced_txg(spa) + TXG_SIZE;
		spa->spa_freeze_txg = freeze_txg;
	}
	spa_config_exit(spa, SCL_ALL, FTAG);
	if (freeze_txg != 0)
		txg_wait_synced(spa_get_dsl(spa), freeze_txg);
}

void
zfs_panic_recover(const char *fmt, ...)
{
	va_list adx;

	va_start(adx, fmt);
	vcmn_err(zfs_recover ? CE_WARN : CE_PANIC, fmt, adx);
	va_end(adx);
}

/*
 * This is a stripped-down version of strtoull, suitable only for converting
 * lowercase hexadecimal numbers that don't overflow.
 */
uint64_t
zfs_strtonum(const char *str, char **nptr)
{
	uint64_t val = 0;
	char c;
	int digit;

	while ((c = *str) != '\0') {
		if (c >= '0' && c <= '9')
			digit = c - '0';
		else if (c >= 'a' && c <= 'f')
			digit = 10 + c - 'a';
		else
			break;

		val *= 16;
		val += digit;

		str++;
	}

	if (nptr)
		*nptr = (char *)str;

	return (val);
}

/*
 * ==========================================================================
 * Accessor functions
 * ==========================================================================
 */

boolean_t
spa_shutting_down(spa_t *spa)
{
	return (spa->spa_async_suspended);
}

dsl_pool_t *
spa_get_dsl(spa_t *spa)
{
	return (spa->spa_dsl_pool);
}

boolean_t
spa_is_initializing(spa_t *spa)
{
	return (spa->spa_is_initializing);
}

blkptr_t *
spa_get_rootblkptr(spa_t *spa)
{
	return (&spa->spa_ubsync.ub_rootbp);
}

void
spa_set_rootblkptr(spa_t *spa, const blkptr_t *bp)
{
	spa->spa_uberblock.ub_rootbp = *bp;
}

void
spa_altroot(spa_t *spa, char *buf, size_t buflen)
{
	if (spa->spa_root == NULL)
		buf[0] = '\0';
	else
		(void) strncpy(buf, spa->spa_root, buflen);
}

int
spa_sync_pass(spa_t *spa)
{
	return (spa->spa_sync_pass);
}

char *
spa_name(spa_t *spa)
{
	return (spa->spa_name);
}

uint64_t
spa_guid(spa_t *spa)
{
	dsl_pool_t *dp = spa_get_dsl(spa);
	uint64_t guid;

	/*
	 * If we fail to parse the config during spa_load(), we can go through
	 * the error path (which posts an ereport) and end up here with no root
	 * vdev.  We stash the original pool guid in 'spa_config_guid' to
	 * handle this case.
	 */
	if (spa->spa_root_vdev == NULL)
		return (spa->spa_config_guid);

	guid = spa->spa_last_synced_guid != 0 ?
	    spa->spa_last_synced_guid : spa->spa_root_vdev->vdev_guid;

	/*
	 * Return the most recently synced out guid unless we're
	 * in syncing context.
	 */
	if (dp && dsl_pool_sync_context(dp))
		return (spa->spa_root_vdev->vdev_guid);
	else
		return (guid);
}

uint64_t
spa_load_guid(spa_t *spa)
{
	/*
	 * This is a GUID that exists solely as a reference for the
	 * purposes of the arc.  It is generated at load time, and
	 * is never written to persistent storage.
	 */
	return (spa->spa_load_guid);
}

uint64_t
spa_last_synced_txg(spa_t *spa)
{
	return (spa->spa_ubsync.ub_txg);
}

uint64_t
spa_first_txg(spa_t *spa)
{
	return (spa->spa_first_txg);
}

uint64_t
spa_syncing_txg(spa_t *spa)
{
	return (spa->spa_syncing_txg);
}

pool_state_t
spa_state(spa_t *spa)
{
	return (spa->spa_state);
}

spa_load_state_t
spa_load_state(spa_t *spa)
{
	return (spa->spa_load_state);
}

uint64_t
spa_freeze_txg(spa_t *spa)
{
	return (spa->spa_freeze_txg);
}

/* ARGSUSED */
uint64_t
spa_get_asize(spa_t *spa, uint64_t lsize)
{
	return (lsize * spa_asize_inflation);
}

uint64_t
spa_get_dspace(spa_t *spa)
{
	return (spa->spa_dspace);
}

void
spa_update_dspace(spa_t *spa)
{
	spa->spa_dspace = metaslab_class_get_dspace(spa_normal_class(spa)) +
	    ddt_get_dedup_dspace(spa);
}

/*
 * Return the failure mode that has been set for this pool.  The default
 * behavior will be to block all I/Os when a complete failure occurs.
 */
uint8_t
spa_get_failmode(spa_t *spa)
{
	return (spa->spa_failmode);
}

boolean_t
spa_suspended(spa_t *spa)
{
	return (spa->spa_suspended);
}

uint64_t
spa_version(spa_t *spa)
{
	return (spa->spa_ubsync.ub_version);
}

boolean_t
spa_deflate(spa_t *spa)
{
	return (spa->spa_deflate);
}

metaslab_class_t *
spa_normal_class(spa_t *spa)
{
	return (spa->spa_normal_class);
}

metaslab_class_t *
spa_log_class(spa_t *spa)
{
	return (spa->spa_log_class);
}

int
spa_max_replication(spa_t *spa)
{
	/*
	 * As of SPA_VERSION == SPA_VERSION_DITTO_BLOCKS, we are able to
	 * handle BPs with more than one DVA allocated.  Set our max
	 * replication level accordingly.
	 */
	if (spa_version(spa) < SPA_VERSION_DITTO_BLOCKS)
		return (1);
	return (MIN(SPA_DVAS_PER_BP, spa_max_replication_override));
}

int
spa_prev_software_version(spa_t *spa)
{
	return (spa->spa_prev_software_version);
}

uint64_t
spa_deadman_synctime(spa_t *spa)
{
	return (spa->spa_deadman_synctime);
}

uint64_t
dva_get_dsize_sync(spa_t *spa, const dva_t *dva)
{
	uint64_t asize = DVA_GET_ASIZE(dva);
	uint64_t dsize = asize;

	ASSERT(spa_config_held(spa, SCL_ALL, RW_READER) != 0);

	if (asize != 0 && spa->spa_deflate) {
		vdev_t *vd = vdev_lookup_top(spa, DVA_GET_VDEV(dva));
		dsize = (asize >> SPA_MINBLOCKSHIFT) * vd->vdev_deflate_ratio;
	}

	return (dsize);
}

uint64_t
bp_get_dsize_sync(spa_t *spa, const blkptr_t *bp)
{
	uint64_t dsize = 0;

	for (int d = 0; d < SPA_DVAS_PER_BP; d++)
		dsize += dva_get_dsize_sync(spa, &bp->blk_dva[d]);

	return (dsize);
}

uint64_t
bp_get_dsize(spa_t *spa, const blkptr_t *bp)
{
	uint64_t dsize = 0;

	spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);

	for (int d = 0; d < SPA_DVAS_PER_BP; d++)
		dsize += dva_get_dsize_sync(spa, &bp->blk_dva[d]);

	spa_config_exit(spa, SCL_VDEV, FTAG);

	return (dsize);
}
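
/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * the only difference between the two dsize functions above is locking.
 * Callers already inside the config lock use the _sync variant;
 * bp_get_dsize() takes SCL_VDEV itself, which is safe in any context
 * (see the lock-ordering rules at the top of this file).  Compiled out
 * via #if 0.
 */
#if 0
static uint64_t
example_dsize(spa_t *spa, const blkptr_t *bp)
{
	if (spa_config_held(spa, SCL_VDEV, RW_READER))
		return (bp_get_dsize_sync(spa, bp));
	return (bp_get_dsize(spa, bp));
}
#endif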

/*
 * ==========================================================================
 * Initialization and Termination
 * ==========================================================================
 */

static int
spa_name_compare(const void *a1, const void *a2)
{
	const spa_t *s1 = a1;
	const spa_t *s2 = a2;
	int s;

	s = strcmp(s1->spa_name, s2->spa_name);
	if (s > 0)
		return (1);
	if (s < 0)
		return (-1);
	return (0);
}

int
spa_busy(void)
{
	return (spa_active_count);
}

void
spa_boot_init()
{
	spa_config_load();
}

#ifdef _KERNEL
EVENTHANDLER_DEFINE(mountroot, spa_boot_init, NULL, 0);
#endif

void
spa_init(int mode)
{
	mutex_init(&spa_namespace_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa_spare_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa_l2cache_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&spa_namespace_cv, NULL, CV_DEFAULT, NULL);

	avl_create(&spa_namespace_avl, spa_name_compare, sizeof (spa_t),
	    offsetof(spa_t, spa_avl));

	avl_create(&spa_spare_avl, spa_spare_compare, sizeof (spa_aux_t),
	    offsetof(spa_aux_t, aux_avl));

	avl_create(&spa_l2cache_avl, spa_l2cache_compare, sizeof (spa_aux_t),
	    offsetof(spa_aux_t, aux_avl));

	spa_mode_global = mode;

#ifdef illumos
#ifdef _KERNEL
	spa_arch_init();
#else
	if (spa_mode_global != FREAD && dprintf_find_string("watch")) {
		arc_procfd = open("/proc/self/ctl", O_WRONLY);
		if (arc_procfd == -1) {
			perror("could not enable watchpoints: "
			    "opening /proc/self/ctl failed: ");
		} else {
			arc_watch = B_TRUE;
		}
	}
#endif
#endif	/* illumos */
	refcount_sysinit();
	unique_init();
	range_tree_init();
	zio_init();
	lz4_init();
	dmu_init();
	zil_init();
	vdev_cache_stat_init();
	zfs_prop_init();
	zpool_prop_init();
	zpool_feature_init();
	spa_config_load();
	l2arc_start();
#ifndef illumos
#ifdef _KERNEL
	zfs_deadman_init();
#endif
#endif	/* !illumos */
}

void
spa_fini(void)
{
	l2arc_stop();

	spa_evict_all();

	vdev_cache_stat_fini();
	zil_fini();
	dmu_fini();
	lz4_fini();
	zio_fini();
	range_tree_fini();
	unique_fini();
	refcount_fini();

	avl_destroy(&spa_namespace_avl);
	avl_destroy(&spa_spare_avl);
	avl_destroy(&spa_l2cache_avl);

	cv_destroy(&spa_namespace_cv);
	mutex_destroy(&spa_namespace_lock);
	mutex_destroy(&spa_spare_lock);
	mutex_destroy(&spa_l2cache_lock);
}

/*
 * Return whether this pool has slogs.  No locking needed.
 * It's not a problem if the wrong answer is returned as it's only for
 * performance and not correctness.
 */
boolean_t
spa_has_slogs(spa_t *spa)
{
	return (spa->spa_log_class->mc_rotor != NULL);
}

spa_log_state_t
spa_get_log_state(spa_t *spa)
{
	return (spa->spa_log_state);
}

void
spa_set_log_state(spa_t *spa, spa_log_state_t state)
{
	spa->spa_log_state = state;
}

boolean_t
spa_is_root(spa_t *spa)
{
	return (spa->spa_is_root);
}

boolean_t
spa_writeable(spa_t *spa)
{
	return (!!(spa->spa_mode & FWRITE));
}

int
spa_mode(spa_t *spa)
{
	return (spa->spa_mode);
}

uint64_t
spa_bootfs(spa_t *spa)
{
	return (spa->spa_bootfs);
}

uint64_t
spa_delegation(spa_t *spa)
{
	return (spa->spa_delegation);
}

objset_t *
spa_meta_objset(spa_t *spa)
{
	return (spa->spa_meta_objset);
}

enum zio_checksum
spa_dedup_checksum(spa_t *spa)
{
	return (spa->spa_dedup_checksum);
}

/*
 * Reset pool scan stats per scan pass (or reboot).
 */
void
spa_scan_stat_init(spa_t *spa)
{
	/* data not stored on disk */
	spa->spa_scan_pass_start = gethrestime_sec();
	spa->spa_scan_pass_exam = 0;
	vdev_scan_stat_init(spa->spa_root_vdev);
}

/*
 * Get scan stats for zpool status reports
 */
int
spa_scan_get_stats(spa_t *spa, pool_scan_stat_t *ps)
{
	dsl_scan_t *scn = spa->spa_dsl_pool ? spa->spa_dsl_pool->dp_scan : NULL;

	if (scn == NULL || scn->scn_phys.scn_func == POOL_SCAN_NONE)
		return (SET_ERROR(ENOENT));
	bzero(ps, sizeof (pool_scan_stat_t));

	/* data stored on disk */
	ps->pss_func = scn->scn_phys.scn_func;
	ps->pss_start_time = scn->scn_phys.scn_start_time;
	ps->pss_end_time = scn->scn_phys.scn_end_time;
	ps->pss_to_examine = scn->scn_phys.scn_to_examine;
	ps->pss_examined = scn->scn_phys.scn_examined;
	ps->pss_to_process = scn->scn_phys.scn_to_process;
	ps->pss_processed = scn->scn_phys.scn_processed;
	ps->pss_errors = scn->scn_phys.scn_errors;
	ps->pss_state = scn->scn_phys.scn_state;

	/* data not stored on disk */
	ps->pss_pass_start = spa->spa_scan_pass_start;
	ps->pss_pass_exam = spa->spa_scan_pass_exam;

	return (0);
}

boolean_t
spa_debug_enabled(spa_t *spa)
{
	return (spa->spa_debug);
}
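
/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * a typical consumer of spa_scan_get_stats(), as on the zpool(1M) status
 * path.  ENOENT simply means no scan has ever run on this pool.
 * Compiled out via #if 0.
 */
#if 0
static void
example_report_scan(spa_t *spa)
{
	pool_scan_stat_t ps;

	if (spa_scan_get_stats(spa, &ps) == 0)
		zfs_dbgmsg("scan func %llu: %llu/%llu bytes examined",
		    (u_longlong_t)ps.pss_func,
		    (u_longlong_t)ps.pss_examined,
		    (u_longlong_t)ps.pss_to_examine);
}
#endif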