spa_misc.c revision 314668
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011, 2015 by Delphix. All rights reserved.
 * Copyright 2015 Nexenta Systems, Inc. All rights reserved.
 * Copyright 2013 Martin Matuska <mm@FreeBSD.org>. All rights reserved.
 * Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
 * Copyright 2013 Saso Kiselkov. All rights reserved.
 * Copyright (c) 2014 Integros [integros.com]
 */

#include <sys/zfs_context.h>
#include <sys/spa_impl.h>
#include <sys/spa_boot.h>
#include <sys/zio.h>
#include <sys/zio_checksum.h>
#include <sys/zio_compress.h>
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/zap.h>
#include <sys/zil.h>
#include <sys/vdev_impl.h>
#include <sys/metaslab.h>
#include <sys/uberblock_impl.h>
#include <sys/txg.h>
#include <sys/avl.h>
#include <sys/unique.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_scan.h>
#include <sys/fs/zfs.h>
#include <sys/metaslab_impl.h>
#include <sys/arc.h>
#include <sys/ddt.h>
#include "zfs_prop.h"
#include <sys/zfeature.h>

/*
 * SPA locking
 *
 * There are four basic locks for managing spa_t structures:
 *
 * spa_namespace_lock (global mutex)
 *
 *	This lock must be acquired to do any of the following:
 *
 *	- Lookup a spa_t by name
 *	- Add or remove a spa_t from the namespace
 *	- Increase spa_refcount from non-zero
 *	- Check if spa_refcount is zero
 *	- Rename a spa_t
 *	- add/remove/attach/detach devices
 *	- Held for the duration of create/destroy/import/export
 *
 *	It does not need to handle recursion.  A create or destroy may
 *	reference objects (files or zvols) in other pools, but by
 *	definition they must have an existing reference, and will never need
 *	to lookup a spa_t by name.
 *
 * spa_refcount (per-spa refcount_t protected by mutex)
 *
 *	This reference count keeps track of any active users of the spa_t.  The
 *	spa_t cannot be destroyed or freed while this is non-zero.  Internally,
 *	the refcount is never really 'zero' - opening a pool implicitly keeps
 *	some references in the DMU.  Internally we check against spa_minref, but
 *	present the image of a zero/non-zero value to consumers.
 *
 * spa_config_lock[] (per-spa array of rwlocks)
 *
 *	This protects the spa_t from config changes, and must be held in
 *	the following circumstances:
 *
 *	- RW_READER to perform I/O to the spa
 *	- RW_WRITER to change the vdev config
 *
 * The locking order is fairly straightforward:
 *
 *		spa_namespace_lock	->	spa_refcount
 *
 *	The namespace lock must be acquired to increase the refcount from 0
 *	or to check if it is zero.
 *
 *		spa_refcount		->	spa_config_lock[]
 *
 *	There must be at least one valid reference on the spa_t to acquire
 *	the config lock.
 *
 *		spa_namespace_lock	->	spa_config_lock[]
 *
 *	The namespace lock must always be taken before the config lock.
 *
 *
 * The spa_namespace_lock can be acquired directly and is globally visible.
 *
 * The namespace is manipulated using the following functions, all of which
 * require the spa_namespace_lock to be held.
 *
 *	spa_lookup()		Lookup a spa_t by name.
 *
 *	spa_add()		Create a new spa_t in the namespace.
 *
 *	spa_remove()		Remove a spa_t from the namespace.  This also
 *				frees up any memory associated with the spa_t.
 *
 *	spa_next()		Returns the next spa_t in the system, or the
 *				first if NULL is passed.
 *
 *	spa_evict_all()		Shutdown and remove all spa_t structures in
 *				the system.
 *
 *	spa_guid_exists()	Determine whether a pool/device guid exists.
 *
 * The spa_refcount is manipulated using the following functions:
 *
 *	spa_open_ref()		Adds a reference to the given spa_t.  Must be
 *				called with spa_namespace_lock held if the
 *				refcount is currently zero.
 *
 *	spa_close()		Remove a reference from the spa_t.  This will
 *				not free the spa_t or remove it from the
 *				namespace.  No locking is required.
 *
 *	spa_refcount_zero()	Returns true if the refcount is currently
 *				zero.  Must be called with spa_namespace_lock
 *				held.
 *
 * The spa_config_lock[] is an array of rwlocks, ordered as follows:
 * SCL_CONFIG > SCL_STATE > SCL_ALLOC > SCL_ZIO > SCL_FREE > SCL_VDEV.
 * spa_config_lock[] is manipulated with spa_config_{enter,exit,held}().
 *
 * To read the configuration, it suffices to hold one of these locks as reader.
 * To modify the configuration, you must hold all locks as writer.  To modify
 * vdev state without altering the vdev tree's topology (e.g. online/offline),
 * you must hold SCL_STATE and SCL_ZIO as writer.
 *
 * We use these distinct config locks to avoid recursive lock entry.
 * For example, spa_sync() (which holds SCL_CONFIG as reader) induces
 * block allocations (SCL_ALLOC), which may require reading space maps
 * from disk (dmu_read() -> zio_read() -> SCL_ZIO).
 *
 * The spa config locks cannot be normal rwlocks because we need the
 * ability to hand off ownership.  For example, SCL_ZIO is acquired
 * by the issuing thread and later released by an interrupt thread.
 * They do, however, obey the usual write-wanted semantics to prevent
 * writer (i.e. system administrator) starvation.
 *
 * The lock acquisition rules are as follows:
 *
 * SCL_CONFIG
 *	Protects changes to the vdev tree topology, such as vdev
 *	add/remove/attach/detach.  Protects the dirty config list
 *	(spa_config_dirty_list) and the set of spares and l2arc devices.
 *
 * SCL_STATE
 *	Protects changes to pool state and vdev state, such as vdev
 *	online/offline/fault/degrade/clear.  Protects the dirty state list
 *	(spa_state_dirty_list) and global pool state (spa_state).
 *
 * SCL_ALLOC
 *	Protects changes to metaslab groups and classes.
 *	Held as reader by metaslab_alloc() and metaslab_claim().
 *
 * SCL_ZIO
 *	Held by bp-level zios (those which have no io_vd upon entry)
 *	to prevent changes to the vdev tree.  The bp-level zio implicitly
 *	protects all of its vdev child zios, which do not hold SCL_ZIO.
 *
 * SCL_FREE
 *	Protects changes to metaslab groups and classes.
 *	Held as reader by metaslab_free().  SCL_FREE is distinct from
 *	SCL_ALLOC, and lower than SCL_ZIO, so that we can safely free
 *	blocks in zio_done() while another i/o that holds either
 *	SCL_ALLOC or SCL_ZIO is waiting for this i/o to complete.
 *
 * SCL_VDEV
 *	Held as reader to prevent changes to the vdev tree during trivial
 *	inquiries such as bp_get_dsize().  SCL_VDEV is distinct from the
 *	other locks, and lower than all of them, to ensure that it's safe
 *	to acquire regardless of caller context.
 *
 * In addition, the following rules apply:
 *
 * (a)	spa_props_lock protects pool properties, spa_config and spa_config_list.
 *	The lock ordering is SCL_CONFIG > spa_props_lock.
 *
 * (b)	I/O operations on leaf vdevs.  For any zio operation that takes
 *	an explicit vdev_t argument -- such as zio_ioctl(), zio_read_phys(),
 *	or zio_write_phys() -- the caller must ensure that the config cannot
 *	change in the interim, and that the vdev cannot be reopened.
 *	SCL_STATE as reader suffices for both.
 *
 * The vdev configuration is protected by spa_vdev_enter() / spa_vdev_exit().
 *
 *	spa_vdev_enter()	Acquire the namespace lock and the config lock
 *				for writing.
 *
 *	spa_vdev_exit()		Release the config lock, wait for all I/O
 *				to complete, sync the updated configs to the
 *				cache, and release the namespace lock.
 *
 * vdev state is protected by spa_vdev_state_enter() / spa_vdev_state_exit().
 * Like spa_vdev_enter/exit, these are convenience wrappers -- the actual
 * locking is, always, based on spa_namespace_lock and spa_config_lock[].
 *
 * spa_rename() is also implemented within this file since it requires
 * manipulation of the namespace.
 */
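/*
 * A minimal sketch of the ordering above, assuming a hypothetical pool
 * named "tank" (illustrative only; real callers typically go through
 * spa_open()):
 *
 *	mutex_enter(&spa_namespace_lock);
 *	spa_t *spa = spa_lookup("tank");
 *	if (spa != NULL)
 *		spa_open_ref(spa, FTAG);
 *	mutex_exit(&spa_namespace_lock);
 *	if (spa != NULL) {
 *		spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
 *		... trivial vdev-tree inquiries ...
 *		spa_config_exit(spa, SCL_VDEV, FTAG);
 *		spa_close(spa, FTAG);
 *	}
 */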

static avl_tree_t spa_namespace_avl;
kmutex_t spa_namespace_lock;
static kcondvar_t spa_namespace_cv;
static int spa_active_count;
int spa_max_replication_override = SPA_DVAS_PER_BP;

static kmutex_t spa_spare_lock;
static avl_tree_t spa_spare_avl;
static kmutex_t spa_l2cache_lock;
static avl_tree_t spa_l2cache_avl;

kmem_cache_t *spa_buffer_pool;
int spa_mode_global;

#ifdef ZFS_DEBUG
/* Everything except dprintf and spa is on by default in debug builds */
int zfs_flags = ~(ZFS_DEBUG_DPRINTF | ZFS_DEBUG_SPA);
#else
int zfs_flags = 0;
#endif
SYSCTL_DECL(_debug);
TUNABLE_INT("debug.zfs_flags", &zfs_flags);
SYSCTL_INT(_debug, OID_AUTO, zfs_flags, CTLFLAG_RWTUN, &zfs_flags, 0,
    "ZFS debug flags.");

/*
 * zfs_recover can be set to nonzero to attempt to recover from
 * otherwise-fatal errors, typically caused by on-disk corruption.  When
 * set, calls to zfs_panic_recover() will turn into warning messages.
 * This should only be used as a last resort, as it typically results
 * in leaked space, or worse.
 */
boolean_t zfs_recover = B_FALSE;
SYSCTL_DECL(_vfs_zfs);
TUNABLE_INT("vfs.zfs.recover", &zfs_recover);
SYSCTL_INT(_vfs_zfs, OID_AUTO, recover, CTLFLAG_RWTUN, &zfs_recover, 0,
    "Try to recover from otherwise-fatal errors.");

static int
sysctl_vfs_zfs_debug_flags(SYSCTL_HANDLER_ARGS)
{
	int err, val;

	val = zfs_flags;
	err = sysctl_handle_int(oidp, &val, 0, req);
	if (err != 0 || req->newptr == NULL)
		return (err);

	/*
	 * ZFS_DEBUG_MODIFY must be enabled prior to boot so all
	 * arc buffers in the system have the necessary additional
	 * checksum data.  However, it is safe to disable at any
	 * time.
	 */
	if (!(zfs_flags & ZFS_DEBUG_MODIFY))
		val &= ~ZFS_DEBUG_MODIFY;
	zfs_flags = val;

	return (0);
}
TUNABLE_INT("vfs.zfs.debug_flags", &zfs_flags);
SYSCTL_PROC(_vfs_zfs, OID_AUTO, debug_flags,
    CTLTYPE_UINT | CTLFLAG_MPSAFE | CTLFLAG_RW, 0, sizeof(int),
    sysctl_vfs_zfs_debug_flags, "IU", "Debug flags for ZFS testing.");

/*
 * If destroy encounters an EIO while reading metadata (e.g. indirect
 * blocks), space referenced by the missing metadata can not be freed.
 * Normally this causes the background destroy to become "stalled", as
 * it is unable to make forward progress.  While in this stalled state,
 * all remaining space to free from the error-encountering filesystem is
 * "temporarily leaked".  Set this flag to cause it to ignore the EIO,
 * permanently leak the space from indirect blocks that can not be read,
 * and continue to free everything else that it can.
 *
 * The default, "stalling" behavior is useful if the storage partially
 * fails (i.e. some but not all i/os fail), and then later recovers.  In
 * this case, we will be able to continue pool operations while it is
 * partially failed, and when it recovers, we can continue to free the
 * space, with no leaks.  However, note that this case is actually
 * fairly rare.
 *
 * Typically pools either (a) fail completely (but perhaps temporarily,
 * e.g. a top-level vdev going offline), or (b) have localized,
 * permanent errors (e.g. disk returns the wrong data due to bit flip or
 * firmware bug).  In case (a), this setting does not matter because the
 * pool will be suspended and the sync thread will not be able to make
 * forward progress regardless.  In case (b), because the error is
 * permanent, the best we can do is leak the minimum amount of space,
 * which is what setting this flag will do.  Therefore, it is reasonable
 * for this flag to normally be set, but we chose the more conservative
 * approach of not setting it, so that there is no possibility of
 * leaking space in the "partial temporary" failure case.
 */
boolean_t zfs_free_leak_on_eio = B_FALSE;

/*
 * Expiration time in milliseconds.  This value has two meanings.  First it is
 * used to determine when the spa_deadman() logic should fire.  By default the
 * spa_deadman() will fire if spa_sync() has not completed in 1000 seconds.
 * Secondly, the value determines if an I/O is considered "hung".  Any I/O that
 * has not completed in zfs_deadman_synctime_ms is considered "hung" resulting
 * in a system panic.
 */
uint64_t zfs_deadman_synctime_ms = 1000000ULL;
TUNABLE_QUAD("vfs.zfs.deadman_synctime_ms", &zfs_deadman_synctime_ms);
SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, deadman_synctime_ms, CTLFLAG_RDTUN,
    &zfs_deadman_synctime_ms, 0,
    "Stalled ZFS I/O expiration time in milliseconds");

/*
 * Check time in milliseconds.  This defines the frequency at which we check
 * for hung I/O.
 */
uint64_t zfs_deadman_checktime_ms = 5000ULL;
TUNABLE_QUAD("vfs.zfs.deadman_checktime_ms", &zfs_deadman_checktime_ms);
SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, deadman_checktime_ms, CTLFLAG_RDTUN,
    &zfs_deadman_checktime_ms, 0,
    "Period of checks for stalled ZFS I/O in milliseconds");

/*
 * Default value of -1 for zfs_deadman_enabled is resolved in
 * zfs_deadman_init().
 */
int zfs_deadman_enabled = -1;
TUNABLE_INT("vfs.zfs.deadman_enabled", &zfs_deadman_enabled);
SYSCTL_INT(_vfs_zfs, OID_AUTO, deadman_enabled, CTLFLAG_RDTUN,
    &zfs_deadman_enabled, 0, "Kernel panic on stalled ZFS I/O");

/*
 * The worst case is single-sector max-parity RAID-Z blocks, in which
 * case the space requirement is exactly (VDEV_RAIDZ_MAXPARITY + 1)
 * times the size; so just assume that.  Add to this the fact that
 * we can have up to 3 DVAs per bp, and one more factor of 2 because
 * the block may be dittoed with up to 3 DVAs by ddt_sync().  All together,
 * the worst case is:
 *     (VDEV_RAIDZ_MAXPARITY + 1) * SPA_DVAS_PER_BP * 2 == 24
 */
int spa_asize_inflation = 24;
TUNABLE_INT("vfs.zfs.spa_asize_inflation", &spa_asize_inflation);
SYSCTL_INT(_vfs_zfs, OID_AUTO, spa_asize_inflation, CTLFLAG_RWTUN,
    &spa_asize_inflation, 0,
    "Worst case inflation factor for single sector writes");

#ifndef illumos
#ifdef _KERNEL
static void
zfs_deadman_init()
{
	/*
	 * If we are not i386 or amd64 or in a virtual machine,
	 * disable the ZFS deadman thread by default.
	 */
	if (zfs_deadman_enabled == -1) {
#if defined(__amd64__) || defined(__i386__)
		zfs_deadman_enabled = (vm_guest == VM_GUEST_NO) ? 1 : 0;
#else
		zfs_deadman_enabled = 0;
#endif
	}
}
#endif	/* _KERNEL */
#endif	/* !illumos */

/*
 * Normally, we don't allow the last 3.2% (1/(2^spa_slop_shift)) of space in
 * the pool to be consumed.  This ensures that we don't run the pool
 * completely out of space, due to unaccounted changes (e.g. to the MOS).
 * It also limits the worst-case time to allocate space.  If we have
 * less than this amount of free space, most ZPL operations (e.g. write,
 * create) will return ENOSPC.
 *
 * Certain operations (e.g. file removal, most administrative actions) can
 * use half the slop space.  They will only return ENOSPC if less than half
 * the slop space is free.  Typically, once the pool has less than the slop
 * space free, the user will use these operations to free up space in the pool.
 * These are the operations that call dsl_pool_adjustedsize() with the netfree
 * argument set to TRUE.
 *
 * A very restricted set of operations are always permitted, regardless of
 * the amount of free space.  These are the operations that call
 * dsl_sync_task(ZFS_SPACE_CHECK_NONE), e.g. "zfs destroy".  If these
 * operations result in a net increase in the amount of space used,
 * it is possible to run the pool completely out of space, causing it to
 * be permanently read-only.
 *
 * Note that on very small pools, the slop space will be larger than
 * 3.2%, in an effort to have it be at least spa_min_slop (128MB),
 * but we never allow it to be more than half the pool size.
 *
 * See also the comments in zfs_space_check_t.
 */
int spa_slop_shift = 5;
SYSCTL_INT(_vfs_zfs, OID_AUTO, spa_slop_shift, CTLFLAG_RWTUN,
    &spa_slop_shift, 0,
    "Shift value of reserved space (1/(2^spa_slop_shift)).");
uint64_t spa_min_slop = 128 * 1024 * 1024;
SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, spa_min_slop, CTLFLAG_RWTUN,
    &spa_min_slop, 0,
    "Minimal value of reserved space");

/*
 * ==========================================================================
 * SPA config locking
 * ==========================================================================
 */
static void
spa_config_lock_init(spa_t *spa)
{
	for (int i = 0; i < SCL_LOCKS; i++) {
		spa_config_lock_t *scl = &spa->spa_config_lock[i];
		mutex_init(&scl->scl_lock, NULL, MUTEX_DEFAULT, NULL);
		cv_init(&scl->scl_cv, NULL, CV_DEFAULT, NULL);
		refcount_create_untracked(&scl->scl_count);
		scl->scl_writer = NULL;
		scl->scl_write_wanted = 0;
	}
}

static void
spa_config_lock_destroy(spa_t *spa)
{
	for (int i = 0; i < SCL_LOCKS; i++) {
		spa_config_lock_t *scl = &spa->spa_config_lock[i];
		mutex_destroy(&scl->scl_lock);
		cv_destroy(&scl->scl_cv);
		refcount_destroy(&scl->scl_count);
		ASSERT(scl->scl_writer == NULL);
		ASSERT(scl->scl_write_wanted == 0);
	}
}

int
spa_config_tryenter(spa_t *spa, int locks, void *tag, krw_t rw)
{
	for (int i = 0; i < SCL_LOCKS; i++) {
		spa_config_lock_t *scl = &spa->spa_config_lock[i];
		if (!(locks & (1 << i)))
			continue;
		mutex_enter(&scl->scl_lock);
		if (rw == RW_READER) {
			if (scl->scl_writer || scl->scl_write_wanted) {
				mutex_exit(&scl->scl_lock);
				spa_config_exit(spa, locks & ((1 << i) - 1),
				    tag);
				return (0);
			}
		} else {
			ASSERT(scl->scl_writer != curthread);
			if (!refcount_is_zero(&scl->scl_count)) {
				mutex_exit(&scl->scl_lock);
				spa_config_exit(spa, locks & ((1 << i) - 1),
				    tag);
				return (0);
			}
			scl->scl_writer = curthread;
		}
		(void) refcount_add(&scl->scl_count, tag);
		mutex_exit(&scl->scl_lock);
	}
	return (1);
}

void
spa_config_enter(spa_t *spa, int locks, void *tag, krw_t rw)
{
	int wlocks_held = 0;

	ASSERT3U(SCL_LOCKS, <, sizeof (wlocks_held) * NBBY);

	for (int i = 0; i < SCL_LOCKS; i++) {
		spa_config_lock_t *scl = &spa->spa_config_lock[i];
		if (scl->scl_writer == curthread)
			wlocks_held |= (1 << i);
		if (!(locks & (1 << i)))
			continue;
		mutex_enter(&scl->scl_lock);
		if (rw == RW_READER) {
			while (scl->scl_writer || scl->scl_write_wanted) {
				cv_wait(&scl->scl_cv, &scl->scl_lock);
			}
		} else {
			ASSERT(scl->scl_writer != curthread);
			while (!refcount_is_zero(&scl->scl_count)) {
				scl->scl_write_wanted++;
				cv_wait(&scl->scl_cv, &scl->scl_lock);
				scl->scl_write_wanted--;
			}
			scl->scl_writer = curthread;
		}
		(void) refcount_add(&scl->scl_count, tag);
		mutex_exit(&scl->scl_lock);
	}
	ASSERT(wlocks_held <= locks);
}

void
spa_config_exit(spa_t *spa, int locks, void *tag)
{
	for (int i = SCL_LOCKS - 1; i >= 0; i--) {
		spa_config_lock_t *scl = &spa->spa_config_lock[i];
		if (!(locks & (1 << i)))
			continue;
		mutex_enter(&scl->scl_lock);
		ASSERT(!refcount_is_zero(&scl->scl_count));
		if (refcount_remove(&scl->scl_count, tag) == 0) {
			ASSERT(scl->scl_writer == NULL ||
			    scl->scl_writer == curthread);
			scl->scl_writer = NULL;	/* OK in either case */
			cv_broadcast(&scl->scl_cv);
		}
		mutex_exit(&scl->scl_lock);
	}
}

int
spa_config_held(spa_t *spa, int locks, krw_t rw)
{
	int locks_held = 0;

	for (int i = 0; i < SCL_LOCKS; i++) {
		spa_config_lock_t *scl = &spa->spa_config_lock[i];
		if (!(locks & (1 << i)))
			continue;
		if ((rw == RW_READER && !refcount_is_zero(&scl->scl_count)) ||
		    (rw == RW_WRITER && scl->scl_writer == curthread))
			locks_held |= 1 << i;
	}

	return (locks_held);
}
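/*
 * A minimal usage sketch of the interfaces above (the caller shown is
 * hypothetical; this block is illustrative only).  spa_config_held() is
 * intended for assertions rather than for making locking decisions:
 *
 *	spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
 *	ASSERT(spa_config_held(spa, SCL_VDEV, RW_READER));
 *	... read-only walk of the vdev tree ...
 *	spa_config_exit(spa, SCL_VDEV, FTAG);
 *
 * spa_config_tryenter() returns 0 (after dropping any locks it had already
 * taken) instead of blocking when it would have to wait, so a caller that
 * cannot block can detect the conflict and defer its work.
 */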

/*
 * ==========================================================================
 * SPA namespace functions
 * ==========================================================================
 */

/*
 * Lookup the named spa_t in the AVL tree.  The spa_namespace_lock must be held.
 * Returns NULL if no matching spa_t is found.
 */
spa_t *
spa_lookup(const char *name)
{
	static spa_t search;	/* spa_t is large; don't allocate on stack */
	spa_t *spa;
	avl_index_t where;
	char *cp;

	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	(void) strlcpy(search.spa_name, name, sizeof (search.spa_name));

	/*
	 * If it's a full dataset name, figure out the pool name and
	 * just use that.
	 */
	cp = strpbrk(search.spa_name, "/@#");
	if (cp != NULL)
		*cp = '\0';

	spa = avl_find(&spa_namespace_avl, &search, &where);

	return (spa);
}
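/*
 * For example (hypothetical names), spa_lookup("tank/home@snap") and
 * spa_lookup("tank") both return the spa_t for pool "tank": the '/', '@'
 * and '#' separators mark the start of the dataset, snapshot, or bookmark
 * component, and everything after the first one is ignored.
 */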

/*
 * Fires when spa_sync has not completed within zfs_deadman_synctime_ms.
 * If the zfs_deadman_enabled flag is set then it inspects all vdev queues
 * looking for potentially hung I/Os.
 */
static void
spa_deadman(void *arg, int pending)
{
	spa_t *spa = arg;

	/*
	 * Disable the deadman timer if the pool is suspended.
	 */
	if (spa_suspended(spa)) {
#ifdef illumos
		VERIFY(cyclic_reprogram(spa->spa_deadman_cycid, CY_INFINITY));
#else
		/* Nothing.  Just don't schedule any future callouts. */
#endif
		return;
	}

	zfs_dbgmsg("slow spa_sync: started %llu seconds ago, calls %llu",
	    (gethrtime() - spa->spa_sync_starttime) / NANOSEC,
	    ++spa->spa_deadman_calls);
	if (zfs_deadman_enabled)
		vdev_deadman(spa->spa_root_vdev);
#ifdef __FreeBSD__
#ifdef _KERNEL
	callout_schedule(&spa->spa_deadman_cycid,
	    hz * zfs_deadman_checktime_ms / MILLISEC);
#endif
#endif
}

#if defined(__FreeBSD__) && defined(_KERNEL)
static void
spa_deadman_timeout(void *arg)
{
	spa_t *spa = arg;

	taskqueue_enqueue(taskqueue_thread, &spa->spa_deadman_task);
}
#endif
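/*
 * On FreeBSD the deadman is thus a two-stage mechanism: the callout fires
 * in a context that may not sleep and only enqueues spa_deadman_task, and
 * spa_deadman() itself runs on taskqueue_thread, where it may sleep on
 * vq_lock.  A simplified sketch of the flow (the initial scheduling is
 * described in spa_add() below):
 *
 *	callout fires -> spa_deadman_timeout() -> taskqueue_enqueue()
 *	    -> spa_deadman() checks the vdev queues and reschedules the
 *	       callout zfs_deadman_checktime_ms (5 seconds by default) later
 */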

/*
 * Create an uninitialized spa_t with the given name.  Requires
 * spa_namespace_lock.  The caller must ensure that the spa_t doesn't already
 * exist by calling spa_lookup() first.
 */
spa_t *
spa_add(const char *name, nvlist_t *config, const char *altroot)
{
	spa_t *spa;
	spa_config_dirent_t *dp;
#ifdef illumos
	cyc_handler_t hdlr;
	cyc_time_t when;
#endif

	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	spa = kmem_zalloc(sizeof (spa_t), KM_SLEEP);

	mutex_init(&spa->spa_async_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_errlist_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_errlog_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_evicting_os_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_history_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_proc_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_props_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_cksum_tmpls_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_scrub_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_suspend_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_vdev_top_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_alloc_lock, NULL, MUTEX_DEFAULT, NULL);

	cv_init(&spa->spa_async_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&spa->spa_evicting_os_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&spa->spa_proc_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&spa->spa_scrub_io_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&spa->spa_suspend_cv, NULL, CV_DEFAULT, NULL);

	for (int t = 0; t < TXG_SIZE; t++)
		bplist_create(&spa->spa_free_bplist[t]);

	(void) strlcpy(spa->spa_name, name, sizeof (spa->spa_name));
	spa->spa_state = POOL_STATE_UNINITIALIZED;
	spa->spa_freeze_txg = UINT64_MAX;
	spa->spa_final_txg = UINT64_MAX;
	spa->spa_load_max_txg = UINT64_MAX;
	spa->spa_proc = &p0;
	spa->spa_proc_state = SPA_PROC_NONE;

#ifdef illumos
	hdlr.cyh_func = spa_deadman;
	hdlr.cyh_arg = spa;
	hdlr.cyh_level = CY_LOW_LEVEL;
#endif

	spa->spa_deadman_synctime = MSEC2NSEC(zfs_deadman_synctime_ms);

#ifdef illumos
	/*
	 * This determines how often we need to check for hung I/Os after
	 * the cyclic has already fired.  Since checking for hung I/Os is
	 * an expensive operation we don't want to check too frequently.
	 * Instead wait for 5 seconds before checking again.
	 */
	when.cyt_interval = MSEC2NSEC(zfs_deadman_checktime_ms);
	when.cyt_when = CY_INFINITY;
	mutex_enter(&cpu_lock);
	spa->spa_deadman_cycid = cyclic_add(&hdlr, &when);
	mutex_exit(&cpu_lock);
#else	/* !illumos */
#ifdef _KERNEL
	/*
	 * callout(9) does not provide a way to initialize a callout with
	 * a function and an argument, so we use callout_reset() to schedule
	 * the callout in the very distant future.  Even if that event ever
	 * fires, it should be okay as we won't have any active zio-s.
	 * But normally spa_sync() will reschedule the callout with a proper
	 * timeout.
	 * callout(9) does not allow the callback function to sleep but
	 * vdev_deadman() needs to acquire vq_lock and illumos mutexes are
	 * emulated using sx(9).  For this reason spa_deadman_timeout()
	 * will schedule spa_deadman() as a task on a taskqueue that allows
	 * sleeping.
	 */
	TASK_INIT(&spa->spa_deadman_task, 0, spa_deadman, spa);
	callout_init(&spa->spa_deadman_cycid, 1);
	callout_reset_sbt(&spa->spa_deadman_cycid, SBT_MAX, 0,
	    spa_deadman_timeout, spa, 0);
#endif
#endif
	refcount_create(&spa->spa_refcount);
	spa_config_lock_init(spa);

	avl_add(&spa_namespace_avl, spa);

	/*
	 * Set the alternate root, if there is one.
	 */
	if (altroot) {
		spa->spa_root = spa_strdup(altroot);
		spa_active_count++;
	}

	avl_create(&spa->spa_alloc_tree, zio_timestamp_compare,
	    sizeof (zio_t), offsetof(zio_t, io_alloc_node));

	/*
	 * Every pool starts with the default cachefile.
	 */
	list_create(&spa->spa_config_list, sizeof (spa_config_dirent_t),
	    offsetof(spa_config_dirent_t, scd_link));

	dp = kmem_zalloc(sizeof (spa_config_dirent_t), KM_SLEEP);
	dp->scd_path = altroot ? NULL : spa_strdup(spa_config_path);
	list_insert_head(&spa->spa_config_list, dp);

	VERIFY(nvlist_alloc(&spa->spa_load_info, NV_UNIQUE_NAME,
	    KM_SLEEP) == 0);

	if (config != NULL) {
		nvlist_t *features;

		if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_FEATURES_FOR_READ,
		    &features) == 0) {
			VERIFY(nvlist_dup(features, &spa->spa_label_features,
			    0) == 0);
		}

		VERIFY(nvlist_dup(config, &spa->spa_config, 0) == 0);
	}

	if (spa->spa_label_features == NULL) {
		VERIFY(nvlist_alloc(&spa->spa_label_features, NV_UNIQUE_NAME,
		    KM_SLEEP) == 0);
	}

	spa->spa_debug = ((zfs_flags & ZFS_DEBUG_SPA) != 0);

	spa->spa_min_ashift = INT_MAX;
	spa->spa_max_ashift = 0;

	/*
	 * As a pool is being created, treat all features as disabled by
	 * setting SPA_FEATURE_DISABLED for all entries in the feature
	 * refcount cache.
	 */
	for (int i = 0; i < SPA_FEATURES; i++) {
		spa->spa_feat_refcount_cache[i] = SPA_FEATURE_DISABLED;
	}

	return (spa);
}

/*
 * Removes a spa_t from the namespace, freeing up any memory used.  Requires
 * spa_namespace_lock.  This is called only after the spa_t has been closed and
 * deactivated.
 */
void
spa_remove(spa_t *spa)
{
	spa_config_dirent_t *dp;

	ASSERT(MUTEX_HELD(&spa_namespace_lock));
	ASSERT(spa->spa_state == POOL_STATE_UNINITIALIZED);
	ASSERT3U(refcount_count(&spa->spa_refcount), ==, 0);

	nvlist_free(spa->spa_config_splitting);

	avl_remove(&spa_namespace_avl, spa);
	cv_broadcast(&spa_namespace_cv);

	if (spa->spa_root) {
		spa_strfree(spa->spa_root);
		spa_active_count--;
	}

	while ((dp = list_head(&spa->spa_config_list)) != NULL) {
		list_remove(&spa->spa_config_list, dp);
		if (dp->scd_path != NULL)
			spa_strfree(dp->scd_path);
		kmem_free(dp, sizeof (spa_config_dirent_t));
	}

	avl_destroy(&spa->spa_alloc_tree);
	list_destroy(&spa->spa_config_list);

	nvlist_free(spa->spa_label_features);
	nvlist_free(spa->spa_load_info);
	spa_config_set(spa, NULL);

#ifdef illumos
	mutex_enter(&cpu_lock);
	if (spa->spa_deadman_cycid != CYCLIC_NONE)
		cyclic_remove(spa->spa_deadman_cycid);
	mutex_exit(&cpu_lock);
	spa->spa_deadman_cycid = CYCLIC_NONE;
#else	/* !illumos */
#ifdef _KERNEL
	callout_drain(&spa->spa_deadman_cycid);
	taskqueue_drain(taskqueue_thread, &spa->spa_deadman_task);
#endif
#endif

	refcount_destroy(&spa->spa_refcount);

	spa_config_lock_destroy(spa);

	for (int t = 0; t < TXG_SIZE; t++)
		bplist_destroy(&spa->spa_free_bplist[t]);

	zio_checksum_templates_free(spa);

	cv_destroy(&spa->spa_async_cv);
	cv_destroy(&spa->spa_evicting_os_cv);
	cv_destroy(&spa->spa_proc_cv);
	cv_destroy(&spa->spa_scrub_io_cv);
	cv_destroy(&spa->spa_suspend_cv);

	mutex_destroy(&spa->spa_alloc_lock);
	mutex_destroy(&spa->spa_async_lock);
	mutex_destroy(&spa->spa_errlist_lock);
	mutex_destroy(&spa->spa_errlog_lock);
	mutex_destroy(&spa->spa_evicting_os_lock);
	mutex_destroy(&spa->spa_history_lock);
	mutex_destroy(&spa->spa_proc_lock);
	mutex_destroy(&spa->spa_props_lock);
	mutex_destroy(&spa->spa_cksum_tmpls_lock);
	mutex_destroy(&spa->spa_scrub_lock);
	mutex_destroy(&spa->spa_suspend_lock);
	mutex_destroy(&spa->spa_vdev_top_lock);

	kmem_free(spa, sizeof (spa_t));
}

/*
 * Given a pool, return the next pool in the namespace, or NULL if there is
 * none.  If 'prev' is NULL, return the first pool.
 */
spa_t *
spa_next(spa_t *prev)
{
	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	if (prev)
		return (AVL_NEXT(&spa_namespace_avl, prev));
	else
		return (avl_first(&spa_namespace_avl));
}

/*
 * ==========================================================================
 * SPA refcount functions
 * ==========================================================================
 */

/*
 * Add a reference to the given spa_t.  Must have at least one reference, or
 * have the namespace lock held.
 */
void
spa_open_ref(spa_t *spa, void *tag)
{
	ASSERT(refcount_count(&spa->spa_refcount) >= spa->spa_minref ||
	    MUTEX_HELD(&spa_namespace_lock));
	(void) refcount_add(&spa->spa_refcount, tag);
}

/*
 * Remove a reference to the given spa_t.  Must have at least one reference, or
 * have the namespace lock held.
 */
void
spa_close(spa_t *spa, void *tag)
{
	ASSERT(refcount_count(&spa->spa_refcount) > spa->spa_minref ||
	    MUTEX_HELD(&spa_namespace_lock));
	(void) refcount_remove(&spa->spa_refcount, tag);
}

/*
 * Remove a reference to the given spa_t held by a dsl dir that is
 * being asynchronously released.  Async releases occur from a taskq
 * performing eviction of dsl datasets and dirs.  The namespace lock
 * isn't held and the hold by the object being evicted may contribute to
 * spa_minref (e.g. dataset or directory released during pool export),
 * so the asserts in spa_close() do not apply.
 */
void
spa_async_close(spa_t *spa, void *tag)
{
	(void) refcount_remove(&spa->spa_refcount, tag);
}

/*
 * Check to see if the spa refcount is zero.  Must be called with
 * spa_namespace_lock held.  We really compare against spa_minref, which is
 * the number of references acquired when opening a pool.
 */
boolean_t
spa_refcount_zero(spa_t *spa)
{
	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	return (refcount_count(&spa->spa_refcount) == spa->spa_minref);
}
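/*
 * Because the refcount "floor" is spa_minref rather than 0, a pool that is
 * merely open already holds spa_minref references; only holds beyond that
 * (e.g. from spa_open_ref()) make spa_refcount_zero() return B_FALSE.  As
 * an illustration (with a hypothetical spa_minref of 3): an otherwise idle
 * open pool has refcount_count() == 3 and spa_refcount_zero() returns
 * B_TRUE; a single spa_open_ref() raises the count to 4, and B_FALSE is
 * returned until the matching spa_close().
 */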

/*
 * ==========================================================================
 * SPA spare and l2cache tracking
 * ==========================================================================
 */

/*
 * Hot spares and cache devices are tracked using the same code below,
 * for 'auxiliary' devices.
 */

typedef struct spa_aux {
	uint64_t	aux_guid;
	uint64_t	aux_pool;
	avl_node_t	aux_avl;
	int		aux_count;
} spa_aux_t;

static int
spa_aux_compare(const void *a, const void *b)
{
	const spa_aux_t *sa = a;
	const spa_aux_t *sb = b;

	if (sa->aux_guid < sb->aux_guid)
		return (-1);
	else if (sa->aux_guid > sb->aux_guid)
		return (1);
	else
		return (0);
}

void
spa_aux_add(vdev_t *vd, avl_tree_t *avl)
{
	avl_index_t where;
	spa_aux_t search;
	spa_aux_t *aux;

	search.aux_guid = vd->vdev_guid;
	if ((aux = avl_find(avl, &search, &where)) != NULL) {
		aux->aux_count++;
	} else {
		aux = kmem_zalloc(sizeof (spa_aux_t), KM_SLEEP);
		aux->aux_guid = vd->vdev_guid;
		aux->aux_count = 1;
		avl_insert(avl, aux, where);
	}
}

void
spa_aux_remove(vdev_t *vd, avl_tree_t *avl)
{
	spa_aux_t search;
	spa_aux_t *aux;
	avl_index_t where;

	search.aux_guid = vd->vdev_guid;
	aux = avl_find(avl, &search, &where);

	ASSERT(aux != NULL);

	if (--aux->aux_count == 0) {
		avl_remove(avl, aux);
		kmem_free(aux, sizeof (spa_aux_t));
	} else if (aux->aux_pool == spa_guid(vd->vdev_spa)) {
		aux->aux_pool = 0ULL;
	}
}

boolean_t
spa_aux_exists(uint64_t guid, uint64_t *pool, int *refcnt, avl_tree_t *avl)
{
	spa_aux_t search, *found;

	search.aux_guid = guid;
	found = avl_find(avl, &search, NULL);

	if (pool) {
		if (found)
			*pool = found->aux_pool;
		else
			*pool = 0ULL;
	}

	if (refcnt) {
		if (found)
			*refcnt = found->aux_count;
		else
			*refcnt = 0;
	}

	return (found != NULL);
}

void
spa_aux_activate(vdev_t *vd, avl_tree_t *avl)
{
	spa_aux_t search, *found;
	avl_index_t where;

	search.aux_guid = vd->vdev_guid;
	found = avl_find(avl, &search, &where);
	ASSERT(found != NULL);
	ASSERT(found->aux_pool == 0ULL);

	found->aux_pool = spa_guid(vd->vdev_spa);
}

/*
 * Spares are tracked globally due to the following constraints:
 *
 *	- A spare may be part of multiple pools.
 *	- A spare may be added to a pool even if it's actively in use within
 *	  another pool.
 *	- A spare in use in any pool can only be the source of a replacement if
 *	  the target is a spare in the same pool.
 *
 * We keep track of all spares on the system through the use of a reference
 * counted AVL tree.  When a vdev is added as a spare, or used as a replacement
 * spare, then we bump the reference count in the AVL tree.  In addition, we set
 * the 'vdev_isspare' member to indicate that the device is a spare (active or
 * inactive).  When a spare is made active (used to replace a device in the
 * pool), we also keep track of which pool it's been made a part of.
 *
 * The 'spa_spare_lock' protects the AVL tree.  These functions are normally
 * called under the spa_namespace lock as part of vdev reconfiguration.  The
 * separate spare lock exists for the status query path, which does not need to
 * be completely consistent with respect to other vdev configuration changes.
 */
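/*
 * To make the reference counting concrete (a sketch with made-up names):
 * if a disk is added as a spare to both pools "tank" and "dozer",
 * spa_aux_add() runs twice and the shared spa_aux_t ends up with
 * aux_count == 2.  When "tank" replaces a failed device with that spare,
 * spa_aux_activate() records tank's pool guid in aux_pool.  Removing the
 * spare from one pool decrements aux_count, and the node is freed only
 * when the count drops to zero.
 */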

static int
spa_spare_compare(const void *a, const void *b)
{
	return (spa_aux_compare(a, b));
}

void
spa_spare_add(vdev_t *vd)
{
	mutex_enter(&spa_spare_lock);
	ASSERT(!vd->vdev_isspare);
	spa_aux_add(vd, &spa_spare_avl);
	vd->vdev_isspare = B_TRUE;
	mutex_exit(&spa_spare_lock);
}

void
spa_spare_remove(vdev_t *vd)
{
	mutex_enter(&spa_spare_lock);
	ASSERT(vd->vdev_isspare);
	spa_aux_remove(vd, &spa_spare_avl);
	vd->vdev_isspare = B_FALSE;
	mutex_exit(&spa_spare_lock);
}

boolean_t
spa_spare_exists(uint64_t guid, uint64_t *pool, int *refcnt)
{
	boolean_t found;

	mutex_enter(&spa_spare_lock);
	found = spa_aux_exists(guid, pool, refcnt, &spa_spare_avl);
	mutex_exit(&spa_spare_lock);

	return (found);
}

void
spa_spare_activate(vdev_t *vd)
{
	mutex_enter(&spa_spare_lock);
	ASSERT(vd->vdev_isspare);
	spa_aux_activate(vd, &spa_spare_avl);
	mutex_exit(&spa_spare_lock);
}

/*
 * Level 2 ARC devices are tracked globally for the same reasons as spares.
 * Cache devices currently only support one pool per cache device, and so
 * for these devices the aux reference count is currently unused beyond 1.
 */

static int
spa_l2cache_compare(const void *a, const void *b)
{
	return (spa_aux_compare(a, b));
}

void
spa_l2cache_add(vdev_t *vd)
{
	mutex_enter(&spa_l2cache_lock);
	ASSERT(!vd->vdev_isl2cache);
	spa_aux_add(vd, &spa_l2cache_avl);
	vd->vdev_isl2cache = B_TRUE;
	mutex_exit(&spa_l2cache_lock);
}

void
spa_l2cache_remove(vdev_t *vd)
{
	mutex_enter(&spa_l2cache_lock);
	ASSERT(vd->vdev_isl2cache);
	spa_aux_remove(vd, &spa_l2cache_avl);
	vd->vdev_isl2cache = B_FALSE;
	mutex_exit(&spa_l2cache_lock);
}

boolean_t
spa_l2cache_exists(uint64_t guid, uint64_t *pool)
{
	boolean_t found;

	mutex_enter(&spa_l2cache_lock);
	found = spa_aux_exists(guid, pool, NULL, &spa_l2cache_avl);
	mutex_exit(&spa_l2cache_lock);

	return (found);
}

void
spa_l2cache_activate(vdev_t *vd)
{
	mutex_enter(&spa_l2cache_lock);
	ASSERT(vd->vdev_isl2cache);
	spa_aux_activate(vd, &spa_l2cache_avl);
	mutex_exit(&spa_l2cache_lock);
}

/*
 * ==========================================================================
 * SPA vdev locking
 * ==========================================================================
 */

/*
 * Lock the given spa_t for the purpose of adding or removing a vdev.
 * Grabs the global spa_namespace_lock plus the spa config lock for writing.
 * It returns the next transaction group for the spa_t.
 */
uint64_t
spa_vdev_enter(spa_t *spa)
{
	mutex_enter(&spa->spa_vdev_top_lock);
	mutex_enter(&spa_namespace_lock);
	return (spa_vdev_config_enter(spa));
}

/*
 * Internal implementation for spa_vdev_enter().  Used when a vdev
 * operation requires multiple syncs (i.e. removing a device) while
 * keeping the spa_namespace_lock held.
 */
uint64_t
spa_vdev_config_enter(spa_t *spa)
{
	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	spa_config_enter(spa, SCL_ALL, spa, RW_WRITER);

	return (spa_last_synced_txg(spa) + 1);
}

/*
 * Used in combination with spa_vdev_config_enter() to allow the syncing
 * of multiple transactions without releasing the spa_namespace_lock.
 */
void
spa_vdev_config_exit(spa_t *spa, vdev_t *vd, uint64_t txg, int error, char *tag)
{
	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	int config_changed = B_FALSE;

	ASSERT(txg > spa_last_synced_txg(spa));

	spa->spa_pending_vdev = NULL;

	/*
	 * Reassess the DTLs.
	 */
	vdev_dtl_reassess(spa->spa_root_vdev, 0, 0, B_FALSE);

	if (error == 0 && !list_is_empty(&spa->spa_config_dirty_list)) {
		config_changed = B_TRUE;
		spa->spa_config_generation++;
	}

	/*
	 * Verify the metaslab classes.
	 */
	ASSERT(metaslab_class_validate(spa_normal_class(spa)) == 0);
	ASSERT(metaslab_class_validate(spa_log_class(spa)) == 0);

	spa_config_exit(spa, SCL_ALL, spa);

	/*
	 * Panic the system if the specified tag requires it.  This
	 * is useful for ensuring that configurations are updated
	 * transactionally.
	 */
	if (zio_injection_enabled)
		zio_handle_panic_injection(spa, tag, 0);

	/*
	 * Note: this txg_wait_synced() is important because it ensures
	 * that there won't be more than one config change per txg.
	 * This allows us to use the txg as the generation number.
	 */
	if (error == 0)
		txg_wait_synced(spa->spa_dsl_pool, txg);

	if (vd != NULL) {
		ASSERT(!vd->vdev_detached || vd->vdev_dtl_sm == NULL);
		spa_config_enter(spa, SCL_ALL, spa, RW_WRITER);
		vdev_free(vd);
		spa_config_exit(spa, SCL_ALL, spa);
	}

	/*
	 * If the config changed, update the config cache.
	 */
	if (config_changed)
		spa_config_sync(spa, B_FALSE, B_TRUE);
}

/*
 * Unlock the spa_t after adding or removing a vdev.  Besides undoing the
 * locking of spa_vdev_enter(), we also want to make sure the transactions
 * have synced to disk, and then update the global configuration cache with
 * the new information.
 */
int
spa_vdev_exit(spa_t *spa, vdev_t *vd, uint64_t txg, int error)
{
	spa_vdev_config_exit(spa, vd, txg, error, FTAG);
	mutex_exit(&spa_namespace_lock);
	mutex_exit(&spa->spa_vdev_top_lock);

	return (error);
}
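/*
 * The enter/exit pair above is used bracket-style by vdev operations; a
 * minimal sketch of a hypothetical caller (error handling elided):
 *
 *	uint64_t txg = spa_vdev_enter(spa);
 *	... modify the vdev tree, dirtying the config ...
 *	return (spa_vdev_exit(spa, unwanted_vd, txg, error));
 *
 * spa_vdev_exit() returns the error passed to it, so callers conventionally
 * invoke it directly in their return statement, as shown.
 */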

/*
 * Lock the given spa_t for the purpose of changing vdev state.
 */
void
spa_vdev_state_enter(spa_t *spa, int oplocks)
{
	int locks = SCL_STATE_ALL | oplocks;

	/*
	 * Root pools may need to read from the underlying devfs filesystem
	 * when opening up a vdev.  Unfortunately if we're holding the
	 * SCL_ZIO lock it will result in a deadlock when we try to issue
	 * the read from the root filesystem.  Instead we "prefetch"
	 * the associated vnodes that we need prior to opening the
	 * underlying devices and cache them so that we can prevent
	 * any I/O when we are doing the actual open.
	 */
	if (spa_is_root(spa)) {
		int low = locks & ~(SCL_ZIO - 1);
		int high = locks & ~low;

		spa_config_enter(spa, high, spa, RW_WRITER);
		vdev_hold(spa->spa_root_vdev);
		spa_config_enter(spa, low, spa, RW_WRITER);
	} else {
		spa_config_enter(spa, locks, spa, RW_WRITER);
	}
	spa->spa_vdev_locks = locks;
}

int
spa_vdev_state_exit(spa_t *spa, vdev_t *vd, int error)
{
	boolean_t config_changed = B_FALSE;

	if (vd != NULL || error == 0)
		vdev_dtl_reassess(vd ? vd->vdev_top : spa->spa_root_vdev,
		    0, 0, B_FALSE);

	if (vd != NULL) {
		vdev_state_dirty(vd->vdev_top);
		config_changed = B_TRUE;
		spa->spa_config_generation++;
	}

	if (spa_is_root(spa))
		vdev_rele(spa->spa_root_vdev);

	ASSERT3U(spa->spa_vdev_locks, >=, SCL_STATE_ALL);
	spa_config_exit(spa, spa->spa_vdev_locks, spa);

	/*
	 * If anything changed, wait for it to sync.  This ensures that,
	 * from the system administrator's perspective, zpool(1M) commands
	 * are synchronous.  This is important for things like zpool offline:
	 * when the command completes, you expect no further I/O from ZFS.
	 */
	if (vd != NULL)
		txg_wait_synced(spa->spa_dsl_pool, 0);

	/*
	 * If the config changed, update the config cache.
	 */
	if (config_changed) {
		mutex_enter(&spa_namespace_lock);
		spa_config_sync(spa, B_FALSE, B_TRUE);
		mutex_exit(&spa_namespace_lock);
	}

	return (error);
}

/*
 * ==========================================================================
 * Miscellaneous functions
 * ==========================================================================
 */

void
spa_activate_mos_feature(spa_t *spa, const char *feature, dmu_tx_t *tx)
{
	if (!nvlist_exists(spa->spa_label_features, feature)) {
		fnvlist_add_boolean(spa->spa_label_features, feature);
		/*
		 * When we are creating the pool (tx_txg==TXG_INITIAL), we can't
		 * dirty the vdev config because lock SCL_CONFIG is not held.
		 * Thankfully, in this case we don't need to dirty the config
		 * because it will be written out anyway when we finish
		 * creating the pool.
		 */
		if (tx->tx_txg != TXG_INITIAL)
			vdev_config_dirty(spa->spa_root_vdev);
	}
}

void
spa_deactivate_mos_feature(spa_t *spa, const char *feature)
{
	if (nvlist_remove_all(spa->spa_label_features, feature) == 0)
		vdev_config_dirty(spa->spa_root_vdev);
}

/*
 * Rename a spa_t.
 */
int
spa_rename(const char *name, const char *newname)
{
	spa_t *spa;
	int err;

	/*
	 * Lookup the spa_t and grab the config lock for writing.  We need to
	 * actually open the pool so that we can sync out the necessary labels.
	 * It's OK to call spa_open() with the namespace lock held because we
	 * allow recursive calls for other reasons.
	 */
	mutex_enter(&spa_namespace_lock);
	if ((err = spa_open(name, &spa, FTAG)) != 0) {
		mutex_exit(&spa_namespace_lock);
		return (err);
	}

	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);

	avl_remove(&spa_namespace_avl, spa);
	(void) strlcpy(spa->spa_name, newname, sizeof (spa->spa_name));
	avl_add(&spa_namespace_avl, spa);

	/*
	 * Sync all labels to disk with the new names by marking the root vdev
	 * dirty and waiting for it to sync.  It will pick up the new pool name
	 * during the sync.
	 */
	vdev_config_dirty(spa->spa_root_vdev);

	spa_config_exit(spa, SCL_ALL, FTAG);

	txg_wait_synced(spa->spa_dsl_pool, 0);

	/*
	 * Sync the updated config cache.
	 */
	spa_config_sync(spa, B_FALSE, B_TRUE);

	spa_close(spa, FTAG);

	mutex_exit(&spa_namespace_lock);

	return (0);
}

/*
 * Return the spa_t associated with given pool_guid, if it exists.  If
 * device_guid is non-zero, determine whether the pool exists *and* contains
 * a device with the specified device_guid.
 */
spa_t *
spa_by_guid(uint64_t pool_guid, uint64_t device_guid)
{
	spa_t *spa;
	avl_tree_t *t = &spa_namespace_avl;

	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	for (spa = avl_first(t); spa != NULL; spa = AVL_NEXT(t, spa)) {
		if (spa->spa_state == POOL_STATE_UNINITIALIZED)
			continue;
		if (spa->spa_root_vdev == NULL)
			continue;
		if (spa_guid(spa) == pool_guid) {
			if (device_guid == 0)
				break;

			if (vdev_lookup_by_guid(spa->spa_root_vdev,
			    device_guid) != NULL)
				break;

			/*
			 * Check any devices we may be in the process of adding.
			 */
			if (spa->spa_pending_vdev) {
				if (vdev_lookup_by_guid(spa->spa_pending_vdev,
				    device_guid) != NULL)
					break;
			}
		}
	}

	return (spa);
}

/*
 * Determine whether a pool with the given pool_guid exists.
 */
boolean_t
spa_guid_exists(uint64_t pool_guid, uint64_t device_guid)
{
	return (spa_by_guid(pool_guid, device_guid) != NULL);
}

char *
spa_strdup(const char *s)
{
	size_t len;
	char *new;

	len = strlen(s);
	new = kmem_alloc(len + 1, KM_SLEEP);
	bcopy(s, new, len);
	new[len] = '\0';

	return (new);
}

void
spa_strfree(char *s)
{
	kmem_free(s, strlen(s) + 1);
}

uint64_t
spa_get_random(uint64_t range)
{
	uint64_t r;

	ASSERT(range != 0);

	(void) random_get_pseudo_bytes((void *)&r, sizeof (uint64_t));

	return (r % range);
}

uint64_t
spa_generate_guid(spa_t *spa)
{
	uint64_t guid = spa_get_random(-1ULL);

	if (spa != NULL) {
		while (guid == 0 || spa_guid_exists(spa_guid(spa), guid))
			guid = spa_get_random(-1ULL);
	} else {
		while (guid == 0 || spa_guid_exists(guid, 0))
			guid = spa_get_random(-1ULL);
	}

	return (guid);
}

void
snprintf_blkptr(char *buf, size_t buflen, const blkptr_t *bp)
{
	char type[256];
	char *checksum = NULL;
	char *compress = NULL;

	if (bp != NULL) {
		if (BP_GET_TYPE(bp) & DMU_OT_NEWTYPE) {
			dmu_object_byteswap_t bswap =
			    DMU_OT_BYTESWAP(BP_GET_TYPE(bp));
			(void) snprintf(type, sizeof (type), "bswap %s %s",
			    DMU_OT_IS_METADATA(BP_GET_TYPE(bp)) ?
			    "metadata" : "data",
			    dmu_ot_byteswap[bswap].ob_name);
		} else {
			(void) strlcpy(type, dmu_ot[BP_GET_TYPE(bp)].ot_name,
			    sizeof (type));
		}
		if (!BP_IS_EMBEDDED(bp)) {
			checksum =
			    zio_checksum_table[BP_GET_CHECKSUM(bp)].ci_name;
		}
		compress = zio_compress_table[BP_GET_COMPRESS(bp)].ci_name;
	}

	SNPRINTF_BLKPTR(snprintf, ' ', buf, buflen, bp, type, checksum,
	    compress);
}

void
spa_freeze(spa_t *spa)
{
	uint64_t freeze_txg = 0;

	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
	if (spa->spa_freeze_txg == UINT64_MAX) {
		freeze_txg = spa_last_synced_txg(spa) + TXG_SIZE;
		spa->spa_freeze_txg = freeze_txg;
	}
	spa_config_exit(spa, SCL_ALL, FTAG);
	if (freeze_txg != 0)
		txg_wait_synced(spa_get_dsl(spa), freeze_txg);
}

void
zfs_panic_recover(const char *fmt, ...)
{
	va_list adx;

	va_start(adx, fmt);
	vcmn_err(zfs_recover ? CE_WARN : CE_PANIC, fmt, adx);
	va_end(adx);
}

/*
 * This is a stripped-down version of strtoull, suitable only for converting
 * lowercase hexadecimal numbers that don't overflow.
 */
uint64_t
zfs_strtonum(const char *str, char **nptr)
{
	uint64_t val = 0;
	char c;
	int digit;

	while ((c = *str) != '\0') {
		if (c >= '0' && c <= '9')
			digit = c - '0';
		else if (c >= 'a' && c <= 'f')
			digit = 10 + c - 'a';
		else
			break;

		val *= 16;
		val += digit;

		str++;
	}

	if (nptr)
		*nptr = (char *)str;

	return (val);
}
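/*
 * For example, zfs_strtonum("1a2b", &end) returns 0x1a2b (6699) and leaves
 * 'end' pointing at the terminating NUL, while zfs_strtonum("10X", &end)
 * stops at the unrecognized character and returns 0x10 with 'end' pointing
 * at 'X'.  Note there is no "0x" prefix handling and no overflow detection.
 */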

/*
 * ==========================================================================
 * Accessor functions
 * ==========================================================================
 */

boolean_t
spa_shutting_down(spa_t *spa)
{
	return (spa->spa_async_suspended);
}

dsl_pool_t *
spa_get_dsl(spa_t *spa)
{
	return (spa->spa_dsl_pool);
}

boolean_t
spa_is_initializing(spa_t *spa)
{
	return (spa->spa_is_initializing);
}

blkptr_t *
spa_get_rootblkptr(spa_t *spa)
{
	return (&spa->spa_ubsync.ub_rootbp);
}

void
spa_set_rootblkptr(spa_t *spa, const blkptr_t *bp)
{
	spa->spa_uberblock.ub_rootbp = *bp;
}

void
spa_altroot(spa_t *spa, char *buf, size_t buflen)
{
	if (spa->spa_root == NULL)
		buf[0] = '\0';
	else
		(void) strncpy(buf, spa->spa_root, buflen);
}

int
spa_sync_pass(spa_t *spa)
{
	return (spa->spa_sync_pass);
}

char *
spa_name(spa_t *spa)
{
	return (spa->spa_name);
}

uint64_t
spa_guid(spa_t *spa)
{
	dsl_pool_t *dp = spa_get_dsl(spa);
	uint64_t guid;

	/*
	 * If we fail to parse the config during spa_load(), we can go through
	 * the error path (which posts an ereport) and end up here with no root
	 * vdev.  We stash the original pool guid in 'spa_config_guid' to handle
	 * this case.
	 */
	if (spa->spa_root_vdev == NULL)
		return (spa->spa_config_guid);

	guid = spa->spa_last_synced_guid != 0 ?
	    spa->spa_last_synced_guid : spa->spa_root_vdev->vdev_guid;

	/*
	 * Return the most recently synced out guid unless we're
	 * in syncing context.
	 */
	if (dp && dsl_pool_sync_context(dp))
		return (spa->spa_root_vdev->vdev_guid);
	else
		return (guid);
}

uint64_t
spa_load_guid(spa_t *spa)
{
	/*
	 * This is a GUID that exists solely as a reference for the
	 * purposes of the arc.  It is generated at load time, and
	 * is never written to persistent storage.
	 */
	return (spa->spa_load_guid);
}

uint64_t
spa_last_synced_txg(spa_t *spa)
{
	return (spa->spa_ubsync.ub_txg);
}

uint64_t
spa_first_txg(spa_t *spa)
{
	return (spa->spa_first_txg);
}

uint64_t
spa_syncing_txg(spa_t *spa)
{
	return (spa->spa_syncing_txg);
}

pool_state_t
spa_state(spa_t *spa)
{
	return (spa->spa_state);
}

spa_load_state_t
spa_load_state(spa_t *spa)
{
	return (spa->spa_load_state);
}

uint64_t
spa_freeze_txg(spa_t *spa)
{
	return (spa->spa_freeze_txg);
}

/* ARGSUSED */
uint64_t
spa_get_asize(spa_t *spa, uint64_t lsize)
{
	return (lsize * spa_asize_inflation);
}

/*
 * Return the amount of slop space in bytes.  It is 1/32 of the pool (3.2%),
 * or at least 128MB, unless that would cause it to be more than half the
 * pool size.
 *
 * See the comment above spa_slop_shift for details.
 */
uint64_t
spa_get_slop_space(spa_t *spa)
{
	uint64_t space = spa_get_dspace(spa);
	return (MAX(space >> spa_slop_shift, MIN(space >> 1, spa_min_slop)));
}
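/*
 * Worked examples of the expression above (illustrative sizes): for a 1TB
 * pool, space >> 5 is 32GB, which already exceeds spa_min_slop, so the
 * slop is 32GB.  For a 1GB pool, space >> 5 is only 32MB, so the MAX()
 * raises the slop to spa_min_slop (128MB).  For a tiny 128MB pool, the
 * MIN() with space >> 1 caps the slop at half the pool (64MB).
 */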

uint64_t
spa_get_dspace(spa_t *spa)
{
	return (spa->spa_dspace);
}

void
spa_update_dspace(spa_t *spa)
{
	spa->spa_dspace = metaslab_class_get_dspace(spa_normal_class(spa)) +
	    ddt_get_dedup_dspace(spa);
}

/*
 * Return the failure mode that has been set to this pool.  The default
 * behavior will be to block all I/Os when a complete failure occurs.
 */
uint8_t
spa_get_failmode(spa_t *spa)
{
	return (spa->spa_failmode);
}

boolean_t
spa_suspended(spa_t *spa)
{
	return (spa->spa_suspended);
}

uint64_t
spa_version(spa_t *spa)
{
	return (spa->spa_ubsync.ub_version);
}

boolean_t
spa_deflate(spa_t *spa)
{
	return (spa->spa_deflate);
}

metaslab_class_t *
spa_normal_class(spa_t *spa)
{
	return (spa->spa_normal_class);
}

metaslab_class_t *
spa_log_class(spa_t *spa)
{
	return (spa->spa_log_class);
}

void
spa_evicting_os_register(spa_t *spa, objset_t *os)
{
	mutex_enter(&spa->spa_evicting_os_lock);
	list_insert_head(&spa->spa_evicting_os_list, os);
	mutex_exit(&spa->spa_evicting_os_lock);
}

void
spa_evicting_os_deregister(spa_t *spa, objset_t *os)
{
	mutex_enter(&spa->spa_evicting_os_lock);
	list_remove(&spa->spa_evicting_os_list, os);
	cv_broadcast(&spa->spa_evicting_os_cv);
	mutex_exit(&spa->spa_evicting_os_lock);
}

void
spa_evicting_os_wait(spa_t *spa)
{
	mutex_enter(&spa->spa_evicting_os_lock);
	while (!list_is_empty(&spa->spa_evicting_os_list))
		cv_wait(&spa->spa_evicting_os_cv, &spa->spa_evicting_os_lock);
	mutex_exit(&spa->spa_evicting_os_lock);

	dmu_buf_user_evict_wait();
}

int
spa_max_replication(spa_t *spa)
{
	/*
	 * As of SPA_VERSION == SPA_VERSION_DITTO_BLOCKS, we are able to
	 * handle BPs with more than one DVA allocated.  Set our max
	 * replication level accordingly.
	 */
	if (spa_version(spa) < SPA_VERSION_DITTO_BLOCKS)
		return (1);
	return (MIN(SPA_DVAS_PER_BP, spa_max_replication_override));
}

int
spa_prev_software_version(spa_t *spa)
{
	return (spa->spa_prev_software_version);
}

uint64_t
spa_deadman_synctime(spa_t *spa)
{
	return (spa->spa_deadman_synctime);
}

uint64_t
dva_get_dsize_sync(spa_t *spa, const dva_t *dva)
{
	uint64_t asize = DVA_GET_ASIZE(dva);
	uint64_t dsize = asize;

	ASSERT(spa_config_held(spa, SCL_ALL, RW_READER) != 0);

	if (asize != 0 && spa->spa_deflate) {
		uint64_t vdev = DVA_GET_VDEV(dva);
		vdev_t *vd = vdev_lookup_top(spa, vdev);
		if (vd == NULL) {
			panic(
			    "dva_get_dsize_sync(): bad DVA %llu:%llu",
			    (u_longlong_t)vdev, (u_longlong_t)asize);
		}
		dsize = (asize >> SPA_MINBLOCKSHIFT) * vd->vdev_deflate_ratio;
	}

	return (dsize);
}

uint64_t
bp_get_dsize_sync(spa_t *spa, const blkptr_t *bp)
{
	uint64_t dsize = 0;

	for (int d = 0; d < BP_GET_NDVAS(bp); d++)
		dsize += dva_get_dsize_sync(spa, &bp->blk_dva[d]);

	return (dsize);
}

uint64_t
bp_get_dsize(spa_t *spa, const blkptr_t *bp)
{
	uint64_t dsize = 0;

	spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);

	for (int d = 0; d < BP_GET_NDVAS(bp); d++)
		dsize += dva_get_dsize_sync(spa, &bp->blk_dva[d]);

	spa_config_exit(spa, SCL_VDEV, FTAG);

	return (dsize);
}
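/*
 * The two bp_get_dsize variants differ only in locking: bp_get_dsize_sync()
 * asserts that the caller already holds some config lock as reader (as is
 * the case in syncing context), while bp_get_dsize() takes SCL_VDEV itself
 * around the same per-DVA summation, so it is safe to call with no config
 * locks held.
 */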
int
spa_prev_software_version(spa_t *spa)
{
	return (spa->spa_prev_software_version);
}

uint64_t
spa_deadman_synctime(spa_t *spa)
{
	return (spa->spa_deadman_synctime);
}

uint64_t
dva_get_dsize_sync(spa_t *spa, const dva_t *dva)
{
	uint64_t asize = DVA_GET_ASIZE(dva);
	uint64_t dsize = asize;

	ASSERT(spa_config_held(spa, SCL_ALL, RW_READER) != 0);

	if (asize != 0 && spa->spa_deflate) {
		uint64_t vdev = DVA_GET_VDEV(dva);
		vdev_t *vd = vdev_lookup_top(spa, vdev);
		if (vd == NULL) {
			panic(
			    "dva_get_dsize_sync(): bad DVA %llu:%llu",
			    (u_longlong_t)vdev, (u_longlong_t)asize);
		}
		dsize = (asize >> SPA_MINBLOCKSHIFT) * vd->vdev_deflate_ratio;
	}

	return (dsize);
}

uint64_t
bp_get_dsize_sync(spa_t *spa, const blkptr_t *bp)
{
	uint64_t dsize = 0;

	for (int d = 0; d < BP_GET_NDVAS(bp); d++)
		dsize += dva_get_dsize_sync(spa, &bp->blk_dva[d]);

	return (dsize);
}

uint64_t
bp_get_dsize(spa_t *spa, const blkptr_t *bp)
{
	uint64_t dsize = 0;

	spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);

	for (int d = 0; d < BP_GET_NDVAS(bp); d++)
		dsize += dva_get_dsize_sync(spa, &bp->blk_dva[d]);

	spa_config_exit(spa, SCL_VDEV, FTAG);

	return (dsize);
}
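/*
 * A worked example of the deflate adjustment above (illustrative figures,
 * not from the original source).  vdev_deflate_ratio is the number of
 * bytes of deflated space charged per 512-byte (SPA_MINBLOCKSHIFT)
 * allocated unit; a plain top-level vdev has a ratio of 512, so
 * dsize == asize there.  On a raidz vdev where parity inflation yields
 * a ratio of, say, 409:
 *
 *	asize = 160KB (163840 bytes)
 *	dsize = (163840 >> SPA_MINBLOCKSHIFT) * 409 = 320 * 409 = 130880
 *
 * i.e. roughly the 128KB of data that the parity-inflated allocation
 * actually holds.
 */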
/*
 * ==========================================================================
 * Initialization and Termination
 * ==========================================================================
 */

static int
spa_name_compare(const void *a1, const void *a2)
{
	const spa_t *s1 = a1;
	const spa_t *s2 = a2;
	int s;

	s = strcmp(s1->spa_name, s2->spa_name);
	if (s > 0)
		return (1);
	if (s < 0)
		return (-1);
	return (0);
}

int
spa_busy(void)
{
	return (spa_active_count);
}

void
spa_boot_init(void)
{
	spa_config_load();
}

#ifdef _KERNEL
EVENTHANDLER_DEFINE(mountroot, spa_boot_init, NULL, 0);
#endif

void
spa_init(int mode)
{
	mutex_init(&spa_namespace_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa_spare_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa_l2cache_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&spa_namespace_cv, NULL, CV_DEFAULT, NULL);

	avl_create(&spa_namespace_avl, spa_name_compare, sizeof (spa_t),
	    offsetof(spa_t, spa_avl));

	avl_create(&spa_spare_avl, spa_spare_compare, sizeof (spa_aux_t),
	    offsetof(spa_aux_t, aux_avl));

	avl_create(&spa_l2cache_avl, spa_l2cache_compare, sizeof (spa_aux_t),
	    offsetof(spa_aux_t, aux_avl));

	spa_mode_global = mode;

#ifdef illumos
#ifdef _KERNEL
	spa_arch_init();
#else
	if (spa_mode_global != FREAD && dprintf_find_string("watch")) {
		arc_procfd = open("/proc/self/ctl", O_WRONLY);
		if (arc_procfd == -1) {
			perror("could not enable watchpoints: "
			    "opening /proc/self/ctl failed: ");
		} else {
			arc_watch = B_TRUE;
		}
	}
#endif
#endif	/* illumos */
	refcount_sysinit();
	unique_init();
	range_tree_init();
	zio_init();
	lz4_init();
	dmu_init();
	zil_init();
	vdev_cache_stat_init();
	zfs_prop_init();
	zpool_prop_init();
	zpool_feature_init();
	spa_config_load();
	l2arc_start();
#ifndef illumos
#ifdef _KERNEL
	zfs_deadman_init();
#endif
#endif	/* !illumos */
}

void
spa_fini(void)
{
	l2arc_stop();

	spa_evict_all();

	vdev_cache_stat_fini();
	zil_fini();
	dmu_fini();
	lz4_fini();
	zio_fini();
	range_tree_fini();
	unique_fini();
	refcount_fini();

	avl_destroy(&spa_namespace_avl);
	avl_destroy(&spa_spare_avl);
	avl_destroy(&spa_l2cache_avl);

	cv_destroy(&spa_namespace_cv);
	mutex_destroy(&spa_namespace_lock);
	mutex_destroy(&spa_spare_lock);
	mutex_destroy(&spa_l2cache_lock);
}
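/*
 * An illustrative lifecycle sketch (editor's assumption, not from the
 * original source): userland consumers of libzpool, such as zdb and
 * ztest, bracket all pool work with this pair, e.g.:
 *
 *	spa_init(FREAD | FWRITE);
 *	... spa_open(), spa_close(), and other SPA operations ...
 *	spa_fini();
 *
 * Note that spa_fini() tears the subsystems down in the reverse of the
 * order in which spa_init() brought them up.
 */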
/*
 * Return whether this pool has slogs.  No locking needed.
 * It's not a problem if the wrong answer is returned, as it's only used
 * for performance, not correctness.
 */
boolean_t
spa_has_slogs(spa_t *spa)
{
	return (spa->spa_log_class->mc_rotor != NULL);
}

spa_log_state_t
spa_get_log_state(spa_t *spa)
{
	return (spa->spa_log_state);
}

void
spa_set_log_state(spa_t *spa, spa_log_state_t state)
{
	spa->spa_log_state = state;
}

boolean_t
spa_is_root(spa_t *spa)
{
	return (spa->spa_is_root);
}

boolean_t
spa_writeable(spa_t *spa)
{
	return (!!(spa->spa_mode & FWRITE));
}

/*
 * Returns true if there is a pending sync task in any of the current
 * syncing txg, the current quiescing txg, or the current open txg.
 */
boolean_t
spa_has_pending_synctask(spa_t *spa)
{
	return (!txg_all_lists_empty(&spa->spa_dsl_pool->dp_sync_tasks));
}

int
spa_mode(spa_t *spa)
{
	return (spa->spa_mode);
}

uint64_t
spa_bootfs(spa_t *spa)
{
	return (spa->spa_bootfs);
}

uint64_t
spa_delegation(spa_t *spa)
{
	return (spa->spa_delegation);
}

objset_t *
spa_meta_objset(spa_t *spa)
{
	return (spa->spa_meta_objset);
}

enum zio_checksum
spa_dedup_checksum(spa_t *spa)
{
	return (spa->spa_dedup_checksum);
}

/*
 * Reset pool scan stats per scan pass (or reboot).
 */
void
spa_scan_stat_init(spa_t *spa)
{
	/* data not stored on disk */
	spa->spa_scan_pass_start = gethrestime_sec();
	spa->spa_scan_pass_exam = 0;
	vdev_scan_stat_init(spa->spa_root_vdev);
}

/*
 * Get scan stats for zpool status reports.
 */
int
spa_scan_get_stats(spa_t *spa, pool_scan_stat_t *ps)
{
	dsl_scan_t *scn = spa->spa_dsl_pool ?
	    spa->spa_dsl_pool->dp_scan : NULL;

	if (scn == NULL || scn->scn_phys.scn_func == POOL_SCAN_NONE)
		return (SET_ERROR(ENOENT));
	bzero(ps, sizeof (pool_scan_stat_t));

	/* data stored on disk */
	ps->pss_func = scn->scn_phys.scn_func;
	ps->pss_start_time = scn->scn_phys.scn_start_time;
	ps->pss_end_time = scn->scn_phys.scn_end_time;
	ps->pss_to_examine = scn->scn_phys.scn_to_examine;
	ps->pss_examined = scn->scn_phys.scn_examined;
	ps->pss_to_process = scn->scn_phys.scn_to_process;
	ps->pss_processed = scn->scn_phys.scn_processed;
	ps->pss_errors = scn->scn_phys.scn_errors;
	ps->pss_state = scn->scn_phys.scn_state;

	/* data not stored on disk */
	ps->pss_pass_start = spa->spa_scan_pass_start;
	ps->pss_pass_exam = spa->spa_scan_pass_exam;

	return (0);
}

boolean_t
spa_debug_enabled(spa_t *spa)
{
	return (spa->spa_debug);
}

int
spa_maxblocksize(spa_t *spa)
{
	if (spa_feature_is_enabled(spa, SPA_FEATURE_LARGE_BLOCKS))
		return (SPA_MAXBLOCKSIZE);
	else
		return (SPA_OLD_MAXBLOCKSIZE);
}
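/*
 * An illustrative caller of spa_scan_get_stats() above (hypothetical, in
 * the spirit of the zpool-status reporting path).  An ENOENT return
 * simply means no scan has ever run on this pool:
 *
 *	pool_scan_stat_t ps;
 *
 *	if (spa_scan_get_stats(spa, &ps) == 0) {
 *		uint64_t elapsed = gethrestime_sec() - ps.pss_pass_start;
 *		uint64_t rate = ps.pss_pass_exam / MAX(elapsed, 1);
 *	}
 *
 * where 'rate' estimates bytes examined per second in the current pass.
 */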