1/* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21/* 22 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved. 23 * Copyright (c) 2011, 2015 by Delphix. All rights reserved. 24 * Copyright 2015 Nexenta Systems, Inc. All rights reserved. 25 * Copyright 2013 Martin Matuska <mm@FreeBSD.org>. All rights reserved. 26 * Copyright (c) 2014 Spectra Logic Corporation, All rights reserved. 27 * Copyright 2013 Saso Kiselkov. All rights reserved. 28 * Copyright (c) 2014 Integros [integros.com] 29 */ 30 31#include <sys/zfs_context.h> 32#include <sys/spa_impl.h> 33#include <sys/spa_boot.h> 34#include <sys/zio.h> 35#include <sys/zio_checksum.h> 36#include <sys/zio_compress.h> 37#include <sys/dmu.h> 38#include <sys/dmu_tx.h> 39#include <sys/zap.h> 40#include <sys/zil.h> 41#include <sys/vdev_impl.h> 42#include <sys/vdev_file.h> 43#include <sys/metaslab.h> 44#include <sys/uberblock_impl.h> 45#include <sys/txg.h> 46#include <sys/avl.h> 47#include <sys/unique.h> 48#include <sys/dsl_pool.h> 49#include <sys/dsl_dir.h> 50#include <sys/dsl_prop.h> 51#include <sys/dsl_scan.h> 52#include <sys/fs/zfs.h> 53#include <sys/metaslab_impl.h> 54#include <sys/arc.h> 55#include <sys/ddt.h> 56#include "zfs_prop.h" 57#include <sys/zfeature.h> 58 59/* 60 * SPA locking 61 * 62 * There are four basic locks for managing spa_t structures: 63 * 64 * spa_namespace_lock (global mutex) 65 * 66 * This lock must be acquired to do any of the following: 67 * 68 * - Lookup a spa_t by name 69 * - Add or remove a spa_t from the namespace 70 * - Increase spa_refcount from non-zero 71 * - Check if spa_refcount is zero 72 * - Rename a spa_t 73 * - add/remove/attach/detach devices 74 * - Held for the duration of create/destroy/import/export 75 * 76 * It does not need to handle recursion. A create or destroy may 77 * reference objects (files or zvols) in other pools, but by 78 * definition they must have an existing reference, and will never need 79 * to lookup a spa_t by name. 80 * 81 * spa_refcount (per-spa refcount_t protected by mutex) 82 * 83 * This reference count keep track of any active users of the spa_t. The 84 * spa_t cannot be destroyed or freed while this is non-zero. Internally, 85 * the refcount is never really 'zero' - opening a pool implicitly keeps 86 * some references in the DMU. Internally we check against spa_minref, but 87 * present the image of a zero/non-zero value to consumers. 88 * 89 * spa_config_lock[] (per-spa array of rwlocks) 90 * 91 * This protects the spa_t from config changes, and must be held in 92 * the following circumstances: 93 * 94 * - RW_READER to perform I/O to the spa 95 * - RW_WRITER to change the vdev config 96 * 97 * The locking order is fairly straightforward: 98 * 99 * spa_namespace_lock -> spa_refcount 100 * 101 * The namespace lock must be acquired to increase the refcount from 0 102 * or to check if it is zero. 103 * 104 * spa_refcount -> spa_config_lock[] 105 * 106 * There must be at least one valid reference on the spa_t to acquire 107 * the config lock. 108 * 109 * spa_namespace_lock -> spa_config_lock[] 110 * 111 * The namespace lock must always be taken before the config lock. 112 * 113 * 114 * The spa_namespace_lock can be acquired directly and is globally visible. 115 * 116 * The namespace is manipulated using the following functions, all of which 117 * require the spa_namespace_lock to be held. 118 * 119 * spa_lookup() Lookup a spa_t by name. 120 * 121 * spa_add() Create a new spa_t in the namespace. 122 * 123 * spa_remove() Remove a spa_t from the namespace. This also 124 * frees up any memory associated with the spa_t. 125 * 126 * spa_next() Returns the next spa_t in the system, or the 127 * first if NULL is passed. 128 * 129 * spa_evict_all() Shutdown and remove all spa_t structures in 130 * the system. 131 * 132 * spa_guid_exists() Determine whether a pool/device guid exists. 133 * 134 * The spa_refcount is manipulated using the following functions: 135 * 136 * spa_open_ref() Adds a reference to the given spa_t. Must be 137 * called with spa_namespace_lock held if the 138 * refcount is currently zero. 139 * 140 * spa_close() Remove a reference from the spa_t. This will 141 * not free the spa_t or remove it from the 142 * namespace. No locking is required. 143 * 144 * spa_refcount_zero() Returns true if the refcount is currently 145 * zero. Must be called with spa_namespace_lock 146 * held. 147 * 148 * The spa_config_lock[] is an array of rwlocks, ordered as follows: 149 * SCL_CONFIG > SCL_STATE > SCL_ALLOC > SCL_ZIO > SCL_FREE > SCL_VDEV. 150 * spa_config_lock[] is manipulated with spa_config_{enter,exit,held}(). 151 * 152 * To read the configuration, it suffices to hold one of these locks as reader. 153 * To modify the configuration, you must hold all locks as writer. To modify 154 * vdev state without altering the vdev tree's topology (e.g. online/offline), 155 * you must hold SCL_STATE and SCL_ZIO as writer. 156 * 157 * We use these distinct config locks to avoid recursive lock entry. 158 * For example, spa_sync() (which holds SCL_CONFIG as reader) induces 159 * block allocations (SCL_ALLOC), which may require reading space maps 160 * from disk (dmu_read() -> zio_read() -> SCL_ZIO). 161 * 162 * The spa config locks cannot be normal rwlocks because we need the 163 * ability to hand off ownership. For example, SCL_ZIO is acquired 164 * by the issuing thread and later released by an interrupt thread. 165 * They do, however, obey the usual write-wanted semantics to prevent 166 * writer (i.e. system administrator) starvation. 167 * 168 * The lock acquisition rules are as follows: 169 * 170 * SCL_CONFIG 171 * Protects changes to the vdev tree topology, such as vdev 172 * add/remove/attach/detach. Protects the dirty config list 173 * (spa_config_dirty_list) and the set of spares and l2arc devices. 174 * 175 * SCL_STATE 176 * Protects changes to pool state and vdev state, such as vdev 177 * online/offline/fault/degrade/clear. Protects the dirty state list 178 * (spa_state_dirty_list) and global pool state (spa_state). 179 * 180 * SCL_ALLOC 181 * Protects changes to metaslab groups and classes. 182 * Held as reader by metaslab_alloc() and metaslab_claim(). 183 * 184 * SCL_ZIO 185 * Held by bp-level zios (those which have no io_vd upon entry) 186 * to prevent changes to the vdev tree. The bp-level zio implicitly 187 * protects all of its vdev child zios, which do not hold SCL_ZIO. 188 * 189 * SCL_FREE 190 * Protects changes to metaslab groups and classes. 191 * Held as reader by metaslab_free(). SCL_FREE is distinct from 192 * SCL_ALLOC, and lower than SCL_ZIO, so that we can safely free 193 * blocks in zio_done() while another i/o that holds either 194 * SCL_ALLOC or SCL_ZIO is waiting for this i/o to complete. 195 * 196 * SCL_VDEV 197 * Held as reader to prevent changes to the vdev tree during trivial 198 * inquiries such as bp_get_dsize(). SCL_VDEV is distinct from the 199 * other locks, and lower than all of them, to ensure that it's safe 200 * to acquire regardless of caller context. 201 * 202 * In addition, the following rules apply: 203 * 204 * (a) spa_props_lock protects pool properties, spa_config and spa_config_list. 205 * The lock ordering is SCL_CONFIG > spa_props_lock. 206 * 207 * (b) I/O operations on leaf vdevs. For any zio operation that takes 208 * an explicit vdev_t argument -- such as zio_ioctl(), zio_read_phys(), 209 * or zio_write_phys() -- the caller must ensure that the config cannot 210 * cannot change in the interim, and that the vdev cannot be reopened. 211 * SCL_STATE as reader suffices for both. 212 * 213 * The vdev configuration is protected by spa_vdev_enter() / spa_vdev_exit(). 214 * 215 * spa_vdev_enter() Acquire the namespace lock and the config lock 216 * for writing. 217 * 218 * spa_vdev_exit() Release the config lock, wait for all I/O 219 * to complete, sync the updated configs to the 220 * cache, and release the namespace lock. 221 * 222 * vdev state is protected by spa_vdev_state_enter() / spa_vdev_state_exit(). 223 * Like spa_vdev_enter/exit, these are convenience wrappers -- the actual 224 * locking is, always, based on spa_namespace_lock and spa_config_lock[]. 225 * 226 * spa_rename() is also implemented within this file since it requires 227 * manipulation of the namespace. 228 */ 229 230static avl_tree_t spa_namespace_avl; 231kmutex_t spa_namespace_lock; 232static kcondvar_t spa_namespace_cv; 233static int spa_active_count; 234int spa_max_replication_override = SPA_DVAS_PER_BP; 235 236static kmutex_t spa_spare_lock; 237static avl_tree_t spa_spare_avl; 238static kmutex_t spa_l2cache_lock; 239static avl_tree_t spa_l2cache_avl; 240 241kmem_cache_t *spa_buffer_pool; 242int spa_mode_global; 243 244#ifdef ZFS_DEBUG 245/* Everything except dprintf and spa is on by default in debug builds */ 246int zfs_flags = ~(ZFS_DEBUG_DPRINTF | ZFS_DEBUG_SPA); 247#else 248int zfs_flags = 0; 249#endif 250SYSCTL_DECL(_debug); 251TUNABLE_INT("debug.zfs_flags", &zfs_flags); 252SYSCTL_INT(_debug, OID_AUTO, zfs_flags, CTLFLAG_RWTUN, &zfs_flags, 0, 253 "ZFS debug flags."); 254 255/* 256 * zfs_recover can be set to nonzero to attempt to recover from 257 * otherwise-fatal errors, typically caused by on-disk corruption. When 258 * set, calls to zfs_panic_recover() will turn into warning messages. 259 * This should only be used as a last resort, as it typically results 260 * in leaked space, or worse. 261 */ 262boolean_t zfs_recover = B_FALSE; 263SYSCTL_DECL(_vfs_zfs); 264TUNABLE_INT("vfs.zfs.recover", &zfs_recover); 265SYSCTL_INT(_vfs_zfs, OID_AUTO, recover, CTLFLAG_RWTUN, &zfs_recover, 0, 266 "Try to recover from otherwise-fatal errors."); 267 268static int 269sysctl_vfs_zfs_debug_flags(SYSCTL_HANDLER_ARGS) 270{ 271 int err, val; 272 273 val = zfs_flags; 274 err = sysctl_handle_int(oidp, &val, 0, req); 275 if (err != 0 || req->newptr == NULL) 276 return (err); 277 278 /* 279 * ZFS_DEBUG_MODIFY must be enabled prior to boot so all 280 * arc buffers in the system have the necessary additional 281 * checksum data. However, it is safe to disable at any 282 * time. 283 */ 284 if (!(zfs_flags & ZFS_DEBUG_MODIFY)) 285 val &= ~ZFS_DEBUG_MODIFY; 286 zfs_flags = val; 287 288 return (0); 289} 290TUNABLE_INT("vfs.zfs.debugflags", &zfs_flags); 291SYSCTL_PROC(_vfs_zfs, OID_AUTO, debugflags, 292 CTLTYPE_UINT | CTLFLAG_MPSAFE | CTLFLAG_RW, 0, sizeof(int), 293 sysctl_vfs_zfs_debug_flags, "IU", "Debug flags for ZFS testing."); 294SYSCTL_PROC(_vfs_zfs, OID_AUTO, debug_flags, 295 CTLTYPE_UINT | CTLFLAG_MPSAFE | CTLFLAG_RW, 0, sizeof(int), 296 sysctl_vfs_zfs_debug_flags, "IU", 297 "Debug flags for ZFS testing (deprecated, see vfs.zfs.debugflags)."); 298 299/* 300 * If destroy encounters an EIO while reading metadata (e.g. indirect 301 * blocks), space referenced by the missing metadata can not be freed. 302 * Normally this causes the background destroy to become "stalled", as 303 * it is unable to make forward progress. While in this stalled state, 304 * all remaining space to free from the error-encountering filesystem is 305 * "temporarily leaked". Set this flag to cause it to ignore the EIO, 306 * permanently leak the space from indirect blocks that can not be read, 307 * and continue to free everything else that it can. 308 * 309 * The default, "stalling" behavior is useful if the storage partially 310 * fails (i.e. some but not all i/os fail), and then later recovers. In 311 * this case, we will be able to continue pool operations while it is 312 * partially failed, and when it recovers, we can continue to free the 313 * space, with no leaks. However, note that this case is actually 314 * fairly rare. 315 * 316 * Typically pools either (a) fail completely (but perhaps temporarily, 317 * e.g. a top-level vdev going offline), or (b) have localized, 318 * permanent errors (e.g. disk returns the wrong data due to bit flip or 319 * firmware bug). In case (a), this setting does not matter because the 320 * pool will be suspended and the sync thread will not be able to make 321 * forward progress regardless. In case (b), because the error is 322 * permanent, the best we can do is leak the minimum amount of space, 323 * which is what setting this flag will do. Therefore, it is reasonable 324 * for this flag to normally be set, but we chose the more conservative 325 * approach of not setting it, so that there is no possibility of 326 * leaking space in the "partial temporary" failure case. 327 */ 328boolean_t zfs_free_leak_on_eio = B_FALSE; 329 330/* 331 * Expiration time in milliseconds. This value has two meanings. First it is 332 * used to determine when the spa_deadman() logic should fire. By default the 333 * spa_deadman() will fire if spa_sync() has not completed in 1000 seconds. 334 * Secondly, the value determines if an I/O is considered "hung". Any I/O that 335 * has not completed in zfs_deadman_synctime_ms is considered "hung" resulting 336 * in a system panic. 337 */ 338uint64_t zfs_deadman_synctime_ms = 1000000ULL; 339TUNABLE_QUAD("vfs.zfs.deadman_synctime_ms", &zfs_deadman_synctime_ms); 340SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, deadman_synctime_ms, CTLFLAG_RDTUN, 341 &zfs_deadman_synctime_ms, 0, 342 "Stalled ZFS I/O expiration time in milliseconds"); 343 344/* 345 * Check time in milliseconds. This defines the frequency at which we check 346 * for hung I/O. 347 */ 348uint64_t zfs_deadman_checktime_ms = 5000ULL; 349TUNABLE_QUAD("vfs.zfs.deadman_checktime_ms", &zfs_deadman_checktime_ms); 350SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, deadman_checktime_ms, CTLFLAG_RDTUN, 351 &zfs_deadman_checktime_ms, 0, 352 "Period of checks for stalled ZFS I/O in milliseconds"); 353 354/* 355 * Default value of -1 for zfs_deadman_enabled is resolved in 356 * zfs_deadman_init() 357 */ 358int zfs_deadman_enabled = -1; 359TUNABLE_INT("vfs.zfs.deadman_enabled", &zfs_deadman_enabled); 360SYSCTL_INT(_vfs_zfs, OID_AUTO, deadman_enabled, CTLFLAG_RDTUN, 361 &zfs_deadman_enabled, 0, "Kernel panic on stalled ZFS I/O"); 362 363/* 364 * The worst case is single-sector max-parity RAID-Z blocks, in which 365 * case the space requirement is exactly (VDEV_RAIDZ_MAXPARITY + 1) 366 * times the size; so just assume that. Add to this the fact that 367 * we can have up to 3 DVAs per bp, and one more factor of 2 because 368 * the block may be dittoed with up to 3 DVAs by ddt_sync(). All together, 369 * the worst case is: 370 * (VDEV_RAIDZ_MAXPARITY + 1) * SPA_DVAS_PER_BP * 2 == 24 371 */ 372int spa_asize_inflation = 24; 373TUNABLE_INT("vfs.zfs.spa_asize_inflation", &spa_asize_inflation); 374SYSCTL_INT(_vfs_zfs, OID_AUTO, spa_asize_inflation, CTLFLAG_RWTUN, 375 &spa_asize_inflation, 0, "Worst case inflation factor for single sector writes"); 376 377#ifndef illumos 378#ifdef _KERNEL 379static void 380zfs_deadman_init() 381{ 382 /* 383 * If we are not i386 or amd64 or in a virtual machine, 384 * disable ZFS deadman thread by default 385 */ 386 if (zfs_deadman_enabled == -1) { 387#if defined(__amd64__) || defined(__i386__) 388 zfs_deadman_enabled = (vm_guest == VM_GUEST_NO) ? 1 : 0; 389#else 390 zfs_deadman_enabled = 0; 391#endif 392 } 393} 394#endif /* _KERNEL */ 395#endif /* !illumos */ 396 397/* 398 * Normally, we don't allow the last 3.2% (1/(2^spa_slop_shift)) of space in 399 * the pool to be consumed. This ensures that we don't run the pool 400 * completely out of space, due to unaccounted changes (e.g. to the MOS). 401 * It also limits the worst-case time to allocate space. If we have 402 * less than this amount of free space, most ZPL operations (e.g. write, 403 * create) will return ENOSPC. 404 * 405 * Certain operations (e.g. file removal, most administrative actions) can 406 * use half the slop space. They will only return ENOSPC if less than half 407 * the slop space is free. Typically, once the pool has less than the slop 408 * space free, the user will use these operations to free up space in the pool. 409 * These are the operations that call dsl_pool_adjustedsize() with the netfree 410 * argument set to TRUE. 411 * 412 * A very restricted set of operations are always permitted, regardless of 413 * the amount of free space. These are the operations that call 414 * dsl_sync_task(ZFS_SPACE_CHECK_NONE), e.g. "zfs destroy". If these 415 * operations result in a net increase in the amount of space used, 416 * it is possible to run the pool completely out of space, causing it to 417 * be permanently read-only. 418 * 419 * Note that on very small pools, the slop space will be larger than 420 * 3.2%, in an effort to have it be at least spa_min_slop (128MB), 421 * but we never allow it to be more than half the pool size. 422 * 423 * See also the comments in zfs_space_check_t. 424 */ 425int spa_slop_shift = 5; 426SYSCTL_INT(_vfs_zfs, OID_AUTO, spa_slop_shift, CTLFLAG_RWTUN, 427 &spa_slop_shift, 0, 428 "Shift value of reserved space (1/(2^spa_slop_shift))."); 429uint64_t spa_min_slop = 128 * 1024 * 1024; 430SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, spa_min_slop, CTLFLAG_RWTUN, 431 &spa_min_slop, 0, 432 "Minimal value of reserved space"); 433 434/* 435 * ========================================================================== 436 * SPA config locking 437 * ========================================================================== 438 */ 439static void 440spa_config_lock_init(spa_t *spa) 441{ 442 for (int i = 0; i < SCL_LOCKS; i++) { 443 spa_config_lock_t *scl = &spa->spa_config_lock[i]; 444 mutex_init(&scl->scl_lock, NULL, MUTEX_DEFAULT, NULL); 445 cv_init(&scl->scl_cv, NULL, CV_DEFAULT, NULL); 446 refcount_create_untracked(&scl->scl_count); 447 scl->scl_writer = NULL; 448 scl->scl_write_wanted = 0; 449 } 450} 451 452static void 453spa_config_lock_destroy(spa_t *spa) 454{ 455 for (int i = 0; i < SCL_LOCKS; i++) { 456 spa_config_lock_t *scl = &spa->spa_config_lock[i]; 457 mutex_destroy(&scl->scl_lock); 458 cv_destroy(&scl->scl_cv); 459 refcount_destroy(&scl->scl_count); 460 ASSERT(scl->scl_writer == NULL); 461 ASSERT(scl->scl_write_wanted == 0); 462 } 463} 464 465int 466spa_config_tryenter(spa_t *spa, int locks, void *tag, krw_t rw) 467{ 468 for (int i = 0; i < SCL_LOCKS; i++) { 469 spa_config_lock_t *scl = &spa->spa_config_lock[i]; 470 if (!(locks & (1 << i))) 471 continue; 472 mutex_enter(&scl->scl_lock); 473 if (rw == RW_READER) { 474 if (scl->scl_writer || scl->scl_write_wanted) { 475 mutex_exit(&scl->scl_lock); 476 spa_config_exit(spa, locks & ((1 << i) - 1), 477 tag); 478 return (0); 479 } 480 } else { 481 ASSERT(scl->scl_writer != curthread); 482 if (!refcount_is_zero(&scl->scl_count)) { 483 mutex_exit(&scl->scl_lock); 484 spa_config_exit(spa, locks & ((1 << i) - 1), 485 tag); 486 return (0); 487 } 488 scl->scl_writer = curthread; 489 } 490 (void) refcount_add(&scl->scl_count, tag); 491 mutex_exit(&scl->scl_lock); 492 } 493 return (1); 494} 495 496void 497spa_config_enter(spa_t *spa, int locks, void *tag, krw_t rw) 498{ 499 int wlocks_held = 0; 500 501 ASSERT3U(SCL_LOCKS, <, sizeof (wlocks_held) * NBBY); 502 503 for (int i = 0; i < SCL_LOCKS; i++) { 504 spa_config_lock_t *scl = &spa->spa_config_lock[i]; 505 if (scl->scl_writer == curthread) 506 wlocks_held |= (1 << i); 507 if (!(locks & (1 << i))) 508 continue; 509 mutex_enter(&scl->scl_lock); 510 if (rw == RW_READER) { 511 while (scl->scl_writer || scl->scl_write_wanted) { 512 cv_wait(&scl->scl_cv, &scl->scl_lock); 513 } 514 } else { 515 ASSERT(scl->scl_writer != curthread); 516 while (!refcount_is_zero(&scl->scl_count)) { 517 scl->scl_write_wanted++; 518 cv_wait(&scl->scl_cv, &scl->scl_lock); 519 scl->scl_write_wanted--; 520 } 521 scl->scl_writer = curthread; 522 } 523 (void) refcount_add(&scl->scl_count, tag); 524 mutex_exit(&scl->scl_lock); 525 } 526 ASSERT(wlocks_held <= locks); 527} 528 529void 530spa_config_exit(spa_t *spa, int locks, void *tag) 531{ 532 for (int i = SCL_LOCKS - 1; i >= 0; i--) { 533 spa_config_lock_t *scl = &spa->spa_config_lock[i]; 534 if (!(locks & (1 << i))) 535 continue; 536 mutex_enter(&scl->scl_lock); 537 ASSERT(!refcount_is_zero(&scl->scl_count)); 538 if (refcount_remove(&scl->scl_count, tag) == 0) { 539 ASSERT(scl->scl_writer == NULL || 540 scl->scl_writer == curthread); 541 scl->scl_writer = NULL; /* OK in either case */ 542 cv_broadcast(&scl->scl_cv); 543 } 544 mutex_exit(&scl->scl_lock); 545 } 546} 547 548int 549spa_config_held(spa_t *spa, int locks, krw_t rw) 550{ 551 int locks_held = 0; 552 553 for (int i = 0; i < SCL_LOCKS; i++) { 554 spa_config_lock_t *scl = &spa->spa_config_lock[i]; 555 if (!(locks & (1 << i))) 556 continue; 557 if ((rw == RW_READER && !refcount_is_zero(&scl->scl_count)) || 558 (rw == RW_WRITER && scl->scl_writer == curthread)) 559 locks_held |= 1 << i; 560 } 561 562 return (locks_held); 563} 564 565/* 566 * ========================================================================== 567 * SPA namespace functions 568 * ========================================================================== 569 */ 570 571/* 572 * Lookup the named spa_t in the AVL tree. The spa_namespace_lock must be held. 573 * Returns NULL if no matching spa_t is found. 574 */ 575spa_t * 576spa_lookup(const char *name) 577{ 578 static spa_t search; /* spa_t is large; don't allocate on stack */ 579 spa_t *spa; 580 avl_index_t where; 581 char *cp; 582 583 ASSERT(MUTEX_HELD(&spa_namespace_lock)); 584 585 (void) strlcpy(search.spa_name, name, sizeof (search.spa_name)); 586 587 /* 588 * If it's a full dataset name, figure out the pool name and 589 * just use that. 590 */ 591 cp = strpbrk(search.spa_name, "/@#"); 592 if (cp != NULL) 593 *cp = '\0'; 594 595 spa = avl_find(&spa_namespace_avl, &search, &where); 596 597 return (spa); 598} 599 600/* 601 * Fires when spa_sync has not completed within zfs_deadman_synctime_ms. 602 * If the zfs_deadman_enabled flag is set then it inspects all vdev queues 603 * looking for potentially hung I/Os. 604 */ 605static void 606spa_deadman(void *arg, int pending) 607{ 608 spa_t *spa = arg; 609 610 /* 611 * Disable the deadman timer if the pool is suspended. 612 */ 613 if (spa_suspended(spa)) { 614#ifdef illumos 615 VERIFY(cyclic_reprogram(spa->spa_deadman_cycid, CY_INFINITY)); 616#else 617 /* Nothing. just don't schedule any future callouts. */ 618#endif 619 return; 620 } 621 622 zfs_dbgmsg("slow spa_sync: started %llu seconds ago, calls %llu", 623 (gethrtime() - spa->spa_sync_starttime) / NANOSEC, 624 ++spa->spa_deadman_calls); 625 if (zfs_deadman_enabled) 626 vdev_deadman(spa->spa_root_vdev); 627#ifdef __FreeBSD__ 628#ifdef _KERNEL 629 callout_schedule(&spa->spa_deadman_cycid, 630 hz * zfs_deadman_checktime_ms / MILLISEC); 631#endif 632#endif 633} 634 635#if defined(__FreeBSD__) && defined(_KERNEL) 636static void 637spa_deadman_timeout(void *arg) 638{ 639 spa_t *spa = arg; 640 641 taskqueue_enqueue(taskqueue_thread, &spa->spa_deadman_task); 642} 643#endif 644 645/* 646 * Create an uninitialized spa_t with the given name. Requires 647 * spa_namespace_lock. The caller must ensure that the spa_t doesn't already 648 * exist by calling spa_lookup() first. 649 */ 650spa_t * 651spa_add(const char *name, nvlist_t *config, const char *altroot) 652{ 653 spa_t *spa; 654 spa_config_dirent_t *dp; 655#ifdef illumos 656 cyc_handler_t hdlr; 657 cyc_time_t when; 658#endif 659 660 ASSERT(MUTEX_HELD(&spa_namespace_lock)); 661 662 spa = kmem_zalloc(sizeof (spa_t), KM_SLEEP); 663 664 mutex_init(&spa->spa_async_lock, NULL, MUTEX_DEFAULT, NULL); 665 mutex_init(&spa->spa_errlist_lock, NULL, MUTEX_DEFAULT, NULL); 666 mutex_init(&spa->spa_errlog_lock, NULL, MUTEX_DEFAULT, NULL); 667 mutex_init(&spa->spa_evicting_os_lock, NULL, MUTEX_DEFAULT, NULL); 668 mutex_init(&spa->spa_history_lock, NULL, MUTEX_DEFAULT, NULL); 669 mutex_init(&spa->spa_proc_lock, NULL, MUTEX_DEFAULT, NULL); 670 mutex_init(&spa->spa_props_lock, NULL, MUTEX_DEFAULT, NULL); 671 mutex_init(&spa->spa_cksum_tmpls_lock, NULL, MUTEX_DEFAULT, NULL); 672 mutex_init(&spa->spa_scrub_lock, NULL, MUTEX_DEFAULT, NULL); 673 mutex_init(&spa->spa_suspend_lock, NULL, MUTEX_DEFAULT, NULL); 674 mutex_init(&spa->spa_vdev_top_lock, NULL, MUTEX_DEFAULT, NULL); 675 mutex_init(&spa->spa_alloc_lock, NULL, MUTEX_DEFAULT, NULL); 676 677 cv_init(&spa->spa_async_cv, NULL, CV_DEFAULT, NULL); 678 cv_init(&spa->spa_evicting_os_cv, NULL, CV_DEFAULT, NULL); 679 cv_init(&spa->spa_proc_cv, NULL, CV_DEFAULT, NULL); 680 cv_init(&spa->spa_scrub_io_cv, NULL, CV_DEFAULT, NULL); 681 cv_init(&spa->spa_suspend_cv, NULL, CV_DEFAULT, NULL); 682 683 for (int t = 0; t < TXG_SIZE; t++) 684 bplist_create(&spa->spa_free_bplist[t]); 685 686 (void) strlcpy(spa->spa_name, name, sizeof (spa->spa_name)); 687 spa->spa_state = POOL_STATE_UNINITIALIZED; 688 spa->spa_freeze_txg = UINT64_MAX; 689 spa->spa_final_txg = UINT64_MAX; 690 spa->spa_load_max_txg = UINT64_MAX; 691 spa->spa_proc = &p0; 692 spa->spa_proc_state = SPA_PROC_NONE; 693 694#ifdef illumos 695 hdlr.cyh_func = spa_deadman; 696 hdlr.cyh_arg = spa; 697 hdlr.cyh_level = CY_LOW_LEVEL; 698#endif 699 700 spa->spa_deadman_synctime = MSEC2NSEC(zfs_deadman_synctime_ms); 701 702#ifdef illumos 703 /* 704 * This determines how often we need to check for hung I/Os after 705 * the cyclic has already fired. Since checking for hung I/Os is 706 * an expensive operation we don't want to check too frequently. 707 * Instead wait for 5 seconds before checking again. 708 */ 709 when.cyt_interval = MSEC2NSEC(zfs_deadman_checktime_ms); 710 when.cyt_when = CY_INFINITY; 711 mutex_enter(&cpu_lock); 712 spa->spa_deadman_cycid = cyclic_add(&hdlr, &when); 713 mutex_exit(&cpu_lock); 714#else /* !illumos */ 715#ifdef _KERNEL 716 /* 717 * callout(9) does not provide a way to initialize a callout with 718 * a function and an argument, so we use callout_reset() to schedule 719 * the callout in the very distant future. Even if that event ever 720 * fires, it should be okayas we won't have any active zio-s. 721 * But normally spa_sync() will reschedule the callout with a proper 722 * timeout. 723 * callout(9) does not allow the callback function to sleep but 724 * vdev_deadman() needs to acquire vq_lock and illumos mutexes are 725 * emulated using sx(9). For this reason spa_deadman_timeout() 726 * will schedule spa_deadman() as task on a taskqueue that allows 727 * sleeping. 728 */ 729 TASK_INIT(&spa->spa_deadman_task, 0, spa_deadman, spa); 730 callout_init(&spa->spa_deadman_cycid, 1); 731 callout_reset_sbt(&spa->spa_deadman_cycid, SBT_MAX, 0, 732 spa_deadman_timeout, spa, 0); 733#endif 734#endif 735 refcount_create(&spa->spa_refcount); 736 spa_config_lock_init(spa); 737 738 avl_add(&spa_namespace_avl, spa); 739 740 /* 741 * Set the alternate root, if there is one. 742 */ 743 if (altroot) { 744 spa->spa_root = spa_strdup(altroot); 745 spa_active_count++; 746 } 747 748 avl_create(&spa->spa_alloc_tree, zio_timestamp_compare, 749 sizeof (zio_t), offsetof(zio_t, io_alloc_node)); 750 751 /* 752 * Every pool starts with the default cachefile 753 */ 754 list_create(&spa->spa_config_list, sizeof (spa_config_dirent_t), 755 offsetof(spa_config_dirent_t, scd_link)); 756 757 dp = kmem_zalloc(sizeof (spa_config_dirent_t), KM_SLEEP); 758 dp->scd_path = altroot ? NULL : spa_strdup(spa_config_path); 759 list_insert_head(&spa->spa_config_list, dp); 760 761 VERIFY(nvlist_alloc(&spa->spa_load_info, NV_UNIQUE_NAME, 762 KM_SLEEP) == 0); 763 764 if (config != NULL) { 765 nvlist_t *features; 766 767 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_FEATURES_FOR_READ, 768 &features) == 0) { 769 VERIFY(nvlist_dup(features, &spa->spa_label_features, 770 0) == 0); 771 } 772 773 VERIFY(nvlist_dup(config, &spa->spa_config, 0) == 0); 774 } 775 776 if (spa->spa_label_features == NULL) { 777 VERIFY(nvlist_alloc(&spa->spa_label_features, NV_UNIQUE_NAME, 778 KM_SLEEP) == 0); 779 } 780 781 spa->spa_debug = ((zfs_flags & ZFS_DEBUG_SPA) != 0); 782 783 spa->spa_min_ashift = INT_MAX; 784 spa->spa_max_ashift = 0; 785 786 /* 787 * As a pool is being created, treat all features as disabled by 788 * setting SPA_FEATURE_DISABLED for all entries in the feature 789 * refcount cache. 790 */ 791 for (int i = 0; i < SPA_FEATURES; i++) { 792 spa->spa_feat_refcount_cache[i] = SPA_FEATURE_DISABLED; 793 } 794 795 return (spa); 796} 797 798/* 799 * Removes a spa_t from the namespace, freeing up any memory used. Requires 800 * spa_namespace_lock. This is called only after the spa_t has been closed and 801 * deactivated. 802 */ 803void 804spa_remove(spa_t *spa) 805{ 806 spa_config_dirent_t *dp; 807 808 ASSERT(MUTEX_HELD(&spa_namespace_lock)); 809 ASSERT(spa->spa_state == POOL_STATE_UNINITIALIZED); 810 ASSERT3U(refcount_count(&spa->spa_refcount), ==, 0); 811 812 nvlist_free(spa->spa_config_splitting); 813 814 avl_remove(&spa_namespace_avl, spa); 815 cv_broadcast(&spa_namespace_cv); 816 817 if (spa->spa_root) { 818 spa_strfree(spa->spa_root); 819 spa_active_count--; 820 } 821 822 while ((dp = list_head(&spa->spa_config_list)) != NULL) { 823 list_remove(&spa->spa_config_list, dp); 824 if (dp->scd_path != NULL) 825 spa_strfree(dp->scd_path); 826 kmem_free(dp, sizeof (spa_config_dirent_t)); 827 } 828 829 avl_destroy(&spa->spa_alloc_tree); 830 list_destroy(&spa->spa_config_list); 831 832 nvlist_free(spa->spa_label_features); 833 nvlist_free(spa->spa_load_info); 834 spa_config_set(spa, NULL); 835 836#ifdef illumos 837 mutex_enter(&cpu_lock); 838 if (spa->spa_deadman_cycid != CYCLIC_NONE) 839 cyclic_remove(spa->spa_deadman_cycid); 840 mutex_exit(&cpu_lock); 841 spa->spa_deadman_cycid = CYCLIC_NONE; 842#else /* !illumos */ 843#ifdef _KERNEL 844 callout_drain(&spa->spa_deadman_cycid); 845 taskqueue_drain(taskqueue_thread, &spa->spa_deadman_task); 846#endif 847#endif 848 849 refcount_destroy(&spa->spa_refcount); 850 851 spa_config_lock_destroy(spa); 852 853 for (int t = 0; t < TXG_SIZE; t++) 854 bplist_destroy(&spa->spa_free_bplist[t]); 855 856 zio_checksum_templates_free(spa); 857 858 cv_destroy(&spa->spa_async_cv); 859 cv_destroy(&spa->spa_evicting_os_cv); 860 cv_destroy(&spa->spa_proc_cv); 861 cv_destroy(&spa->spa_scrub_io_cv); 862 cv_destroy(&spa->spa_suspend_cv); 863 864 mutex_destroy(&spa->spa_alloc_lock); 865 mutex_destroy(&spa->spa_async_lock); 866 mutex_destroy(&spa->spa_errlist_lock); 867 mutex_destroy(&spa->spa_errlog_lock); 868 mutex_destroy(&spa->spa_evicting_os_lock); 869 mutex_destroy(&spa->spa_history_lock); 870 mutex_destroy(&spa->spa_proc_lock); 871 mutex_destroy(&spa->spa_props_lock); 872 mutex_destroy(&spa->spa_cksum_tmpls_lock); 873 mutex_destroy(&spa->spa_scrub_lock); 874 mutex_destroy(&spa->spa_suspend_lock); 875 mutex_destroy(&spa->spa_vdev_top_lock); 876 877 kmem_free(spa, sizeof (spa_t)); 878} 879 880/* 881 * Given a pool, return the next pool in the namespace, or NULL if there is 882 * none. If 'prev' is NULL, return the first pool. 883 */ 884spa_t * 885spa_next(spa_t *prev) 886{ 887 ASSERT(MUTEX_HELD(&spa_namespace_lock)); 888 889 if (prev) 890 return (AVL_NEXT(&spa_namespace_avl, prev)); 891 else 892 return (avl_first(&spa_namespace_avl)); 893} 894 895/* 896 * ========================================================================== 897 * SPA refcount functions 898 * ========================================================================== 899 */ 900 901/* 902 * Add a reference to the given spa_t. Must have at least one reference, or 903 * have the namespace lock held. 904 */ 905void 906spa_open_ref(spa_t *spa, void *tag) 907{ 908 ASSERT(refcount_count(&spa->spa_refcount) >= spa->spa_minref || 909 MUTEX_HELD(&spa_namespace_lock)); 910 (void) refcount_add(&spa->spa_refcount, tag); 911} 912 913/* 914 * Remove a reference to the given spa_t. Must have at least one reference, or 915 * have the namespace lock held. 916 */ 917void 918spa_close(spa_t *spa, void *tag) 919{ 920 ASSERT(refcount_count(&spa->spa_refcount) > spa->spa_minref || 921 MUTEX_HELD(&spa_namespace_lock)); 922 (void) refcount_remove(&spa->spa_refcount, tag); 923} 924 925/* 926 * Remove a reference to the given spa_t held by a dsl dir that is 927 * being asynchronously released. Async releases occur from a taskq 928 * performing eviction of dsl datasets and dirs. The namespace lock 929 * isn't held and the hold by the object being evicted may contribute to 930 * spa_minref (e.g. dataset or directory released during pool export), 931 * so the asserts in spa_close() do not apply. 932 */ 933void 934spa_async_close(spa_t *spa, void *tag) 935{ 936 (void) refcount_remove(&spa->spa_refcount, tag); 937} 938 939/* 940 * Check to see if the spa refcount is zero. Must be called with 941 * spa_namespace_lock held. We really compare against spa_minref, which is the 942 * number of references acquired when opening a pool 943 */ 944boolean_t 945spa_refcount_zero(spa_t *spa) 946{ 947 ASSERT(MUTEX_HELD(&spa_namespace_lock)); 948 949 return (refcount_count(&spa->spa_refcount) == spa->spa_minref); 950} 951 952/* 953 * ========================================================================== 954 * SPA spare and l2cache tracking 955 * ========================================================================== 956 */ 957 958/* 959 * Hot spares and cache devices are tracked using the same code below, 960 * for 'auxiliary' devices. 961 */ 962 963typedef struct spa_aux { 964 uint64_t aux_guid; 965 uint64_t aux_pool; 966 avl_node_t aux_avl; 967 int aux_count; 968} spa_aux_t; 969 970static int 971spa_aux_compare(const void *a, const void *b) 972{ 973 const spa_aux_t *sa = a; 974 const spa_aux_t *sb = b; 975 976 if (sa->aux_guid < sb->aux_guid) 977 return (-1); 978 else if (sa->aux_guid > sb->aux_guid) 979 return (1); 980 else 981 return (0); 982} 983 984void 985spa_aux_add(vdev_t *vd, avl_tree_t *avl) 986{ 987 avl_index_t where; 988 spa_aux_t search; 989 spa_aux_t *aux; 990 991 search.aux_guid = vd->vdev_guid; 992 if ((aux = avl_find(avl, &search, &where)) != NULL) { 993 aux->aux_count++; 994 } else { 995 aux = kmem_zalloc(sizeof (spa_aux_t), KM_SLEEP); 996 aux->aux_guid = vd->vdev_guid; 997 aux->aux_count = 1; 998 avl_insert(avl, aux, where); 999 } 1000} 1001 1002void 1003spa_aux_remove(vdev_t *vd, avl_tree_t *avl) 1004{ 1005 spa_aux_t search; 1006 spa_aux_t *aux; 1007 avl_index_t where; 1008 1009 search.aux_guid = vd->vdev_guid; 1010 aux = avl_find(avl, &search, &where); 1011 1012 ASSERT(aux != NULL); 1013 1014 if (--aux->aux_count == 0) { 1015 avl_remove(avl, aux); 1016 kmem_free(aux, sizeof (spa_aux_t)); 1017 } else if (aux->aux_pool == spa_guid(vd->vdev_spa)) { 1018 aux->aux_pool = 0ULL; 1019 } 1020} 1021 1022boolean_t 1023spa_aux_exists(uint64_t guid, uint64_t *pool, int *refcnt, avl_tree_t *avl) 1024{ 1025 spa_aux_t search, *found; 1026 1027 search.aux_guid = guid; 1028 found = avl_find(avl, &search, NULL); 1029 1030 if (pool) { 1031 if (found) 1032 *pool = found->aux_pool; 1033 else 1034 *pool = 0ULL; 1035 } 1036 1037 if (refcnt) { 1038 if (found) 1039 *refcnt = found->aux_count; 1040 else 1041 *refcnt = 0; 1042 } 1043 1044 return (found != NULL); 1045} 1046 1047void 1048spa_aux_activate(vdev_t *vd, avl_tree_t *avl) 1049{ 1050 spa_aux_t search, *found; 1051 avl_index_t where; 1052 1053 search.aux_guid = vd->vdev_guid; 1054 found = avl_find(avl, &search, &where); 1055 ASSERT(found != NULL); 1056 ASSERT(found->aux_pool == 0ULL); 1057 1058 found->aux_pool = spa_guid(vd->vdev_spa); 1059} 1060 1061/* 1062 * Spares are tracked globally due to the following constraints: 1063 * 1064 * - A spare may be part of multiple pools. 1065 * - A spare may be added to a pool even if it's actively in use within 1066 * another pool. 1067 * - A spare in use in any pool can only be the source of a replacement if 1068 * the target is a spare in the same pool. 1069 * 1070 * We keep track of all spares on the system through the use of a reference 1071 * counted AVL tree. When a vdev is added as a spare, or used as a replacement 1072 * spare, then we bump the reference count in the AVL tree. In addition, we set 1073 * the 'vdev_isspare' member to indicate that the device is a spare (active or 1074 * inactive). When a spare is made active (used to replace a device in the 1075 * pool), we also keep track of which pool its been made a part of. 1076 * 1077 * The 'spa_spare_lock' protects the AVL tree. These functions are normally 1078 * called under the spa_namespace lock as part of vdev reconfiguration. The 1079 * separate spare lock exists for the status query path, which does not need to 1080 * be completely consistent with respect to other vdev configuration changes. 1081 */ 1082 1083static int 1084spa_spare_compare(const void *a, const void *b) 1085{ 1086 return (spa_aux_compare(a, b)); 1087} 1088 1089void 1090spa_spare_add(vdev_t *vd) 1091{ 1092 mutex_enter(&spa_spare_lock); 1093 ASSERT(!vd->vdev_isspare); 1094 spa_aux_add(vd, &spa_spare_avl); 1095 vd->vdev_isspare = B_TRUE; 1096 mutex_exit(&spa_spare_lock); 1097} 1098 1099void 1100spa_spare_remove(vdev_t *vd) 1101{ 1102 mutex_enter(&spa_spare_lock); 1103 ASSERT(vd->vdev_isspare); 1104 spa_aux_remove(vd, &spa_spare_avl); 1105 vd->vdev_isspare = B_FALSE; 1106 mutex_exit(&spa_spare_lock); 1107} 1108 1109boolean_t 1110spa_spare_exists(uint64_t guid, uint64_t *pool, int *refcnt) 1111{ 1112 boolean_t found; 1113 1114 mutex_enter(&spa_spare_lock); 1115 found = spa_aux_exists(guid, pool, refcnt, &spa_spare_avl); 1116 mutex_exit(&spa_spare_lock); 1117 1118 return (found); 1119} 1120 1121void 1122spa_spare_activate(vdev_t *vd) 1123{ 1124 mutex_enter(&spa_spare_lock); 1125 ASSERT(vd->vdev_isspare); 1126 spa_aux_activate(vd, &spa_spare_avl); 1127 mutex_exit(&spa_spare_lock); 1128} 1129 1130/* 1131 * Level 2 ARC devices are tracked globally for the same reasons as spares. 1132 * Cache devices currently only support one pool per cache device, and so 1133 * for these devices the aux reference count is currently unused beyond 1. 1134 */ 1135 1136static int 1137spa_l2cache_compare(const void *a, const void *b) 1138{ 1139 return (spa_aux_compare(a, b)); 1140} 1141 1142void 1143spa_l2cache_add(vdev_t *vd) 1144{ 1145 mutex_enter(&spa_l2cache_lock); 1146 ASSERT(!vd->vdev_isl2cache); 1147 spa_aux_add(vd, &spa_l2cache_avl); 1148 vd->vdev_isl2cache = B_TRUE; 1149 mutex_exit(&spa_l2cache_lock); 1150} 1151 1152void 1153spa_l2cache_remove(vdev_t *vd) 1154{ 1155 mutex_enter(&spa_l2cache_lock); 1156 ASSERT(vd->vdev_isl2cache); 1157 spa_aux_remove(vd, &spa_l2cache_avl); 1158 vd->vdev_isl2cache = B_FALSE; 1159 mutex_exit(&spa_l2cache_lock); 1160} 1161 1162boolean_t 1163spa_l2cache_exists(uint64_t guid, uint64_t *pool) 1164{ 1165 boolean_t found; 1166 1167 mutex_enter(&spa_l2cache_lock); 1168 found = spa_aux_exists(guid, pool, NULL, &spa_l2cache_avl); 1169 mutex_exit(&spa_l2cache_lock); 1170 1171 return (found); 1172} 1173 1174void 1175spa_l2cache_activate(vdev_t *vd) 1176{ 1177 mutex_enter(&spa_l2cache_lock); 1178 ASSERT(vd->vdev_isl2cache); 1179 spa_aux_activate(vd, &spa_l2cache_avl); 1180 mutex_exit(&spa_l2cache_lock); 1181} 1182 1183/* 1184 * ========================================================================== 1185 * SPA vdev locking 1186 * ========================================================================== 1187 */ 1188 1189/* 1190 * Lock the given spa_t for the purpose of adding or removing a vdev. 1191 * Grabs the global spa_namespace_lock plus the spa config lock for writing. 1192 * It returns the next transaction group for the spa_t. 1193 */ 1194uint64_t 1195spa_vdev_enter(spa_t *spa) 1196{ 1197 mutex_enter(&spa->spa_vdev_top_lock); 1198 mutex_enter(&spa_namespace_lock); 1199 return (spa_vdev_config_enter(spa)); 1200} 1201 1202/* 1203 * Internal implementation for spa_vdev_enter(). Used when a vdev 1204 * operation requires multiple syncs (i.e. removing a device) while 1205 * keeping the spa_namespace_lock held. 1206 */ 1207uint64_t 1208spa_vdev_config_enter(spa_t *spa) 1209{ 1210 ASSERT(MUTEX_HELD(&spa_namespace_lock)); 1211 1212 spa_config_enter(spa, SCL_ALL, spa, RW_WRITER); 1213 1214 return (spa_last_synced_txg(spa) + 1); 1215} 1216 1217/* 1218 * Used in combination with spa_vdev_config_enter() to allow the syncing 1219 * of multiple transactions without releasing the spa_namespace_lock. 1220 */ 1221void 1222spa_vdev_config_exit(spa_t *spa, vdev_t *vd, uint64_t txg, int error, char *tag) 1223{ 1224 ASSERT(MUTEX_HELD(&spa_namespace_lock)); 1225 1226 int config_changed = B_FALSE; 1227 1228 ASSERT(txg > spa_last_synced_txg(spa)); 1229 1230 spa->spa_pending_vdev = NULL; 1231 1232 /* 1233 * Reassess the DTLs. 1234 */ 1235 vdev_dtl_reassess(spa->spa_root_vdev, 0, 0, B_FALSE); 1236 1237 if (error == 0 && !list_is_empty(&spa->spa_config_dirty_list)) { 1238 config_changed = B_TRUE; 1239 spa->spa_config_generation++; 1240 } 1241 1242 /* 1243 * Verify the metaslab classes. 1244 */ 1245 ASSERT(metaslab_class_validate(spa_normal_class(spa)) == 0); 1246 ASSERT(metaslab_class_validate(spa_log_class(spa)) == 0); 1247 1248 spa_config_exit(spa, SCL_ALL, spa); 1249 1250 /* 1251 * Panic the system if the specified tag requires it. This 1252 * is useful for ensuring that configurations are updated 1253 * transactionally. 1254 */ 1255 if (zio_injection_enabled) 1256 zio_handle_panic_injection(spa, tag, 0); 1257 1258 /* 1259 * Note: this txg_wait_synced() is important because it ensures 1260 * that there won't be more than one config change per txg. 1261 * This allows us to use the txg as the generation number. 1262 */ 1263 if (error == 0) 1264 txg_wait_synced(spa->spa_dsl_pool, txg); 1265 1266 if (vd != NULL) { 1267 ASSERT(!vd->vdev_detached || vd->vdev_dtl_sm == NULL); 1268 spa_config_enter(spa, SCL_ALL, spa, RW_WRITER); 1269 vdev_free(vd); 1270 spa_config_exit(spa, SCL_ALL, spa); 1271 } 1272 1273 /* 1274 * If the config changed, update the config cache. 1275 */ 1276 if (config_changed) 1277 spa_config_sync(spa, B_FALSE, B_TRUE); 1278} 1279 1280/* 1281 * Unlock the spa_t after adding or removing a vdev. Besides undoing the 1282 * locking of spa_vdev_enter(), we also want make sure the transactions have 1283 * synced to disk, and then update the global configuration cache with the new 1284 * information. 1285 */ 1286int 1287spa_vdev_exit(spa_t *spa, vdev_t *vd, uint64_t txg, int error) 1288{ 1289 spa_vdev_config_exit(spa, vd, txg, error, FTAG); 1290 mutex_exit(&spa_namespace_lock); 1291 mutex_exit(&spa->spa_vdev_top_lock); 1292 1293 return (error); 1294} 1295 1296/* 1297 * Lock the given spa_t for the purpose of changing vdev state. 1298 */ 1299void 1300spa_vdev_state_enter(spa_t *spa, int oplocks) 1301{ 1302 int locks = SCL_STATE_ALL | oplocks; 1303 1304 /* 1305 * Root pools may need to read of the underlying devfs filesystem 1306 * when opening up a vdev. Unfortunately if we're holding the 1307 * SCL_ZIO lock it will result in a deadlock when we try to issue 1308 * the read from the root filesystem. Instead we "prefetch" 1309 * the associated vnodes that we need prior to opening the 1310 * underlying devices and cache them so that we can prevent 1311 * any I/O when we are doing the actual open. 1312 */ 1313 if (spa_is_root(spa)) { 1314 int low = locks & ~(SCL_ZIO - 1); 1315 int high = locks & ~low; 1316 1317 spa_config_enter(spa, high, spa, RW_WRITER); 1318 vdev_hold(spa->spa_root_vdev); 1319 spa_config_enter(spa, low, spa, RW_WRITER); 1320 } else { 1321 spa_config_enter(spa, locks, spa, RW_WRITER); 1322 } 1323 spa->spa_vdev_locks = locks; 1324} 1325 1326int 1327spa_vdev_state_exit(spa_t *spa, vdev_t *vd, int error) 1328{ 1329 boolean_t config_changed = B_FALSE; 1330 1331 if (vd != NULL || error == 0) 1332 vdev_dtl_reassess(vd ? vd->vdev_top : spa->spa_root_vdev, 1333 0, 0, B_FALSE); 1334 1335 if (vd != NULL) { 1336 vdev_state_dirty(vd->vdev_top); 1337 config_changed = B_TRUE; 1338 spa->spa_config_generation++; 1339 } 1340 1341 if (spa_is_root(spa)) 1342 vdev_rele(spa->spa_root_vdev); 1343 1344 ASSERT3U(spa->spa_vdev_locks, >=, SCL_STATE_ALL); 1345 spa_config_exit(spa, spa->spa_vdev_locks, spa); 1346 1347 /* 1348 * If anything changed, wait for it to sync. This ensures that, 1349 * from the system administrator's perspective, zpool(1M) commands 1350 * are synchronous. This is important for things like zpool offline: 1351 * when the command completes, you expect no further I/O from ZFS. 1352 */ 1353 if (vd != NULL) 1354 txg_wait_synced(spa->spa_dsl_pool, 0); 1355 1356 /* 1357 * If the config changed, update the config cache. 1358 */ 1359 if (config_changed) { 1360 mutex_enter(&spa_namespace_lock); 1361 spa_config_sync(spa, B_FALSE, B_TRUE); 1362 mutex_exit(&spa_namespace_lock); 1363 } 1364 1365 return (error); 1366} 1367 1368/* 1369 * ========================================================================== 1370 * Miscellaneous functions 1371 * ========================================================================== 1372 */ 1373 1374void 1375spa_activate_mos_feature(spa_t *spa, const char *feature, dmu_tx_t *tx) 1376{ 1377 if (!nvlist_exists(spa->spa_label_features, feature)) { 1378 fnvlist_add_boolean(spa->spa_label_features, feature); 1379 /* 1380 * When we are creating the pool (tx_txg==TXG_INITIAL), we can't 1381 * dirty the vdev config because lock SCL_CONFIG is not held. 1382 * Thankfully, in this case we don't need to dirty the config 1383 * because it will be written out anyway when we finish 1384 * creating the pool. 1385 */ 1386 if (tx->tx_txg != TXG_INITIAL) 1387 vdev_config_dirty(spa->spa_root_vdev); 1388 } 1389} 1390 1391void 1392spa_deactivate_mos_feature(spa_t *spa, const char *feature) 1393{ 1394 if (nvlist_remove_all(spa->spa_label_features, feature) == 0) 1395 vdev_config_dirty(spa->spa_root_vdev); 1396} 1397 1398/* 1399 * Rename a spa_t. 1400 */ 1401int 1402spa_rename(const char *name, const char *newname) 1403{ 1404 spa_t *spa; 1405 int err; 1406 1407 /* 1408 * Lookup the spa_t and grab the config lock for writing. We need to 1409 * actually open the pool so that we can sync out the necessary labels. 1410 * It's OK to call spa_open() with the namespace lock held because we 1411 * allow recursive calls for other reasons. 1412 */ 1413 mutex_enter(&spa_namespace_lock); 1414 if ((err = spa_open(name, &spa, FTAG)) != 0) { 1415 mutex_exit(&spa_namespace_lock); 1416 return (err); 1417 } 1418 1419 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 1420 1421 avl_remove(&spa_namespace_avl, spa); 1422 (void) strlcpy(spa->spa_name, newname, sizeof (spa->spa_name)); 1423 avl_add(&spa_namespace_avl, spa); 1424 1425 /* 1426 * Sync all labels to disk with the new names by marking the root vdev 1427 * dirty and waiting for it to sync. It will pick up the new pool name 1428 * during the sync. 1429 */ 1430 vdev_config_dirty(spa->spa_root_vdev); 1431 1432 spa_config_exit(spa, SCL_ALL, FTAG); 1433 1434 txg_wait_synced(spa->spa_dsl_pool, 0); 1435 1436 /* 1437 * Sync the updated config cache. 1438 */ 1439 spa_config_sync(spa, B_FALSE, B_TRUE); 1440 1441 spa_close(spa, FTAG); 1442 1443 mutex_exit(&spa_namespace_lock); 1444 1445 return (0); 1446} 1447 1448/* 1449 * Return the spa_t associated with given pool_guid, if it exists. If 1450 * device_guid is non-zero, determine whether the pool exists *and* contains 1451 * a device with the specified device_guid. 1452 */ 1453spa_t * 1454spa_by_guid(uint64_t pool_guid, uint64_t device_guid) 1455{ 1456 spa_t *spa; 1457 avl_tree_t *t = &spa_namespace_avl; 1458 1459 ASSERT(MUTEX_HELD(&spa_namespace_lock)); 1460 1461 for (spa = avl_first(t); spa != NULL; spa = AVL_NEXT(t, spa)) { 1462 if (spa->spa_state == POOL_STATE_UNINITIALIZED) 1463 continue; 1464 if (spa->spa_root_vdev == NULL) 1465 continue; 1466 if (spa_guid(spa) == pool_guid) { 1467 if (device_guid == 0) 1468 break; 1469 1470 if (vdev_lookup_by_guid(spa->spa_root_vdev, 1471 device_guid) != NULL) 1472 break; 1473 1474 /* 1475 * Check any devices we may be in the process of adding. 1476 */ 1477 if (spa->spa_pending_vdev) { 1478 if (vdev_lookup_by_guid(spa->spa_pending_vdev, 1479 device_guid) != NULL) 1480 break; 1481 } 1482 } 1483 } 1484 1485 return (spa); 1486} 1487 1488/* 1489 * Determine whether a pool with the given pool_guid exists. 1490 */ 1491boolean_t 1492spa_guid_exists(uint64_t pool_guid, uint64_t device_guid) 1493{ 1494 return (spa_by_guid(pool_guid, device_guid) != NULL); 1495} 1496 1497char * 1498spa_strdup(const char *s) 1499{ 1500 size_t len; 1501 char *new; 1502 1503 len = strlen(s); 1504 new = kmem_alloc(len + 1, KM_SLEEP); 1505 bcopy(s, new, len); 1506 new[len] = '\0'; 1507 1508 return (new); 1509} 1510 1511void 1512spa_strfree(char *s) 1513{ 1514 kmem_free(s, strlen(s) + 1); 1515} 1516 1517uint64_t 1518spa_get_random(uint64_t range) 1519{ 1520 uint64_t r; 1521 1522 ASSERT(range != 0); 1523 1524 (void) random_get_pseudo_bytes((void *)&r, sizeof (uint64_t)); 1525 1526 return (r % range); 1527} 1528 1529uint64_t 1530spa_generate_guid(spa_t *spa) 1531{ 1532 uint64_t guid = spa_get_random(-1ULL); 1533 1534 if (spa != NULL) { 1535 while (guid == 0 || spa_guid_exists(spa_guid(spa), guid)) 1536 guid = spa_get_random(-1ULL); 1537 } else { 1538 while (guid == 0 || spa_guid_exists(guid, 0)) 1539 guid = spa_get_random(-1ULL); 1540 } 1541 1542 return (guid); 1543} 1544 1545void 1546snprintf_blkptr(char *buf, size_t buflen, const blkptr_t *bp) 1547{ 1548 char type[256]; 1549 char *checksum = NULL; 1550 char *compress = NULL; 1551 1552 if (bp != NULL) { 1553 if (BP_GET_TYPE(bp) & DMU_OT_NEWTYPE) { 1554 dmu_object_byteswap_t bswap = 1555 DMU_OT_BYTESWAP(BP_GET_TYPE(bp)); 1556 (void) snprintf(type, sizeof (type), "bswap %s %s", 1557 DMU_OT_IS_METADATA(BP_GET_TYPE(bp)) ? 1558 "metadata" : "data", 1559 dmu_ot_byteswap[bswap].ob_name); 1560 } else { 1561 (void) strlcpy(type, dmu_ot[BP_GET_TYPE(bp)].ot_name, 1562 sizeof (type)); 1563 } 1564 if (!BP_IS_EMBEDDED(bp)) { 1565 checksum = 1566 zio_checksum_table[BP_GET_CHECKSUM(bp)].ci_name; 1567 } 1568 compress = zio_compress_table[BP_GET_COMPRESS(bp)].ci_name; 1569 } 1570 1571 SNPRINTF_BLKPTR(snprintf, ' ', buf, buflen, bp, type, checksum, 1572 compress); 1573} 1574 1575void 1576spa_freeze(spa_t *spa) 1577{ 1578 uint64_t freeze_txg = 0; 1579 1580 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 1581 if (spa->spa_freeze_txg == UINT64_MAX) { 1582 freeze_txg = spa_last_synced_txg(spa) + TXG_SIZE; 1583 spa->spa_freeze_txg = freeze_txg; 1584 } 1585 spa_config_exit(spa, SCL_ALL, FTAG); 1586 if (freeze_txg != 0) 1587 txg_wait_synced(spa_get_dsl(spa), freeze_txg); 1588} 1589 1590void 1591zfs_panic_recover(const char *fmt, ...) 1592{ 1593 va_list adx; 1594 1595 va_start(adx, fmt); 1596 vcmn_err(zfs_recover ? CE_WARN : CE_PANIC, fmt, adx); 1597 va_end(adx); 1598} 1599 1600/* 1601 * This is a stripped-down version of strtoull, suitable only for converting 1602 * lowercase hexadecimal numbers that don't overflow. 1603 */ 1604uint64_t 1605zfs_strtonum(const char *str, char **nptr) 1606{ 1607 uint64_t val = 0; 1608 char c; 1609 int digit; 1610 1611 while ((c = *str) != '\0') { 1612 if (c >= '0' && c <= '9') 1613 digit = c - '0'; 1614 else if (c >= 'a' && c <= 'f') 1615 digit = 10 + c - 'a'; 1616 else 1617 break; 1618 1619 val *= 16; 1620 val += digit; 1621 1622 str++; 1623 } 1624 1625 if (nptr) 1626 *nptr = (char *)str; 1627 1628 return (val); 1629} 1630 1631/* 1632 * ========================================================================== 1633 * Accessor functions 1634 * ========================================================================== 1635 */ 1636 1637boolean_t 1638spa_shutting_down(spa_t *spa) 1639{ 1640 return (spa->spa_async_suspended); 1641} 1642 1643dsl_pool_t * 1644spa_get_dsl(spa_t *spa) 1645{ 1646 return (spa->spa_dsl_pool); 1647} 1648 1649boolean_t 1650spa_is_initializing(spa_t *spa) 1651{ 1652 return (spa->spa_is_initializing); 1653} 1654 1655blkptr_t * 1656spa_get_rootblkptr(spa_t *spa) 1657{ 1658 return (&spa->spa_ubsync.ub_rootbp); 1659} 1660 1661void 1662spa_set_rootblkptr(spa_t *spa, const blkptr_t *bp) 1663{ 1664 spa->spa_uberblock.ub_rootbp = *bp; 1665} 1666 1667void 1668spa_altroot(spa_t *spa, char *buf, size_t buflen) 1669{ 1670 if (spa->spa_root == NULL) 1671 buf[0] = '\0'; 1672 else 1673 (void) strncpy(buf, spa->spa_root, buflen); 1674} 1675 1676int 1677spa_sync_pass(spa_t *spa) 1678{ 1679 return (spa->spa_sync_pass); 1680} 1681 1682char * 1683spa_name(spa_t *spa) 1684{ 1685 return (spa->spa_name); 1686} 1687 1688uint64_t 1689spa_guid(spa_t *spa) 1690{ 1691 dsl_pool_t *dp = spa_get_dsl(spa); 1692 uint64_t guid; 1693 1694 /* 1695 * If we fail to parse the config during spa_load(), we can go through 1696 * the error path (which posts an ereport) and end up here with no root 1697 * vdev. We stash the original pool guid in 'spa_config_guid' to handle 1698 * this case. 1699 */ 1700 if (spa->spa_root_vdev == NULL) 1701 return (spa->spa_config_guid); 1702 1703 guid = spa->spa_last_synced_guid != 0 ? 1704 spa->spa_last_synced_guid : spa->spa_root_vdev->vdev_guid; 1705 1706 /* 1707 * Return the most recently synced out guid unless we're 1708 * in syncing context. 1709 */ 1710 if (dp && dsl_pool_sync_context(dp)) 1711 return (spa->spa_root_vdev->vdev_guid); 1712 else 1713 return (guid); 1714} 1715 1716uint64_t 1717spa_load_guid(spa_t *spa) 1718{ 1719 /* 1720 * This is a GUID that exists solely as a reference for the 1721 * purposes of the arc. It is generated at load time, and 1722 * is never written to persistent storage. 1723 */ 1724 return (spa->spa_load_guid); 1725} 1726 1727uint64_t 1728spa_last_synced_txg(spa_t *spa) 1729{ 1730 return (spa->spa_ubsync.ub_txg); 1731} 1732 1733uint64_t 1734spa_first_txg(spa_t *spa) 1735{ 1736 return (spa->spa_first_txg); 1737} 1738 1739uint64_t 1740spa_syncing_txg(spa_t *spa) 1741{ 1742 return (spa->spa_syncing_txg); 1743} 1744 1745pool_state_t 1746spa_state(spa_t *spa) 1747{ 1748 return (spa->spa_state); 1749} 1750 1751spa_load_state_t 1752spa_load_state(spa_t *spa) 1753{ 1754 return (spa->spa_load_state); 1755} 1756 1757uint64_t 1758spa_freeze_txg(spa_t *spa) 1759{ 1760 return (spa->spa_freeze_txg); 1761} 1762 1763/* ARGSUSED */ 1764uint64_t 1765spa_get_asize(spa_t *spa, uint64_t lsize) 1766{ 1767 return (lsize * spa_asize_inflation); 1768} 1769 1770/* 1771 * Return the amount of slop space in bytes. It is 1/32 of the pool (3.2%), 1772 * or at least 128MB, unless that would cause it to be more than half the 1773 * pool size. 1774 * 1775 * See the comment above spa_slop_shift for details. 1776 */ 1777uint64_t 1778spa_get_slop_space(spa_t *spa) 1779{ 1780 uint64_t space = spa_get_dspace(spa); 1781 return (MAX(space >> spa_slop_shift, MIN(space >> 1, spa_min_slop))); 1782} 1783 1784uint64_t 1785spa_get_dspace(spa_t *spa) 1786{ 1787 return (spa->spa_dspace); 1788} 1789 1790void 1791spa_update_dspace(spa_t *spa) 1792{ 1793 spa->spa_dspace = metaslab_class_get_dspace(spa_normal_class(spa)) + 1794 ddt_get_dedup_dspace(spa); 1795} 1796 1797/* 1798 * Return the failure mode that has been set to this pool. The default 1799 * behavior will be to block all I/Os when a complete failure occurs. 1800 */ 1801uint8_t 1802spa_get_failmode(spa_t *spa) 1803{ 1804 return (spa->spa_failmode); 1805} 1806 1807boolean_t 1808spa_suspended(spa_t *spa) 1809{ 1810 return (spa->spa_suspended); 1811} 1812 1813uint64_t 1814spa_version(spa_t *spa) 1815{ 1816 return (spa->spa_ubsync.ub_version); 1817} 1818 1819boolean_t 1820spa_deflate(spa_t *spa) 1821{ 1822 return (spa->spa_deflate); 1823} 1824 1825metaslab_class_t * 1826spa_normal_class(spa_t *spa) 1827{ 1828 return (spa->spa_normal_class); 1829} 1830 1831metaslab_class_t * 1832spa_log_class(spa_t *spa) 1833{ 1834 return (spa->spa_log_class); 1835} 1836 1837void 1838spa_evicting_os_register(spa_t *spa, objset_t *os) 1839{ 1840 mutex_enter(&spa->spa_evicting_os_lock); 1841 list_insert_head(&spa->spa_evicting_os_list, os); 1842 mutex_exit(&spa->spa_evicting_os_lock); 1843} 1844 1845void 1846spa_evicting_os_deregister(spa_t *spa, objset_t *os) 1847{ 1848 mutex_enter(&spa->spa_evicting_os_lock); 1849 list_remove(&spa->spa_evicting_os_list, os); 1850 cv_broadcast(&spa->spa_evicting_os_cv); 1851 mutex_exit(&spa->spa_evicting_os_lock); 1852} 1853 1854void 1855spa_evicting_os_wait(spa_t *spa) 1856{ 1857 mutex_enter(&spa->spa_evicting_os_lock); 1858 while (!list_is_empty(&spa->spa_evicting_os_list)) 1859 cv_wait(&spa->spa_evicting_os_cv, &spa->spa_evicting_os_lock); 1860 mutex_exit(&spa->spa_evicting_os_lock); 1861 1862 dmu_buf_user_evict_wait(); 1863} 1864 1865int 1866spa_max_replication(spa_t *spa) 1867{ 1868 /* 1869 * As of SPA_VERSION == SPA_VERSION_DITTO_BLOCKS, we are able to 1870 * handle BPs with more than one DVA allocated. Set our max 1871 * replication level accordingly. 1872 */ 1873 if (spa_version(spa) < SPA_VERSION_DITTO_BLOCKS) 1874 return (1); 1875 return (MIN(SPA_DVAS_PER_BP, spa_max_replication_override)); 1876} 1877 1878int 1879spa_prev_software_version(spa_t *spa) 1880{ 1881 return (spa->spa_prev_software_version); 1882} 1883 1884uint64_t 1885spa_deadman_synctime(spa_t *spa) 1886{ 1887 return (spa->spa_deadman_synctime); 1888} 1889 1890uint64_t 1891dva_get_dsize_sync(spa_t *spa, const dva_t *dva) 1892{ 1893 uint64_t asize = DVA_GET_ASIZE(dva); 1894 uint64_t dsize = asize; 1895 1896 ASSERT(spa_config_held(spa, SCL_ALL, RW_READER) != 0); 1897 1898 if (asize != 0 && spa->spa_deflate) { 1899 uint64_t vdev = DVA_GET_VDEV(dva); 1900 vdev_t *vd = vdev_lookup_top(spa, vdev); 1901 if (vd == NULL) { 1902 panic( 1903 "dva_get_dsize_sync(): bad DVA %llu:%llu", 1904 (u_longlong_t)vdev, (u_longlong_t)asize); 1905 } 1906 dsize = (asize >> SPA_MINBLOCKSHIFT) * vd->vdev_deflate_ratio; 1907 } 1908 1909 return (dsize); 1910} 1911 1912uint64_t 1913bp_get_dsize_sync(spa_t *spa, const blkptr_t *bp) 1914{ 1915 uint64_t dsize = 0; 1916 1917 for (int d = 0; d < BP_GET_NDVAS(bp); d++) 1918 dsize += dva_get_dsize_sync(spa, &bp->blk_dva[d]); 1919 1920 return (dsize); 1921} 1922 1923uint64_t 1924bp_get_dsize(spa_t *spa, const blkptr_t *bp) 1925{ 1926 uint64_t dsize = 0; 1927 1928 spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER); 1929 1930 for (int d = 0; d < BP_GET_NDVAS(bp); d++) 1931 dsize += dva_get_dsize_sync(spa, &bp->blk_dva[d]); 1932 1933 spa_config_exit(spa, SCL_VDEV, FTAG); 1934 1935 return (dsize); 1936} 1937 1938/* 1939 * ========================================================================== 1940 * Initialization and Termination 1941 * ========================================================================== 1942 */ 1943 1944static int 1945spa_name_compare(const void *a1, const void *a2) 1946{ 1947 const spa_t *s1 = a1; 1948 const spa_t *s2 = a2; 1949 int s; 1950 1951 s = strcmp(s1->spa_name, s2->spa_name); 1952 if (s > 0) 1953 return (1); 1954 if (s < 0) 1955 return (-1); 1956 return (0); 1957} 1958 1959int 1960spa_busy(void) 1961{ 1962 return (spa_active_count); 1963} 1964 1965void 1966spa_boot_init() 1967{ 1968 spa_config_load(); 1969} 1970 1971#ifdef _KERNEL 1972EVENTHANDLER_DEFINE(mountroot, spa_boot_init, NULL, 0); 1973#endif 1974 1975void 1976spa_init(int mode) 1977{ 1978 mutex_init(&spa_namespace_lock, NULL, MUTEX_DEFAULT, NULL); 1979 mutex_init(&spa_spare_lock, NULL, MUTEX_DEFAULT, NULL); 1980 mutex_init(&spa_l2cache_lock, NULL, MUTEX_DEFAULT, NULL); 1981 cv_init(&spa_namespace_cv, NULL, CV_DEFAULT, NULL); 1982 1983 avl_create(&spa_namespace_avl, spa_name_compare, sizeof (spa_t), 1984 offsetof(spa_t, spa_avl)); 1985 1986 avl_create(&spa_spare_avl, spa_spare_compare, sizeof (spa_aux_t), 1987 offsetof(spa_aux_t, aux_avl)); 1988 1989 avl_create(&spa_l2cache_avl, spa_l2cache_compare, sizeof (spa_aux_t), 1990 offsetof(spa_aux_t, aux_avl)); 1991 1992 spa_mode_global = mode; 1993 1994#ifdef illumos 1995#ifdef _KERNEL 1996 spa_arch_init(); 1997#else 1998 if (spa_mode_global != FREAD && dprintf_find_string("watch")) { 1999 arc_procfd = open("/proc/self/ctl", O_WRONLY); 2000 if (arc_procfd == -1) { 2001 perror("could not enable watchpoints: " 2002 "opening /proc/self/ctl failed: "); 2003 } else { 2004 arc_watch = B_TRUE; 2005 } 2006 } 2007#endif 2008#endif /* illumos */ 2009 refcount_sysinit(); 2010 unique_init(); 2011 range_tree_init(); 2012 zio_init(); 2013 lz4_init(); 2014 dmu_init(); 2015 zil_init(); 2016 vdev_cache_stat_init(); 2017 vdev_file_init(); 2018 zfs_prop_init(); 2019 zpool_prop_init(); 2020 zpool_feature_init(); 2021 spa_config_load(); 2022 l2arc_start(); 2023#ifndef illumos 2024#ifdef _KERNEL 2025 zfs_deadman_init(); 2026#endif 2027#endif /* !illumos */ 2028} 2029 2030void 2031spa_fini(void) 2032{ 2033 l2arc_stop(); 2034 2035 spa_evict_all(); 2036 2037 vdev_file_fini(); 2038 vdev_cache_stat_fini(); 2039 zil_fini(); 2040 dmu_fini(); 2041 lz4_fini(); 2042 zio_fini(); 2043 range_tree_fini(); 2044 unique_fini(); 2045 refcount_fini(); 2046 2047 avl_destroy(&spa_namespace_avl); 2048 avl_destroy(&spa_spare_avl); 2049 avl_destroy(&spa_l2cache_avl); 2050 2051 cv_destroy(&spa_namespace_cv); 2052 mutex_destroy(&spa_namespace_lock); 2053 mutex_destroy(&spa_spare_lock); 2054 mutex_destroy(&spa_l2cache_lock); 2055} 2056 2057/* 2058 * Return whether this pool has slogs. No locking needed. 2059 * It's not a problem if the wrong answer is returned as it's only for 2060 * performance and not correctness 2061 */ 2062boolean_t 2063spa_has_slogs(spa_t *spa) 2064{ 2065 return (spa->spa_log_class->mc_rotor != NULL); 2066} 2067 2068spa_log_state_t 2069spa_get_log_state(spa_t *spa) 2070{ 2071 return (spa->spa_log_state); 2072} 2073 2074void 2075spa_set_log_state(spa_t *spa, spa_log_state_t state) 2076{ 2077 spa->spa_log_state = state; 2078} 2079 2080boolean_t 2081spa_is_root(spa_t *spa) 2082{ 2083 return (spa->spa_is_root); 2084} 2085 2086boolean_t 2087spa_writeable(spa_t *spa) 2088{ 2089 return (!!(spa->spa_mode & FWRITE)); 2090} 2091 2092/* 2093 * Returns true if there is a pending sync task in any of the current 2094 * syncing txg, the current quiescing txg, or the current open txg. 2095 */ 2096boolean_t 2097spa_has_pending_synctask(spa_t *spa) 2098{ 2099 return (!txg_all_lists_empty(&spa->spa_dsl_pool->dp_sync_tasks)); 2100} 2101 2102int 2103spa_mode(spa_t *spa) 2104{ 2105 return (spa->spa_mode); 2106} 2107 2108uint64_t 2109spa_bootfs(spa_t *spa) 2110{ 2111 return (spa->spa_bootfs); 2112} 2113 2114uint64_t 2115spa_delegation(spa_t *spa) 2116{ 2117 return (spa->spa_delegation); 2118} 2119 2120objset_t * 2121spa_meta_objset(spa_t *spa) 2122{ 2123 return (spa->spa_meta_objset); 2124} 2125 2126enum zio_checksum 2127spa_dedup_checksum(spa_t *spa) 2128{ 2129 return (spa->spa_dedup_checksum); 2130} 2131 2132/* 2133 * Reset pool scan stat per scan pass (or reboot). 2134 */ 2135void 2136spa_scan_stat_init(spa_t *spa) 2137{ 2138 /* data not stored on disk */ 2139 spa->spa_scan_pass_start = gethrestime_sec(); 2140 spa->spa_scan_pass_exam = 0; 2141 vdev_scan_stat_init(spa->spa_root_vdev); 2142} 2143 2144/* 2145 * Get scan stats for zpool status reports 2146 */ 2147int 2148spa_scan_get_stats(spa_t *spa, pool_scan_stat_t *ps) 2149{ 2150 dsl_scan_t *scn = spa->spa_dsl_pool ? spa->spa_dsl_pool->dp_scan : NULL; 2151 2152 if (scn == NULL || scn->scn_phys.scn_func == POOL_SCAN_NONE) 2153 return (SET_ERROR(ENOENT)); 2154 bzero(ps, sizeof (pool_scan_stat_t)); 2155 2156 /* data stored on disk */ 2157 ps->pss_func = scn->scn_phys.scn_func; 2158 ps->pss_start_time = scn->scn_phys.scn_start_time; 2159 ps->pss_end_time = scn->scn_phys.scn_end_time; 2160 ps->pss_to_examine = scn->scn_phys.scn_to_examine; 2161 ps->pss_examined = scn->scn_phys.scn_examined; 2162 ps->pss_to_process = scn->scn_phys.scn_to_process; 2163 ps->pss_processed = scn->scn_phys.scn_processed; 2164 ps->pss_errors = scn->scn_phys.scn_errors; 2165 ps->pss_state = scn->scn_phys.scn_state; 2166 2167 /* data not stored on disk */ 2168 ps->pss_pass_start = spa->spa_scan_pass_start; 2169 ps->pss_pass_exam = spa->spa_scan_pass_exam; 2170 2171 return (0); 2172} 2173 2174boolean_t 2175spa_debug_enabled(spa_t *spa) 2176{ 2177 return (spa->spa_debug); 2178} 2179 2180int 2181spa_maxblocksize(spa_t *spa) 2182{ 2183 if (spa_feature_is_enabled(spa, SPA_FEATURE_LARGE_BLOCKS)) 2184 return (SPA_MAXBLOCKSIZE); 2185 else 2186 return (SPA_OLD_MAXBLOCKSIZE); 2187} 2188