spa_misc.c revision 307279
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011, 2015 by Delphix. All rights reserved.
 * Copyright 2015 Nexenta Systems, Inc. All rights reserved.
 * Copyright 2013 Martin Matuska <mm@FreeBSD.org>. All rights reserved.
 * Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
 * Copyright 2013 Saso Kiselkov. All rights reserved.
 * Copyright (c) 2014 Integros [integros.com]
 */

#include <sys/zfs_context.h>
#include <sys/spa_impl.h>
#include <sys/spa_boot.h>
#include <sys/zio.h>
#include <sys/zio_checksum.h>
#include <sys/zio_compress.h>
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/zap.h>
#include <sys/zil.h>
#include <sys/vdev_impl.h>
#include <sys/metaslab.h>
#include <sys/uberblock_impl.h>
#include <sys/txg.h>
#include <sys/avl.h>
#include <sys/unique.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_scan.h>
#include <sys/fs/zfs.h>
#include <sys/metaslab_impl.h>
#include <sys/arc.h>
#include <sys/ddt.h>
#include "zfs_prop.h"
#include <sys/zfeature.h>

/*
 * SPA locking
 *
 * There are three basic locks for managing spa_t structures:
 *
 * spa_namespace_lock (global mutex)
 *
 *	This lock must be acquired to do any of the following:
 *
 *		- Lookup a spa_t by name
 *		- Add or remove a spa_t from the namespace
 *		- Increase spa_refcount from zero
 *		- Check if spa_refcount is zero
 *		- Rename a spa_t
 *		- add/remove/attach/detach devices
 *		- Held for the duration of create/destroy/import/export
 *
 *	It does not need to handle recursion.  A create or destroy may
 *	reference objects (files or zvols) in other pools, but by
 *	definition they must have an existing reference, and will never need
 *	to lookup a spa_t by name.
 *
 * spa_refcount (per-spa refcount_t protected by mutex)
 *
 *	This reference count keeps track of any active users of the spa_t.
 *	The spa_t cannot be destroyed or freed while this is non-zero.
 *	Internally, the refcount is never really 'zero' - opening a pool
 *	implicitly keeps some references in the DMU.  Internally we check
 *	against spa_minref, but present the image of a zero/non-zero value
 *	to consumers.
 *
 * spa_config_lock[] (per-spa array of rwlocks)
 *
 *	This protects the spa_t from config changes, and must be held in
 *	the following circumstances:
 *
 *		- RW_READER to perform I/O to the spa
 *		- RW_WRITER to change the vdev config
 *
 * The locking order is fairly straightforward:
 *
 *		spa_namespace_lock	->	spa_refcount
 *
 *	The namespace lock must be acquired to increase the refcount from 0
 *	or to check if it is zero.
 *
 *		spa_refcount		->	spa_config_lock[]
 *
 *	There must be at least one valid reference on the spa_t to acquire
 *	the config lock.
 *
 *		spa_namespace_lock	->	spa_config_lock[]
 *
 *	The namespace lock must always be taken before the config lock.
 *
 *
 * The spa_namespace_lock can be acquired directly and is globally visible.
 *
 * The namespace is manipulated using the following functions, all of which
 * require the spa_namespace_lock to be held.
 *
 *	spa_lookup()		Lookup a spa_t by name.
 *
 *	spa_add()		Create a new spa_t in the namespace.
 *
 *	spa_remove()		Remove a spa_t from the namespace.  This also
 *				frees up any memory associated with the spa_t.
 *
 *	spa_next()		Returns the next spa_t in the system, or the
 *				first if NULL is passed.
 *
 *	spa_evict_all()		Shutdown and remove all spa_t structures in
 *				the system.
 *
 *	spa_guid_exists()	Determine whether a pool/device guid exists.
 *
 * The spa_refcount is manipulated using the following functions:
 *
 *	spa_open_ref()		Adds a reference to the given spa_t.  Must be
 *				called with spa_namespace_lock held if the
 *				refcount is currently zero.
 *
 *	spa_close()		Remove a reference from the spa_t.  This will
 *				not free the spa_t or remove it from the
 *				namespace.  No locking is required.
 *
 *	spa_refcount_zero()	Returns true if the refcount is currently
 *				zero.  Must be called with spa_namespace_lock
 *				held.
 *
 * The spa_config_lock[] is an array of rwlocks, ordered as follows:
 * SCL_CONFIG > SCL_STATE > SCL_ALLOC > SCL_ZIO > SCL_FREE > SCL_VDEV.
 * spa_config_lock[] is manipulated with spa_config_{enter,exit,held}().
 *
 * To read the configuration, it suffices to hold one of these locks as reader.
 * To modify the configuration, you must hold all locks as writer.  To modify
 * vdev state without altering the vdev tree's topology (e.g. online/offline),
 * you must hold SCL_STATE and SCL_ZIO as writer.
 *
 * We use these distinct config locks to avoid recursive lock entry.
 * For example, spa_sync() (which holds SCL_CONFIG as reader) induces
 * block allocations (SCL_ALLOC), which may require reading space maps
 * from disk (dmu_read() -> zio_read() -> SCL_ZIO).
 *
 * The spa config locks cannot be normal rwlocks because we need the
 * ability to hand off ownership.  For example, SCL_ZIO is acquired
 * by the issuing thread and later released by an interrupt thread.
 * They do, however, obey the usual write-wanted semantics to prevent
 * writer (i.e. system administrator) starvation.
 *
 * The lock acquisition rules are as follows:
 *
 * SCL_CONFIG
 *	Protects changes to the vdev tree topology, such as vdev
 *	add/remove/attach/detach.  Protects the dirty config list
 *	(spa_config_dirty_list) and the set of spares and l2arc devices.
 *
 * SCL_STATE
 *	Protects changes to pool state and vdev state, such as vdev
 *	online/offline/fault/degrade/clear.  Protects the dirty state list
 *	(spa_state_dirty_list) and global pool state (spa_state).
 *
 * SCL_ALLOC
 *	Protects changes to metaslab groups and classes.
 *	Held as reader by metaslab_alloc() and metaslab_claim().
 *
 * SCL_ZIO
 *	Held by bp-level zios (those which have no io_vd upon entry)
 *	to prevent changes to the vdev tree.  The bp-level zio implicitly
 *	protects all of its vdev child zios, which do not hold SCL_ZIO.
 *
 * SCL_FREE
 *	Protects changes to metaslab groups and classes.
 *	Held as reader by metaslab_free().  SCL_FREE is distinct from
 *	SCL_ALLOC, and lower than SCL_ZIO, so that we can safely free
 *	blocks in zio_done() while another i/o that holds either
 *	SCL_ALLOC or SCL_ZIO is waiting for this i/o to complete.
 *
 * SCL_VDEV
 *	Held as reader to prevent changes to the vdev tree during trivial
 *	inquiries such as bp_get_dsize().  SCL_VDEV is distinct from the
 *	other locks, and lower than all of them, to ensure that it's safe
 *	to acquire regardless of caller context.
 *
 * In addition, the following rules apply:
 *
 * (a)	spa_props_lock protects pool properties, spa_config and
 *	spa_config_list.  The lock ordering is SCL_CONFIG > spa_props_lock.
 *
 * (b)	I/O operations on leaf vdevs.  For any zio operation that takes
 *	an explicit vdev_t argument -- such as zio_ioctl(), zio_read_phys(),
 *	or zio_write_phys() -- the caller must ensure that the config cannot
 *	change in the interim, and that the vdev cannot be reopened.
 *	SCL_STATE as reader suffices for both.
 *
 * The vdev configuration is protected by spa_vdev_enter() / spa_vdev_exit().
 *
 *	spa_vdev_enter()	Acquire the namespace lock and the config lock
 *				for writing.
 *
 *	spa_vdev_exit()		Release the config lock, wait for all I/O
 *				to complete, sync the updated configs to the
 *				cache, and release the namespace lock.
 *
 * vdev state is protected by spa_vdev_state_enter() / spa_vdev_state_exit().
 * Like spa_vdev_enter/exit, these are convenience wrappers -- the actual
 * locking is, always, based on spa_namespace_lock and spa_config_lock[].
 *
 * spa_rename() is also implemented within this file since it requires
 * manipulation of the namespace.
 */
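
/*
 * To make the rules above concrete, a hypothetical read-side consumer
 * (spa_example_read_config() below is an illustrative sketch, not a
 * function in this file) takes a single lock as reader and releases it
 * with the same tag:
 */
#if 0
static uint64_t
spa_example_read_config(spa_t *spa)
{
    uint64_t txg;

    /* SCL_CONFIG as reader: the vdev tree cannot change underneath us. */
    spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
    txg = spa_last_synced_txg(spa);
    spa_config_exit(spa, SCL_CONFIG, FTAG);

    return (txg);
}
#endif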

static avl_tree_t spa_namespace_avl;
kmutex_t spa_namespace_lock;
static kcondvar_t spa_namespace_cv;
static int spa_active_count;
int spa_max_replication_override = SPA_DVAS_PER_BP;

static kmutex_t spa_spare_lock;
static avl_tree_t spa_spare_avl;
static kmutex_t spa_l2cache_lock;
static avl_tree_t spa_l2cache_avl;

kmem_cache_t *spa_buffer_pool;
int spa_mode_global;

#ifdef ZFS_DEBUG
/* Everything except dprintf and spa is on by default in debug builds */
int zfs_flags = ~(ZFS_DEBUG_DPRINTF | ZFS_DEBUG_SPA);
#else
int zfs_flags = 0;
#endif
SYSCTL_DECL(_debug);
TUNABLE_INT("debug.zfs_flags", &zfs_flags);
SYSCTL_INT(_debug, OID_AUTO, zfs_flags, CTLFLAG_RWTUN, &zfs_flags, 0,
    "ZFS debug flags.");

/*
 * zfs_recover can be set to nonzero to attempt to recover from
 * otherwise-fatal errors, typically caused by on-disk corruption.  When
 * set, calls to zfs_panic_recover() will turn into warning messages.
 * This should only be used as a last resort, as it typically results
 * in leaked space, or worse.
 */
boolean_t zfs_recover = B_FALSE;
SYSCTL_DECL(_vfs_zfs);
TUNABLE_INT("vfs.zfs.recover", &zfs_recover);
SYSCTL_INT(_vfs_zfs, OID_AUTO, recover, CTLFLAG_RWTUN, &zfs_recover, 0,
    "Try to recover from otherwise-fatal errors.");

static int
sysctl_vfs_zfs_debug_flags(SYSCTL_HANDLER_ARGS)
{
    int err, val;

    val = zfs_flags;
    err = sysctl_handle_int(oidp, &val, 0, req);
    if (err != 0 || req->newptr == NULL)
        return (err);

    /*
     * ZFS_DEBUG_MODIFY must be enabled prior to boot so all
     * arc buffers in the system have the necessary additional
     * checksum data.  However, it is safe to disable at any
     * time.
     */
    if (!(zfs_flags & ZFS_DEBUG_MODIFY))
        val &= ~ZFS_DEBUG_MODIFY;
    zfs_flags = val;

    return (0);
}
TUNABLE_INT("vfs.zfs.debug_flags", &zfs_flags);
SYSCTL_PROC(_vfs_zfs, OID_AUTO, debug_flags,
    CTLTYPE_UINT | CTLFLAG_MPSAFE | CTLFLAG_RW, 0, sizeof(int),
    sysctl_vfs_zfs_debug_flags, "IU", "Debug flags for ZFS testing.");

/*
 * If destroy encounters an EIO while reading metadata (e.g. indirect
 * blocks), space referenced by the missing metadata can not be freed.
 * Normally this causes the background destroy to become "stalled", as
 * it is unable to make forward progress.  While in this stalled state,
 * all remaining space to free from the error-encountering filesystem is
 * "temporarily leaked".  Set this flag to cause it to ignore the EIO,
 * permanently leak the space from indirect blocks that can not be read,
 * and continue to free everything else that it can.
 *
 * The default, "stalling" behavior is useful if the storage partially
 * fails (i.e. some but not all i/os fail), and then later recovers.  In
 * this case, we will be able to continue pool operations while it is
 * partially failed, and when it recovers, we can continue to free the
 * space, with no leaks.  However, note that this case is actually
 * fairly rare.
 *
 * Typically pools either (a) fail completely (but perhaps temporarily,
 * e.g. a top-level vdev going offline), or (b) have localized,
 * permanent errors (e.g. disk returns the wrong data due to bit flip or
 * firmware bug).  In case (a), this setting does not matter because the
 * pool will be suspended and the sync thread will not be able to make
 * forward progress regardless.  In case (b), because the error is
 * permanent, the best we can do is leak the minimum amount of space,
 * which is what setting this flag will do.  Therefore, it is reasonable
 * for this flag to normally be set, but we chose the more conservative
 * approach of not setting it, so that there is no possibility of
 * leaking space in the "partial temporary" failure case.
 */
boolean_t zfs_free_leak_on_eio = B_FALSE;

/*
 * Expiration time in milliseconds.  This value has two meanings.  First it is
 * used to determine when the spa_deadman() logic should fire.  By default the
 * spa_deadman() will fire if spa_sync() has not completed in 1000 seconds.
 * Secondly, the value determines if an I/O is considered "hung".  Any I/O that
 * has not completed in zfs_deadman_synctime_ms is considered "hung" resulting
 * in a system panic.
 */
uint64_t zfs_deadman_synctime_ms = 1000000ULL;
TUNABLE_QUAD("vfs.zfs.deadman_synctime_ms", &zfs_deadman_synctime_ms);
SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, deadman_synctime_ms, CTLFLAG_RDTUN,
    &zfs_deadman_synctime_ms, 0,
    "Stalled ZFS I/O expiration time in milliseconds");

/*
 * Check time in milliseconds.  This defines the frequency at which we check
 * for hung I/O.
 */
uint64_t zfs_deadman_checktime_ms = 5000ULL;
TUNABLE_QUAD("vfs.zfs.deadman_checktime_ms", &zfs_deadman_checktime_ms);
SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, deadman_checktime_ms, CTLFLAG_RDTUN,
    &zfs_deadman_checktime_ms, 0,
    "Period of checks for stalled ZFS I/O in milliseconds");

/*
 * Default value of -1 for zfs_deadman_enabled is resolved in
 * zfs_deadman_init().
 */
int zfs_deadman_enabled = -1;
TUNABLE_INT("vfs.zfs.deadman_enabled", &zfs_deadman_enabled);
SYSCTL_INT(_vfs_zfs, OID_AUTO, deadman_enabled, CTLFLAG_RDTUN,
    &zfs_deadman_enabled, 0, "Kernel panic on stalled ZFS I/O");
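
/*
 * Taken together, the defaults above mean that spa_deadman() first fires
 * after zfs_deadman_synctime_ms (1000000 ms, i.e. 1000 seconds) of
 * spa_sync() failing to complete, and the hung-I/O inspection then repeats
 * every zfs_deadman_checktime_ms (5000 ms, i.e. 5 seconds) until the sync
 * completes, or until the system panics if zfs_deadman_enabled is nonzero.
 */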

/*
 * The worst case is single-sector max-parity RAID-Z blocks, in which
 * case the space requirement is exactly (VDEV_RAIDZ_MAXPARITY + 1)
 * times the size; so just assume that.  Add to this the fact that
 * we can have up to 3 DVAs per bp, and one more factor of 2 because
 * the block may be dittoed with up to 3 DVAs by ddt_sync().  All together,
 * the worst case is:
 *	(VDEV_RAIDZ_MAXPARITY + 1) * SPA_DVAS_PER_BP * 2 == 24
 */
int spa_asize_inflation = 24;
TUNABLE_INT("vfs.zfs.spa_asize_inflation", &spa_asize_inflation);
SYSCTL_INT(_vfs_zfs, OID_AUTO, spa_asize_inflation, CTLFLAG_RWTUN,
    &spa_asize_inflation, 0,
    "Worst case inflation factor for single sector writes");
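
/*
 * Worked example of the inflation factor above: a 128KB logical write may
 * be charged up to 128KB * 24 = 3MB of worst-case allocated space by
 * spa_get_asize() below, which simply multiplies by spa_asize_inflation.
 */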

#ifndef illumos
#ifdef _KERNEL
static void
zfs_deadman_init()
{
    /*
     * If we are not on i386 or amd64, or if we are running in a
     * virtual machine, disable the ZFS deadman thread by default.
     */
    if (zfs_deadman_enabled == -1) {
#if defined(__amd64__) || defined(__i386__)
        zfs_deadman_enabled = (vm_guest == VM_GUEST_NO) ? 1 : 0;
#else
        zfs_deadman_enabled = 0;
#endif
    }
}
#endif	/* _KERNEL */
#endif	/* !illumos */

/*
 * Normally, we don't allow the last 3.2% (1/(2^spa_slop_shift)) of space in
 * the pool to be consumed.  This ensures that we don't run the pool
 * completely out of space, due to unaccounted changes (e.g. to the MOS).
 * It also limits the worst-case time to allocate space.  If we have
 * less than this amount of free space, most ZPL operations (e.g. write,
 * create) will return ENOSPC.
 *
 * Certain operations (e.g. file removal, most administrative actions) can
 * use half the slop space.  They will only return ENOSPC if less than half
 * the slop space is free.  Typically, once the pool has less than the slop
 * space free, the user will use these operations to free up space in the pool.
 * These are the operations that call dsl_pool_adjustedsize() with the netfree
 * argument set to TRUE.
 *
 * A very restricted set of operations are always permitted, regardless of
 * the amount of free space.  These are the operations that call
 * dsl_sync_task(ZFS_SPACE_CHECK_NONE), e.g. "zfs destroy".  If these
 * operations result in a net increase in the amount of space used,
 * it is possible to run the pool completely out of space, causing it to
 * be permanently read-only.
 *
 * Note that on very small pools, the slop space will be larger than
 * 3.2%, in an effort to have it be at least spa_min_slop (128MB),
 * but we never allow it to be more than half the pool size.
 *
 * See also the comments in zfs_space_check_t.
 */
int spa_slop_shift = 5;
SYSCTL_INT(_vfs_zfs, OID_AUTO, spa_slop_shift, CTLFLAG_RWTUN,
    &spa_slop_shift, 0,
    "Shift value of reserved space (1/(2^spa_slop_shift)).");
uint64_t spa_min_slop = 128 * 1024 * 1024;
SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, spa_min_slop, CTLFLAG_RWTUN,
    &spa_min_slop, 0,
    "Minimal value of reserved space");
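
/*
 * Worked examples of the slop calculation implemented by
 * spa_get_slop_space() below, MAX(space >> spa_slop_shift,
 * MIN(space >> 1, spa_min_slop)):
 *
 *	1TB pool:	MAX(32GB, MIN(512GB, 128MB)) = 32GB  (the 3.2% case)
 *	1GB pool:	MAX(32MB, MIN(512MB, 128MB)) = 128MB (spa_min_slop)
 *	200MB pool:	MAX(6.25MB, MIN(100MB, 128MB)) = 100MB (half the pool)
 */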

/*
 * ==========================================================================
 * SPA config locking
 * ==========================================================================
 */
static void
spa_config_lock_init(spa_t *spa)
{
    for (int i = 0; i < SCL_LOCKS; i++) {
        spa_config_lock_t *scl = &spa->spa_config_lock[i];
        mutex_init(&scl->scl_lock, NULL, MUTEX_DEFAULT, NULL);
        cv_init(&scl->scl_cv, NULL, CV_DEFAULT, NULL);
        refcount_create_untracked(&scl->scl_count);
        scl->scl_writer = NULL;
        scl->scl_write_wanted = 0;
    }
}

static void
spa_config_lock_destroy(spa_t *spa)
{
    for (int i = 0; i < SCL_LOCKS; i++) {
        spa_config_lock_t *scl = &spa->spa_config_lock[i];
        mutex_destroy(&scl->scl_lock);
        cv_destroy(&scl->scl_cv);
        refcount_destroy(&scl->scl_count);
        ASSERT(scl->scl_writer == NULL);
        ASSERT(scl->scl_write_wanted == 0);
    }
}

int
spa_config_tryenter(spa_t *spa, int locks, void *tag, krw_t rw)
{
    for (int i = 0; i < SCL_LOCKS; i++) {
        spa_config_lock_t *scl = &spa->spa_config_lock[i];
        if (!(locks & (1 << i)))
            continue;
        mutex_enter(&scl->scl_lock);
        if (rw == RW_READER) {
            if (scl->scl_writer || scl->scl_write_wanted) {
                mutex_exit(&scl->scl_lock);
                spa_config_exit(spa, locks & ((1 << i) - 1),
                    tag);
                return (0);
            }
        } else {
            ASSERT(scl->scl_writer != curthread);
            if (!refcount_is_zero(&scl->scl_count)) {
                mutex_exit(&scl->scl_lock);
                spa_config_exit(spa, locks & ((1 << i) - 1),
                    tag);
                return (0);
            }
            scl->scl_writer = curthread;
        }
        (void) refcount_add(&scl->scl_count, tag);
        mutex_exit(&scl->scl_lock);
    }
    return (1);
}

void
spa_config_enter(spa_t *spa, int locks, void *tag, krw_t rw)
{
    int wlocks_held = 0;

    ASSERT3U(SCL_LOCKS, <, sizeof (wlocks_held) * NBBY);

    for (int i = 0; i < SCL_LOCKS; i++) {
        spa_config_lock_t *scl = &spa->spa_config_lock[i];
        if (scl->scl_writer == curthread)
            wlocks_held |= (1 << i);
        if (!(locks & (1 << i)))
            continue;
        mutex_enter(&scl->scl_lock);
        if (rw == RW_READER) {
            while (scl->scl_writer || scl->scl_write_wanted) {
                cv_wait(&scl->scl_cv, &scl->scl_lock);
            }
        } else {
            ASSERT(scl->scl_writer != curthread);
            while (!refcount_is_zero(&scl->scl_count)) {
                scl->scl_write_wanted++;
                cv_wait(&scl->scl_cv, &scl->scl_lock);
                scl->scl_write_wanted--;
            }
            scl->scl_writer = curthread;
        }
        (void) refcount_add(&scl->scl_count, tag);
        mutex_exit(&scl->scl_lock);
    }
    ASSERT(wlocks_held <= locks);
}

void
spa_config_exit(spa_t *spa, int locks, void *tag)
{
    for (int i = SCL_LOCKS - 1; i >= 0; i--) {
        spa_config_lock_t *scl = &spa->spa_config_lock[i];
        if (!(locks & (1 << i)))
            continue;
        mutex_enter(&scl->scl_lock);
        ASSERT(!refcount_is_zero(&scl->scl_count));
        if (refcount_remove(&scl->scl_count, tag) == 0) {
            ASSERT(scl->scl_writer == NULL ||
                scl->scl_writer == curthread);
            scl->scl_writer = NULL;	/* OK in either case */
            cv_broadcast(&scl->scl_cv);
        }
        mutex_exit(&scl->scl_lock);
    }
}

int
spa_config_held(spa_t *spa, int locks, krw_t rw)
{
    int locks_held = 0;

    for (int i = 0; i < SCL_LOCKS; i++) {
        spa_config_lock_t *scl = &spa->spa_config_lock[i];
        if (!(locks & (1 << i)))
            continue;
        if ((rw == RW_READER && !refcount_is_zero(&scl->scl_count)) ||
            (rw == RW_WRITER && scl->scl_writer == curthread))
            locks_held |= 1 << i;
    }

    return (locks_held);
}

/*
 * ==========================================================================
 * SPA namespace functions
 * ==========================================================================
 */

/*
 * Lookup the named spa_t in the AVL tree.  The spa_namespace_lock must be held.
 * Returns NULL if no matching spa_t is found.
 */
spa_t *
spa_lookup(const char *name)
{
    static spa_t search;	/* spa_t is large; don't allocate on stack */
    spa_t *spa;
    avl_index_t where;
    char *cp;

    ASSERT(MUTEX_HELD(&spa_namespace_lock));

    (void) strlcpy(search.spa_name, name, sizeof (search.spa_name));

    /*
     * If it's a full dataset name, figure out the pool name and
     * just use that.
     */
    cp = strpbrk(search.spa_name, "/@#");
    if (cp != NULL)
        *cp = '\0';

    spa = avl_find(&spa_namespace_avl, &search, &where);

    return (spa);
}
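
/*
 * Because of the strpbrk() truncation above, spa_lookup() accepts any
 * dataset, snapshot or bookmark name and resolves it to its pool; with a
 * hypothetical pool "tank":
 *
 *	spa_lookup("tank")			returns the spa_t for "tank"
 *	spa_lookup("tank/home@yesterday")	returns the spa_t for "tank"
 *	spa_lookup("tank/fs#mark")		returns the spa_t for "tank"
 */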

/*
 * Fires when spa_sync has not completed within zfs_deadman_synctime_ms.
 * If the zfs_deadman_enabled flag is set then it inspects all vdev queues
 * looking for potentially hung I/Os.
 */
void
spa_deadman(void *arg)
{
    spa_t *spa = arg;

    /*
     * Disable the deadman timer if the pool is suspended.
     */
    if (spa_suspended(spa)) {
#ifdef illumos
        VERIFY(cyclic_reprogram(spa->spa_deadman_cycid, CY_INFINITY));
#else
        /* Nothing.  Just don't schedule any future callouts. */
#endif
        return;
    }

    zfs_dbgmsg("slow spa_sync: started %llu seconds ago, calls %llu",
        (gethrtime() - spa->spa_sync_starttime) / NANOSEC,
        ++spa->spa_deadman_calls);
    if (zfs_deadman_enabled)
        vdev_deadman(spa->spa_root_vdev);
#ifdef __FreeBSD__
#ifdef _KERNEL
    callout_schedule(&spa->spa_deadman_cycid,
        hz * zfs_deadman_checktime_ms / MILLISEC);
#endif
#endif
}

/*
 * Create an uninitialized spa_t with the given name.  Requires
 * spa_namespace_lock.  The caller must ensure that the spa_t doesn't already
 * exist by calling spa_lookup() first.
 */
spa_t *
spa_add(const char *name, nvlist_t *config, const char *altroot)
{
    spa_t *spa;
    spa_config_dirent_t *dp;
#ifdef illumos
    cyc_handler_t hdlr;
    cyc_time_t when;
#endif

    ASSERT(MUTEX_HELD(&spa_namespace_lock));

    spa = kmem_zalloc(sizeof (spa_t), KM_SLEEP);

    mutex_init(&spa->spa_async_lock, NULL, MUTEX_DEFAULT, NULL);
    mutex_init(&spa->spa_errlist_lock, NULL, MUTEX_DEFAULT, NULL);
    mutex_init(&spa->spa_errlog_lock, NULL, MUTEX_DEFAULT, NULL);
    mutex_init(&spa->spa_evicting_os_lock, NULL, MUTEX_DEFAULT, NULL);
    mutex_init(&spa->spa_history_lock, NULL, MUTEX_DEFAULT, NULL);
    mutex_init(&spa->spa_proc_lock, NULL, MUTEX_DEFAULT, NULL);
    mutex_init(&spa->spa_props_lock, NULL, MUTEX_DEFAULT, NULL);
    mutex_init(&spa->spa_cksum_tmpls_lock, NULL, MUTEX_DEFAULT, NULL);
    mutex_init(&spa->spa_scrub_lock, NULL, MUTEX_DEFAULT, NULL);
    mutex_init(&spa->spa_suspend_lock, NULL, MUTEX_DEFAULT, NULL);
    mutex_init(&spa->spa_vdev_top_lock, NULL, MUTEX_DEFAULT, NULL);
    mutex_init(&spa->spa_alloc_lock, NULL, MUTEX_DEFAULT, NULL);

    cv_init(&spa->spa_async_cv, NULL, CV_DEFAULT, NULL);
    cv_init(&spa->spa_evicting_os_cv, NULL, CV_DEFAULT, NULL);
    cv_init(&spa->spa_proc_cv, NULL, CV_DEFAULT, NULL);
    cv_init(&spa->spa_scrub_io_cv, NULL, CV_DEFAULT, NULL);
    cv_init(&spa->spa_suspend_cv, NULL, CV_DEFAULT, NULL);

    for (int t = 0; t < TXG_SIZE; t++)
        bplist_create(&spa->spa_free_bplist[t]);

    (void) strlcpy(spa->spa_name, name, sizeof (spa->spa_name));
    spa->spa_state = POOL_STATE_UNINITIALIZED;
    spa->spa_freeze_txg = UINT64_MAX;
    spa->spa_final_txg = UINT64_MAX;
    spa->spa_load_max_txg = UINT64_MAX;
    spa->spa_proc = &p0;
    spa->spa_proc_state = SPA_PROC_NONE;

#ifdef illumos
    hdlr.cyh_func = spa_deadman;
    hdlr.cyh_arg = spa;
    hdlr.cyh_level = CY_LOW_LEVEL;
#endif

    spa->spa_deadman_synctime = MSEC2NSEC(zfs_deadman_synctime_ms);

#ifdef illumos
    /*
     * This determines how often we need to check for hung I/Os after
     * the cyclic has already fired.  Since checking for hung I/Os is
     * an expensive operation we don't want to check too frequently.
     * Instead wait for 5 seconds before checking again.
     */
    when.cyt_interval = MSEC2NSEC(zfs_deadman_checktime_ms);
    when.cyt_when = CY_INFINITY;
    mutex_enter(&cpu_lock);
    spa->spa_deadman_cycid = cyclic_add(&hdlr, &when);
    mutex_exit(&cpu_lock);
#else	/* !illumos */
#ifdef _KERNEL
    callout_init(&spa->spa_deadman_cycid, CALLOUT_MPSAFE);
#endif
#endif
    refcount_create(&spa->spa_refcount);
    spa_config_lock_init(spa);

    avl_add(&spa_namespace_avl, spa);

    /*
     * Set the alternate root, if there is one.
     */
    if (altroot) {
        spa->spa_root = spa_strdup(altroot);
        spa_active_count++;
    }

    avl_create(&spa->spa_alloc_tree, zio_timestamp_compare,
        sizeof (zio_t), offsetof(zio_t, io_alloc_node));

    /*
     * Every pool starts with the default cachefile.
     */
    list_create(&spa->spa_config_list, sizeof (spa_config_dirent_t),
        offsetof(spa_config_dirent_t, scd_link));

    dp = kmem_zalloc(sizeof (spa_config_dirent_t), KM_SLEEP);
    dp->scd_path = altroot ? NULL : spa_strdup(spa_config_path);
    list_insert_head(&spa->spa_config_list, dp);

    VERIFY(nvlist_alloc(&spa->spa_load_info, NV_UNIQUE_NAME,
        KM_SLEEP) == 0);

    if (config != NULL) {
        nvlist_t *features;

        if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_FEATURES_FOR_READ,
            &features) == 0) {
            VERIFY(nvlist_dup(features, &spa->spa_label_features,
                0) == 0);
        }

        VERIFY(nvlist_dup(config, &spa->spa_config, 0) == 0);
    }

    if (spa->spa_label_features == NULL) {
        VERIFY(nvlist_alloc(&spa->spa_label_features, NV_UNIQUE_NAME,
            KM_SLEEP) == 0);
    }

    spa->spa_debug = ((zfs_flags & ZFS_DEBUG_SPA) != 0);

    spa->spa_min_ashift = INT_MAX;
    spa->spa_max_ashift = 0;

    /*
     * As a pool is being created, treat all features as disabled by
     * setting SPA_FEATURE_DISABLED for all entries in the feature
     * refcount cache.
     */
    for (int i = 0; i < SPA_FEATURES; i++) {
        spa->spa_feat_refcount_cache[i] = SPA_FEATURE_DISABLED;
    }

    return (spa);
}

/*
 * Removes a spa_t from the namespace, freeing up any memory used.  Requires
 * spa_namespace_lock.  This is called only after the spa_t has been closed and
 * deactivated.
 */
void
spa_remove(spa_t *spa)
{
    spa_config_dirent_t *dp;

    ASSERT(MUTEX_HELD(&spa_namespace_lock));
    ASSERT(spa->spa_state == POOL_STATE_UNINITIALIZED);
    ASSERT3U(refcount_count(&spa->spa_refcount), ==, 0);

    nvlist_free(spa->spa_config_splitting);

    avl_remove(&spa_namespace_avl, spa);
    cv_broadcast(&spa_namespace_cv);

    if (spa->spa_root) {
        spa_strfree(spa->spa_root);
        spa_active_count--;
    }

    while ((dp = list_head(&spa->spa_config_list)) != NULL) {
        list_remove(&spa->spa_config_list, dp);
        if (dp->scd_path != NULL)
            spa_strfree(dp->scd_path);
        kmem_free(dp, sizeof (spa_config_dirent_t));
    }

    avl_destroy(&spa->spa_alloc_tree);
    list_destroy(&spa->spa_config_list);

    nvlist_free(spa->spa_label_features);
    nvlist_free(spa->spa_load_info);
    spa_config_set(spa, NULL);

#ifdef illumos
    mutex_enter(&cpu_lock);
    if (spa->spa_deadman_cycid != CYCLIC_NONE)
        cyclic_remove(spa->spa_deadman_cycid);
    mutex_exit(&cpu_lock);
    spa->spa_deadman_cycid = CYCLIC_NONE;
#else	/* !illumos */
#ifdef _KERNEL
    callout_drain(&spa->spa_deadman_cycid);
#endif
#endif

    refcount_destroy(&spa->spa_refcount);

    spa_config_lock_destroy(spa);

    for (int t = 0; t < TXG_SIZE; t++)
        bplist_destroy(&spa->spa_free_bplist[t]);

    zio_checksum_templates_free(spa);

    cv_destroy(&spa->spa_async_cv);
    cv_destroy(&spa->spa_evicting_os_cv);
    cv_destroy(&spa->spa_proc_cv);
    cv_destroy(&spa->spa_scrub_io_cv);
    cv_destroy(&spa->spa_suspend_cv);

    mutex_destroy(&spa->spa_alloc_lock);
    mutex_destroy(&spa->spa_async_lock);
    mutex_destroy(&spa->spa_errlist_lock);
    mutex_destroy(&spa->spa_errlog_lock);
    mutex_destroy(&spa->spa_evicting_os_lock);
    mutex_destroy(&spa->spa_history_lock);
    mutex_destroy(&spa->spa_proc_lock);
    mutex_destroy(&spa->spa_props_lock);
    mutex_destroy(&spa->spa_cksum_tmpls_lock);
    mutex_destroy(&spa->spa_scrub_lock);
    mutex_destroy(&spa->spa_suspend_lock);
    mutex_destroy(&spa->spa_vdev_top_lock);

    kmem_free(spa, sizeof (spa_t));
}

/*
 * Given a pool, return the next pool in the namespace, or NULL if there is
 * none.  If 'prev' is NULL, return the first pool.
 */
spa_t *
spa_next(spa_t *prev)
{
    ASSERT(MUTEX_HELD(&spa_namespace_lock));

    if (prev)
        return (AVL_NEXT(&spa_namespace_avl, prev));
    else
        return (avl_first(&spa_namespace_avl));
}

/*
 * ==========================================================================
 * SPA refcount functions
 * ==========================================================================
 */

/*
 * Add a reference to the given spa_t.  Must have at least one reference, or
 * have the namespace lock held.
 */
void
spa_open_ref(spa_t *spa, void *tag)
{
    ASSERT(refcount_count(&spa->spa_refcount) >= spa->spa_minref ||
        MUTEX_HELD(&spa_namespace_lock));
    (void) refcount_add(&spa->spa_refcount, tag);
}

/*
 * Remove a reference to the given spa_t.  Must have at least one reference, or
 * have the namespace lock held.
 */
void
spa_close(spa_t *spa, void *tag)
{
    ASSERT(refcount_count(&spa->spa_refcount) > spa->spa_minref ||
        MUTEX_HELD(&spa_namespace_lock));
    (void) refcount_remove(&spa->spa_refcount, tag);
}

/*
 * Remove a reference to the given spa_t held by a dsl dir that is
 * being asynchronously released.  Async releases occur from a taskq
 * performing eviction of dsl datasets and dirs.  The namespace lock
 * isn't held and the hold by the object being evicted may contribute to
 * spa_minref (e.g. dataset or directory released during pool export),
 * so the asserts in spa_close() do not apply.
 */
void
spa_async_close(spa_t *spa, void *tag)
{
    (void) refcount_remove(&spa->spa_refcount, tag);
}

/*
 * Check to see if the spa refcount is zero.  Must be called with
 * spa_namespace_lock held.  We really compare against spa_minref, which is
 * the number of references acquired when opening a pool.
 */
boolean_t
spa_refcount_zero(spa_t *spa)
{
    ASSERT(MUTEX_HELD(&spa_namespace_lock));

    return (refcount_count(&spa->spa_refcount) == spa->spa_minref);
}
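
/*
 * A hypothetical holder (spa_example_hold_pool() below is an illustrative
 * sketch, not a function in this file) follows the reference discipline
 * described above: take a hold under the namespace lock, use the spa_t,
 * and drop the hold with the same tag.
 */
#if 0
static void
spa_example_hold_pool(spa_t *spa)
{
    mutex_enter(&spa_namespace_lock);
    spa_open_ref(spa, FTAG);    /* lock required if refcount may be "zero" */
    mutex_exit(&spa_namespace_lock);

    /* ... use the spa_t; it cannot be freed while we hold a reference ... */

    spa_close(spa, FTAG);       /* no locking required to release */
}
#endif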

/*
 * ==========================================================================
 * SPA spare and l2cache tracking
 * ==========================================================================
 */

/*
 * Hot spares and cache devices are tracked using the same code below,
 * for 'auxiliary' devices.
 */

typedef struct spa_aux {
    uint64_t	aux_guid;
    uint64_t	aux_pool;
    avl_node_t	aux_avl;
    int		aux_count;
} spa_aux_t;

static int
spa_aux_compare(const void *a, const void *b)
{
    const spa_aux_t *sa = a;
    const spa_aux_t *sb = b;

    if (sa->aux_guid < sb->aux_guid)
        return (-1);
    else if (sa->aux_guid > sb->aux_guid)
        return (1);
    else
        return (0);
}

void
spa_aux_add(vdev_t *vd, avl_tree_t *avl)
{
    avl_index_t where;
    spa_aux_t search;
    spa_aux_t *aux;

    search.aux_guid = vd->vdev_guid;
    if ((aux = avl_find(avl, &search, &where)) != NULL) {
        aux->aux_count++;
    } else {
        aux = kmem_zalloc(sizeof (spa_aux_t), KM_SLEEP);
        aux->aux_guid = vd->vdev_guid;
        aux->aux_count = 1;
        avl_insert(avl, aux, where);
    }
}

void
spa_aux_remove(vdev_t *vd, avl_tree_t *avl)
{
    spa_aux_t search;
    spa_aux_t *aux;
    avl_index_t where;

    search.aux_guid = vd->vdev_guid;
    aux = avl_find(avl, &search, &where);

    ASSERT(aux != NULL);

    if (--aux->aux_count == 0) {
        avl_remove(avl, aux);
        kmem_free(aux, sizeof (spa_aux_t));
    } else if (aux->aux_pool == spa_guid(vd->vdev_spa)) {
        aux->aux_pool = 0ULL;
    }
}

boolean_t
spa_aux_exists(uint64_t guid, uint64_t *pool, int *refcnt, avl_tree_t *avl)
{
    spa_aux_t search, *found;

    search.aux_guid = guid;
    found = avl_find(avl, &search, NULL);

    if (pool) {
        if (found)
            *pool = found->aux_pool;
        else
            *pool = 0ULL;
    }

    if (refcnt) {
        if (found)
            *refcnt = found->aux_count;
        else
            *refcnt = 0;
    }

    return (found != NULL);
}

void
spa_aux_activate(vdev_t *vd, avl_tree_t *avl)
{
    spa_aux_t search, *found;
    avl_index_t where;

    search.aux_guid = vd->vdev_guid;
    found = avl_find(avl, &search, &where);
    ASSERT(found != NULL);
    ASSERT(found->aux_pool == 0ULL);

    found->aux_pool = spa_guid(vd->vdev_spa);
}

/*
 * Spares are tracked globally due to the following constraints:
 *
 *	- A spare may be part of multiple pools.
 *	- A spare may be added to a pool even if it's actively in use within
 *	  another pool.
 *	- A spare in use in any pool can only be the source of a replacement if
 *	  the target is a spare in the same pool.
 *
 * We keep track of all spares on the system through the use of a reference
 * counted AVL tree.  When a vdev is added as a spare, or used as a replacement
 * spare, then we bump the reference count in the AVL tree.  In addition, we
 * set the 'vdev_isspare' member to indicate that the device is a spare
 * (active or inactive).  When a spare is made active (used to replace a
 * device in the pool), we also keep track of which pool it has been made a
 * part of.
 *
 * The 'spa_spare_lock' protects the AVL tree.  These functions are normally
 * called under the spa_namespace lock as part of vdev reconfiguration.  The
 * separate spare lock exists for the status query path, which does not need to
 * be completely consistent with respect to other vdev configuration changes.
 */
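
/*
 * A hypothetical status-path query (spa_example_spare_busy() below is an
 * illustrative sketch, not a function in this file): it needs only the
 * spare lock, taken internally by spa_spare_exists(), not the namespace
 * lock.
 */
#if 0
static boolean_t
spa_example_spare_busy(uint64_t guid)
{
    uint64_t pool;
    int refcnt;

    /* B_FALSE if the guid is not a known spare anywhere on the system. */
    if (!spa_spare_exists(guid, &pool, &refcnt))
        return (B_FALSE);

    /* pool != 0 means the spare is actively replacing a device there. */
    return (pool != 0 || refcnt > 1);
}
#endif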

static int
spa_spare_compare(const void *a, const void *b)
{
    return (spa_aux_compare(a, b));
}

void
spa_spare_add(vdev_t *vd)
{
    mutex_enter(&spa_spare_lock);
    ASSERT(!vd->vdev_isspare);
    spa_aux_add(vd, &spa_spare_avl);
    vd->vdev_isspare = B_TRUE;
    mutex_exit(&spa_spare_lock);
}

void
spa_spare_remove(vdev_t *vd)
{
    mutex_enter(&spa_spare_lock);
    ASSERT(vd->vdev_isspare);
    spa_aux_remove(vd, &spa_spare_avl);
    vd->vdev_isspare = B_FALSE;
    mutex_exit(&spa_spare_lock);
}

boolean_t
spa_spare_exists(uint64_t guid, uint64_t *pool, int *refcnt)
{
    boolean_t found;

    mutex_enter(&spa_spare_lock);
    found = spa_aux_exists(guid, pool, refcnt, &spa_spare_avl);
    mutex_exit(&spa_spare_lock);

    return (found);
}

void
spa_spare_activate(vdev_t *vd)
{
    mutex_enter(&spa_spare_lock);
    ASSERT(vd->vdev_isspare);
    spa_aux_activate(vd, &spa_spare_avl);
    mutex_exit(&spa_spare_lock);
}

/*
 * Level 2 ARC devices are tracked globally for the same reasons as spares.
 * Cache devices currently only support one pool per cache device, and so
 * for these devices the aux reference count is currently unused beyond 1.
 */

static int
spa_l2cache_compare(const void *a, const void *b)
{
    return (spa_aux_compare(a, b));
}

void
spa_l2cache_add(vdev_t *vd)
{
    mutex_enter(&spa_l2cache_lock);
    ASSERT(!vd->vdev_isl2cache);
    spa_aux_add(vd, &spa_l2cache_avl);
    vd->vdev_isl2cache = B_TRUE;
    mutex_exit(&spa_l2cache_lock);
}

void
spa_l2cache_remove(vdev_t *vd)
{
    mutex_enter(&spa_l2cache_lock);
    ASSERT(vd->vdev_isl2cache);
    spa_aux_remove(vd, &spa_l2cache_avl);
    vd->vdev_isl2cache = B_FALSE;
    mutex_exit(&spa_l2cache_lock);
}

boolean_t
spa_l2cache_exists(uint64_t guid, uint64_t *pool)
{
    boolean_t found;

    mutex_enter(&spa_l2cache_lock);
    found = spa_aux_exists(guid, pool, NULL, &spa_l2cache_avl);
    mutex_exit(&spa_l2cache_lock);

    return (found);
}

void
spa_l2cache_activate(vdev_t *vd)
{
    mutex_enter(&spa_l2cache_lock);
    ASSERT(vd->vdev_isl2cache);
    spa_aux_activate(vd, &spa_l2cache_avl);
    mutex_exit(&spa_l2cache_lock);
}

/*
 * ==========================================================================
 * SPA vdev locking
 * ==========================================================================
 */

/*
 * Lock the given spa_t for the purpose of adding or removing a vdev.
 * Grabs the global spa_namespace_lock plus the spa config lock for writing.
 * It returns the next transaction group for the spa_t.
 */
uint64_t
spa_vdev_enter(spa_t *spa)
{
    mutex_enter(&spa->spa_vdev_top_lock);
    mutex_enter(&spa_namespace_lock);
    return (spa_vdev_config_enter(spa));
}

/*
 * Internal implementation for spa_vdev_enter().  Used when a vdev
 * operation requires multiple syncs (i.e. removing a device) while
 * keeping the spa_namespace_lock held.
 */
uint64_t
spa_vdev_config_enter(spa_t *spa)
{
    ASSERT(MUTEX_HELD(&spa_namespace_lock));

    spa_config_enter(spa, SCL_ALL, spa, RW_WRITER);

    return (spa_last_synced_txg(spa) + 1);
}

/*
 * Used in combination with spa_vdev_config_enter() to allow the syncing
 * of multiple transactions without releasing the spa_namespace_lock.
 */
void
spa_vdev_config_exit(spa_t *spa, vdev_t *vd, uint64_t txg, int error, char *tag)
{
    ASSERT(MUTEX_HELD(&spa_namespace_lock));

    int config_changed = B_FALSE;

    ASSERT(txg > spa_last_synced_txg(spa));

    spa->spa_pending_vdev = NULL;

    /*
     * Reassess the DTLs.
     */
    vdev_dtl_reassess(spa->spa_root_vdev, 0, 0, B_FALSE);

    if (error == 0 && !list_is_empty(&spa->spa_config_dirty_list)) {
        config_changed = B_TRUE;
        spa->spa_config_generation++;
    }

    /*
     * Verify the metaslab classes.
     */
    ASSERT(metaslab_class_validate(spa_normal_class(spa)) == 0);
    ASSERT(metaslab_class_validate(spa_log_class(spa)) == 0);

    spa_config_exit(spa, SCL_ALL, spa);

    /*
     * Panic the system if the specified tag requires it.  This
     * is useful for ensuring that configurations are updated
     * transactionally.
     */
    if (zio_injection_enabled)
        zio_handle_panic_injection(spa, tag, 0);

    /*
     * Note: this txg_wait_synced() is important because it ensures
     * that there won't be more than one config change per txg.
     * This allows us to use the txg as the generation number.
     */
    if (error == 0)
        txg_wait_synced(spa->spa_dsl_pool, txg);

    if (vd != NULL) {
        ASSERT(!vd->vdev_detached || vd->vdev_dtl_sm == NULL);
        spa_config_enter(spa, SCL_ALL, spa, RW_WRITER);
        vdev_free(vd);
        spa_config_exit(spa, SCL_ALL, spa);
    }

    /*
     * If the config changed, update the config cache.
     */
    if (config_changed)
        spa_config_sync(spa, B_FALSE, B_TRUE);
}

/*
 * Unlock the spa_t after adding or removing a vdev.  Besides undoing the
 * locking of spa_vdev_enter(), we also want make sure the transactions have
 * synced to disk, and then update the global configuration cache with the new
 * information.
 */
int
spa_vdev_exit(spa_t *spa, vdev_t *vd, uint64_t txg, int error)
{
    spa_vdev_config_exit(spa, vd, txg, error, FTAG);
    mutex_exit(&spa_namespace_lock);
    mutex_exit(&spa->spa_vdev_top_lock);

    return (error);
}
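
/*
 * A hypothetical vdev reconfiguration (spa_example_add_vdev() below is an
 * illustrative sketch, not a function in this file) follows the
 * enter/exit pattern described above:
 */
#if 0
static int
spa_example_add_vdev(spa_t *spa)
{
    int error = 0;
    uint64_t txg;

    txg = spa_vdev_enter(spa);  /* namespace lock + SCL_ALL as writer */

    /* ... modify the vdev tree for transaction group 'txg' ... */

    /* Unlocks, waits for 'txg' to sync, and updates the config cache. */
    return (spa_vdev_exit(spa, NULL, txg, error));
}
#endif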

/*
 * Lock the given spa_t for the purpose of changing vdev state.
 */
void
spa_vdev_state_enter(spa_t *spa, int oplocks)
{
    int locks = SCL_STATE_ALL | oplocks;

    /*
     * Root pools may need to read from the underlying devfs filesystem
     * when opening up a vdev.  Unfortunately if we're holding the
     * SCL_ZIO lock it will result in a deadlock when we try to issue
     * the read from the root filesystem.  Instead we "prefetch"
     * the associated vnodes that we need prior to opening the
     * underlying devices and cache them so that we can prevent
     * any I/O when we are doing the actual open.
     */
    if (spa_is_root(spa)) {
        int low = locks & ~(SCL_ZIO - 1);
        int high = locks & ~low;

        spa_config_enter(spa, high, spa, RW_WRITER);
        vdev_hold(spa->spa_root_vdev);
        spa_config_enter(spa, low, spa, RW_WRITER);
    } else {
        spa_config_enter(spa, locks, spa, RW_WRITER);
    }
    spa->spa_vdev_locks = locks;
}

int
spa_vdev_state_exit(spa_t *spa, vdev_t *vd, int error)
{
    boolean_t config_changed = B_FALSE;

    if (vd != NULL || error == 0)
        vdev_dtl_reassess(vd ? vd->vdev_top : spa->spa_root_vdev,
            0, 0, B_FALSE);

    if (vd != NULL) {
        vdev_state_dirty(vd->vdev_top);
        config_changed = B_TRUE;
        spa->spa_config_generation++;
    }

    if (spa_is_root(spa))
        vdev_rele(spa->spa_root_vdev);

    ASSERT3U(spa->spa_vdev_locks, >=, SCL_STATE_ALL);
    spa_config_exit(spa, spa->spa_vdev_locks, spa);

    /*
     * If anything changed, wait for it to sync.  This ensures that,
     * from the system administrator's perspective, zpool(1M) commands
     * are synchronous.  This is important for things like zpool offline:
     * when the command completes, you expect no further I/O from ZFS.
     */
    if (vd != NULL)
        txg_wait_synced(spa->spa_dsl_pool, 0);

    /*
     * If the config changed, update the config cache.
     */
    if (config_changed) {
        mutex_enter(&spa_namespace_lock);
        spa_config_sync(spa, B_FALSE, B_TRUE);
        mutex_exit(&spa_namespace_lock);
    }

    return (error);
}
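
/*
 * The state-change wrappers follow the same shape.  A hypothetical sketch
 * (spa_example_clear_vdev() is illustrative only; SCL_NONE is assumed to
 * be the "no additional locks" value from sys/spa.h):
 */
#if 0
static int
spa_example_clear_vdev(spa_t *spa, vdev_t *vd)
{
    int error = 0;

    spa_vdev_state_enter(spa, SCL_NONE);

    /* ... change the state of 'vd', e.g. clear its error counters ... */

    return (spa_vdev_state_exit(spa, vd, error));
}
#endif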

/*
 * ==========================================================================
 * Miscellaneous functions
 * ==========================================================================
 */

void
spa_activate_mos_feature(spa_t *spa, const char *feature, dmu_tx_t *tx)
{
    if (!nvlist_exists(spa->spa_label_features, feature)) {
        fnvlist_add_boolean(spa->spa_label_features, feature);
        /*
         * When we are creating the pool (tx_txg == TXG_INITIAL), we can't
         * dirty the vdev config because lock SCL_CONFIG is not held.
         * Thankfully, in this case we don't need to dirty the config
         * because it will be written out anyway when we finish
         * creating the pool.
         */
        if (tx->tx_txg != TXG_INITIAL)
            vdev_config_dirty(spa->spa_root_vdev);
    }
}

void
spa_deactivate_mos_feature(spa_t *spa, const char *feature)
{
    if (nvlist_remove_all(spa->spa_label_features, feature) == 0)
        vdev_config_dirty(spa->spa_root_vdev);
}

/*
 * Rename a spa_t.
 */
int
spa_rename(const char *name, const char *newname)
{
    spa_t *spa;
    int err;

    /*
     * Lookup the spa_t and grab the config lock for writing.  We need to
     * actually open the pool so that we can sync out the necessary labels.
     * It's OK to call spa_open() with the namespace lock held because we
     * allow recursive calls for other reasons.
     */
    mutex_enter(&spa_namespace_lock);
    if ((err = spa_open(name, &spa, FTAG)) != 0) {
        mutex_exit(&spa_namespace_lock);
        return (err);
    }

    spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);

    avl_remove(&spa_namespace_avl, spa);
    (void) strlcpy(spa->spa_name, newname, sizeof (spa->spa_name));
    avl_add(&spa_namespace_avl, spa);

    /*
     * Sync all labels to disk with the new names by marking the root vdev
     * dirty and waiting for it to sync.  It will pick up the new pool name
     * during the sync.
     */
    vdev_config_dirty(spa->spa_root_vdev);

    spa_config_exit(spa, SCL_ALL, FTAG);

    txg_wait_synced(spa->spa_dsl_pool, 0);

    /*
     * Sync the updated config cache.
     */
    spa_config_sync(spa, B_FALSE, B_TRUE);

    spa_close(spa, FTAG);

    mutex_exit(&spa_namespace_lock);

    return (0);
}

/*
 * Return the spa_t associated with given pool_guid, if it exists.  If
 * device_guid is non-zero, determine whether the pool exists *and* contains
 * a device with the specified device_guid.
 */
spa_t *
spa_by_guid(uint64_t pool_guid, uint64_t device_guid)
{
    spa_t *spa;
    avl_tree_t *t = &spa_namespace_avl;

    ASSERT(MUTEX_HELD(&spa_namespace_lock));

    for (spa = avl_first(t); spa != NULL; spa = AVL_NEXT(t, spa)) {
        if (spa->spa_state == POOL_STATE_UNINITIALIZED)
            continue;
        if (spa->spa_root_vdev == NULL)
            continue;
        if (spa_guid(spa) == pool_guid) {
            if (device_guid == 0)
                break;

            if (vdev_lookup_by_guid(spa->spa_root_vdev,
                device_guid) != NULL)
                break;

            /*
             * Check any devices we may be in the process of adding.
             */
            if (spa->spa_pending_vdev) {
                if (vdev_lookup_by_guid(spa->spa_pending_vdev,
                    device_guid) != NULL)
                    break;
            }
        }
    }

    return (spa);
}

/*
 * Determine whether a pool with the given pool_guid exists.
 */
boolean_t
spa_guid_exists(uint64_t pool_guid, uint64_t device_guid)
{
    return (spa_by_guid(pool_guid, device_guid) != NULL);
}

char *
spa_strdup(const char *s)
{
    size_t len;
    char *new;

    len = strlen(s);
    new = kmem_alloc(len + 1, KM_SLEEP);
    bcopy(s, new, len);
    new[len] = '\0';

    return (new);
}

void
spa_strfree(char *s)
{
    kmem_free(s, strlen(s) + 1);
}

uint64_t
spa_get_random(uint64_t range)
{
    uint64_t r;

    ASSERT(range != 0);

    (void) random_get_pseudo_bytes((void *)&r, sizeof (uint64_t));

    return (r % range);
}

uint64_t
spa_generate_guid(spa_t *spa)
{
    uint64_t guid = spa_get_random(-1ULL);

    if (spa != NULL) {
        while (guid == 0 || spa_guid_exists(spa_guid(spa), guid))
            guid = spa_get_random(-1ULL);
    } else {
        while (guid == 0 || spa_guid_exists(guid, 0))
            guid = spa_get_random(-1ULL);
    }

    return (guid);
}

void
snprintf_blkptr(char *buf, size_t buflen, const blkptr_t *bp)
{
    char type[256];
    char *checksum = NULL;
    char *compress = NULL;

    if (bp != NULL) {
        if (BP_GET_TYPE(bp) & DMU_OT_NEWTYPE) {
            dmu_object_byteswap_t bswap =
                DMU_OT_BYTESWAP(BP_GET_TYPE(bp));
            (void) snprintf(type, sizeof (type), "bswap %s %s",
                DMU_OT_IS_METADATA(BP_GET_TYPE(bp)) ?
                "metadata" : "data",
                dmu_ot_byteswap[bswap].ob_name);
        } else {
            (void) strlcpy(type, dmu_ot[BP_GET_TYPE(bp)].ot_name,
                sizeof (type));
        }
        if (!BP_IS_EMBEDDED(bp)) {
            checksum =
                zio_checksum_table[BP_GET_CHECKSUM(bp)].ci_name;
        }
        compress = zio_compress_table[BP_GET_COMPRESS(bp)].ci_name;
    }

    SNPRINTF_BLKPTR(snprintf, ' ', buf, buflen, bp, type, checksum,
        compress);
}

void
spa_freeze(spa_t *spa)
{
    uint64_t freeze_txg = 0;

    spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
    if (spa->spa_freeze_txg == UINT64_MAX) {
        freeze_txg = spa_last_synced_txg(spa) + TXG_SIZE;
        spa->spa_freeze_txg = freeze_txg;
    }
    spa_config_exit(spa, SCL_ALL, FTAG);
    if (freeze_txg != 0)
        txg_wait_synced(spa_get_dsl(spa), freeze_txg);
}

void
zfs_panic_recover(const char *fmt, ...)
{
    va_list adx;

    va_start(adx, fmt);
    vcmn_err(zfs_recover ? CE_WARN : CE_PANIC, fmt, adx);
    va_end(adx);
}

/*
 * This is a stripped-down version of strtoull, suitable only for converting
 * lowercase hexadecimal numbers that don't overflow.
 */
uint64_t
zfs_strtonum(const char *str, char **nptr)
{
    uint64_t val = 0;
    char c;
    int digit;

    while ((c = *str) != '\0') {
        if (c >= '0' && c <= '9')
            digit = c - '0';
        else if (c >= 'a' && c <= 'f')
            digit = 10 + c - 'a';
        else
            break;

        val *= 16;
        val += digit;

        str++;
    }

    if (nptr)
        *nptr = (char *)str;

    return (val);
}
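
/*
 * For example, zfs_strtonum("1a2b", &end) returns 0x1a2b and leaves 'end'
 * pointing at the terminating NUL, while zfs_strtonum("10X", &end) returns
 * 0x10 and leaves 'end' pointing at the 'X'.  No overflow checking is
 * performed.
 */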

/*
 * ==========================================================================
 * Accessor functions
 * ==========================================================================
 */

boolean_t
spa_shutting_down(spa_t *spa)
{
    return (spa->spa_async_suspended);
}

dsl_pool_t *
spa_get_dsl(spa_t *spa)
{
    return (spa->spa_dsl_pool);
}

boolean_t
spa_is_initializing(spa_t *spa)
{
    return (spa->spa_is_initializing);
}

blkptr_t *
spa_get_rootblkptr(spa_t *spa)
{
    return (&spa->spa_ubsync.ub_rootbp);
}

void
spa_set_rootblkptr(spa_t *spa, const blkptr_t *bp)
{
    spa->spa_uberblock.ub_rootbp = *bp;
}

void
spa_altroot(spa_t *spa, char *buf, size_t buflen)
{
    if (spa->spa_root == NULL)
        buf[0] = '\0';
    else
        (void) strncpy(buf, spa->spa_root, buflen);
}

int
spa_sync_pass(spa_t *spa)
{
    return (spa->spa_sync_pass);
}

char *
spa_name(spa_t *spa)
{
    return (spa->spa_name);
}

uint64_t
spa_guid(spa_t *spa)
{
    dsl_pool_t *dp = spa_get_dsl(spa);
    uint64_t guid;

    /*
     * If we fail to parse the config during spa_load(), we can go through
     * the error path (which posts an ereport) and end up here with no root
     * vdev.  We stash the original pool guid in 'spa_config_guid' to handle
     * this case.
     */
    if (spa->spa_root_vdev == NULL)
        return (spa->spa_config_guid);

    guid = spa->spa_last_synced_guid != 0 ?
        spa->spa_last_synced_guid : spa->spa_root_vdev->vdev_guid;

    /*
     * Return the most recently synced out guid unless we're
     * in syncing context.
     */
    if (dp && dsl_pool_sync_context(dp))
        return (spa->spa_root_vdev->vdev_guid);
    else
        return (guid);
}

uint64_t
spa_load_guid(spa_t *spa)
{
    /*
     * This is a GUID that exists solely as a reference for the
     * purposes of the arc.  It is generated at load time, and
     * is never written to persistent storage.
     */
    return (spa->spa_load_guid);
}

uint64_t
spa_last_synced_txg(spa_t *spa)
{
    return (spa->spa_ubsync.ub_txg);
}

uint64_t
spa_first_txg(spa_t *spa)
{
    return (spa->spa_first_txg);
}

uint64_t
spa_syncing_txg(spa_t *spa)
{
    return (spa->spa_syncing_txg);
}

pool_state_t
spa_state(spa_t *spa)
{
    return (spa->spa_state);
}

spa_load_state_t
spa_load_state(spa_t *spa)
{
    return (spa->spa_load_state);
}

uint64_t
spa_freeze_txg(spa_t *spa)
{
    return (spa->spa_freeze_txg);
}

/* ARGSUSED */
uint64_t
spa_get_asize(spa_t *spa, uint64_t lsize)
{
    return (lsize * spa_asize_inflation);
}

/*
 * Return the amount of slop space in bytes.  It is 1/32 of the pool (3.2%),
 * or at least 128MB, unless that would cause it to be more than half the
 * pool size.
 *
 * See the comment above spa_slop_shift for details.
 */
uint64_t
spa_get_slop_space(spa_t *spa)
{
    uint64_t space = spa_get_dspace(spa);
    return (MAX(space >> spa_slop_shift, MIN(space >> 1, spa_min_slop)));
}

uint64_t
spa_get_dspace(spa_t *spa)
{
    return (spa->spa_dspace);
}

void
spa_update_dspace(spa_t *spa)
{
    spa->spa_dspace = metaslab_class_get_dspace(spa_normal_class(spa)) +
        ddt_get_dedup_dspace(spa);
}

/*
 * Return the failure mode that has been set for this pool.  The default
 * behavior will be to block all I/Os when a complete failure occurs.
 */
uint8_t
spa_get_failmode(spa_t *spa)
{
    return (spa->spa_failmode);
}

boolean_t
spa_suspended(spa_t *spa)
{
    return (spa->spa_suspended);
}

uint64_t
spa_version(spa_t *spa)
{
    return (spa->spa_ubsync.ub_version);
}

boolean_t
spa_deflate(spa_t *spa)
{
    return (spa->spa_deflate);
}

metaslab_class_t *
spa_normal_class(spa_t *spa)
{
    return (spa->spa_normal_class);
}

metaslab_class_t *
spa_log_class(spa_t *spa)
{
    return (spa->spa_log_class);
}

void
spa_evicting_os_register(spa_t *spa, objset_t *os)
{
    mutex_enter(&spa->spa_evicting_os_lock);
    list_insert_head(&spa->spa_evicting_os_list, os);
    mutex_exit(&spa->spa_evicting_os_lock);
}

void
spa_evicting_os_deregister(spa_t *spa, objset_t *os)
{
    mutex_enter(&spa->spa_evicting_os_lock);
    list_remove(&spa->spa_evicting_os_list, os);
    cv_broadcast(&spa->spa_evicting_os_cv);
    mutex_exit(&spa->spa_evicting_os_lock);
}

void
spa_evicting_os_wait(spa_t *spa)
{
    mutex_enter(&spa->spa_evicting_os_lock);
    while (!list_is_empty(&spa->spa_evicting_os_list))
        cv_wait(&spa->spa_evicting_os_cv, &spa->spa_evicting_os_lock);
    mutex_exit(&spa->spa_evicting_os_lock);

    dmu_buf_user_evict_wait();
}

int
spa_max_replication(spa_t *spa)
{
    /*
     * As of SPA_VERSION == SPA_VERSION_DITTO_BLOCKS, we are able to
     * handle BPs with more than one DVA allocated.  Set our max
     * replication level accordingly.
     */
    if (spa_version(spa) < SPA_VERSION_DITTO_BLOCKS)
        return (1);
    return (MIN(SPA_DVAS_PER_BP, spa_max_replication_override));
}

int
spa_prev_software_version(spa_t *spa)
{
    return (spa->spa_prev_software_version);
}

uint64_t
spa_deadman_synctime(spa_t *spa)
{
    return (spa->spa_deadman_synctime);
}

uint64_t
dva_get_dsize_sync(spa_t *spa, const dva_t *dva)
{
    uint64_t asize = DVA_GET_ASIZE(dva);
    uint64_t dsize = asize;

    ASSERT(spa_config_held(spa, SCL_ALL, RW_READER) != 0);

    if (asize != 0 && spa->spa_deflate) {
        uint64_t vdev = DVA_GET_VDEV(dva);
        vdev_t *vd = vdev_lookup_top(spa, vdev);
        if (vd == NULL) {
            panic(
                "dva_get_dsize_sync(): bad DVA %llu:%llu",
                (u_longlong_t)vdev, (u_longlong_t)asize);
        }
        dsize = (asize >> SPA_MINBLOCKSHIFT) * vd->vdev_deflate_ratio;
    }

    return (dsize);
}

uint64_t
bp_get_dsize_sync(spa_t *spa, const blkptr_t *bp)
{
    uint64_t dsize = 0;

    for (int d = 0; d < BP_GET_NDVAS(bp); d++)
        dsize += dva_get_dsize_sync(spa, &bp->blk_dva[d]);

    return (dsize);
}

uint64_t
bp_get_dsize(spa_t *spa, const blkptr_t *bp)
{
    uint64_t dsize = 0;

    spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);

    for (int d = 0; d < BP_GET_NDVAS(bp); d++)
        dsize += dva_get_dsize_sync(spa, &bp->blk_dva[d]);

    spa_config_exit(spa, SCL_VDEV, FTAG);

    return (dsize);
}

/*
 * ==========================================================================
 * Initialization and Termination
 * ==========================================================================
 */

static int
spa_name_compare(const void *a1, const void *a2)
{
    const spa_t *s1 = a1;
    const spa_t *s2 = a2;
    int s;

    s = strcmp(s1->spa_name, s2->spa_name);
    if (s > 0)
        return (1);
    if (s < 0)
        return (-1);
    return (0);
}

int
spa_busy(void)
{
    return (spa_active_count);
}

void
spa_boot_init()
{
    spa_config_load();
}

#ifdef _KERNEL
EVENTHANDLER_DEFINE(mountroot, spa_boot_init, NULL, 0);
#endif

void
spa_init(int mode)
{
    mutex_init(&spa_namespace_lock, NULL, MUTEX_DEFAULT, NULL);
    mutex_init(&spa_spare_lock, NULL, MUTEX_DEFAULT, NULL);
    mutex_init(&spa_l2cache_lock, NULL, MUTEX_DEFAULT, NULL);
    cv_init(&spa_namespace_cv, NULL, CV_DEFAULT, NULL);

    avl_create(&spa_namespace_avl, spa_name_compare, sizeof (spa_t),
        offsetof(spa_t, spa_avl));

    avl_create(&spa_spare_avl, spa_spare_compare, sizeof (spa_aux_t),
        offsetof(spa_aux_t, aux_avl));

    avl_create(&spa_l2cache_avl, spa_l2cache_compare, sizeof (spa_aux_t),
        offsetof(spa_aux_t, aux_avl));

    spa_mode_global = mode;

#ifdef illumos
#ifdef _KERNEL
    spa_arch_init();
#else
    if (spa_mode_global != FREAD && dprintf_find_string("watch")) {
        arc_procfd = open("/proc/self/ctl", O_WRONLY);
        if (arc_procfd == -1) {
            perror("could not enable watchpoints: "
                "opening /proc/self/ctl failed: ");
        } else {
            arc_watch = B_TRUE;
        }
    }
#endif
#endif /* illumos */
    refcount_sysinit();
    unique_init();
    range_tree_init();
    zio_init();
    lz4_init();
    dmu_init();
    zil_init();
    vdev_cache_stat_init();
    zfs_prop_init();
    zpool_prop_init();
    zpool_feature_init();
    spa_config_load();
    l2arc_start();
#ifndef illumos
#ifdef _KERNEL
    zfs_deadman_init();
#endif
#endif	/* !illumos */
}

void
spa_fini(void)
{
    l2arc_stop();

    spa_evict_all();

    vdev_cache_stat_fini();
    zil_fini();
    dmu_fini();
    lz4_fini();
    zio_fini();
    range_tree_fini();
    unique_fini();
    refcount_fini();

    avl_destroy(&spa_namespace_avl);
    avl_destroy(&spa_spare_avl);
    avl_destroy(&spa_l2cache_avl);

    cv_destroy(&spa_namespace_cv);
    mutex_destroy(&spa_namespace_lock);
    mutex_destroy(&spa_spare_lock);
    mutex_destroy(&spa_l2cache_lock);
}

/*
 * Return whether this pool has slogs.  No locking needed.
 * It's not a problem if the wrong answer is returned as it's only for
 * performance and not correctness.
 */
boolean_t
spa_has_slogs(spa_t *spa)
{
    return (spa->spa_log_class->mc_rotor != NULL);
}

spa_log_state_t
spa_get_log_state(spa_t *spa)
{
    return (spa->spa_log_state);
}

void
spa_set_log_state(spa_t *spa, spa_log_state_t state)
{
    spa->spa_log_state = state;
}

boolean_t
spa_is_root(spa_t *spa)
{
    return (spa->spa_is_root);
}

boolean_t
spa_writeable(spa_t *spa)
{
    return (!!(spa->spa_mode & FWRITE));
}

/*
 * Returns true if there is a pending sync task in any of the current
 * syncing txg, the current quiescing txg, or the current open txg.
 */
boolean_t
spa_has_pending_synctask(spa_t *spa)
{
    return (!txg_all_lists_empty(&spa->spa_dsl_pool->dp_sync_tasks));
}

int
spa_mode(spa_t *spa)
{
    return (spa->spa_mode);
}

uint64_t
spa_bootfs(spa_t *spa)
{
    return (spa->spa_bootfs);
}

uint64_t
spa_delegation(spa_t *spa)
{
    return (spa->spa_delegation);
}

objset_t *
spa_meta_objset(spa_t *spa)
{
    return (spa->spa_meta_objset);
}

enum zio_checksum
spa_dedup_checksum(spa_t *spa)
{
    return (spa->spa_dedup_checksum);
}

/*
 * Reset pool scan stats per scan pass (or reboot).
 */
void
spa_scan_stat_init(spa_t *spa)
{
    /* data not stored on disk */
    spa->spa_scan_pass_start = gethrestime_sec();
    spa->spa_scan_pass_exam = 0;
    vdev_scan_stat_init(spa->spa_root_vdev);
}

/*
 * Get scan stats for zpool status reports.
 */
int
spa_scan_get_stats(spa_t *spa, pool_scan_stat_t *ps)
{
    dsl_scan_t *scn = spa->spa_dsl_pool ? spa->spa_dsl_pool->dp_scan : NULL;

    if (scn == NULL || scn->scn_phys.scn_func == POOL_SCAN_NONE)
        return (SET_ERROR(ENOENT));
    bzero(ps, sizeof (pool_scan_stat_t));

    /* data stored on disk */
    ps->pss_func = scn->scn_phys.scn_func;
    ps->pss_start_time = scn->scn_phys.scn_start_time;
    ps->pss_end_time = scn->scn_phys.scn_end_time;
    ps->pss_to_examine = scn->scn_phys.scn_to_examine;
    ps->pss_examined = scn->scn_phys.scn_examined;
    ps->pss_to_process = scn->scn_phys.scn_to_process;
    ps->pss_processed = scn->scn_phys.scn_processed;
    ps->pss_errors = scn->scn_phys.scn_errors;
    ps->pss_state = scn->scn_phys.scn_state;

    /* data not stored on disk */
    ps->pss_pass_start = spa->spa_scan_pass_start;
    ps->pss_pass_exam = spa->spa_scan_pass_exam;

    return (0);
}

boolean_t
spa_debug_enabled(spa_t *spa)
{
    return (spa->spa_debug);
}

int
spa_maxblocksize(spa_t *spa)
{
    if (spa_feature_is_enabled(spa, SPA_FEATURE_LARGE_BLOCKS))
        return (SPA_MAXBLOCKSIZE);
    else
        return (SPA_OLD_MAXBLOCKSIZE);
}
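
/*
 * A hypothetical consumer of the scan statistics above
 * (spa_example_scan_active() is an illustrative sketch; DSS_SCANNING is
 * assumed to be the in-progress dsl_scan state from sys/dsl_scan.h):
 */
#if 0
static boolean_t
spa_example_scan_active(spa_t *spa)
{
    pool_scan_stat_t ps;

    /* ENOENT here means no scan has ever been run on this pool. */
    if (spa_scan_get_stats(spa, &ps) != 0)
        return (B_FALSE);

    return (ps.pss_state == DSS_SCANNING);
}
#endif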