spa_misc.c revision 307126
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011, 2015 by Delphix. All rights reserved.
 * Copyright 2015 Nexenta Systems, Inc. All rights reserved.
 * Copyright 2013 Martin Matuska <mm@FreeBSD.org>. All rights reserved.
 * Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
 * Copyright 2013 Saso Kiselkov. All rights reserved.
 * Copyright (c) 2014 Integros [integros.com]
 */

#include <sys/zfs_context.h>
#include <sys/spa_impl.h>
#include <sys/spa_boot.h>
#include <sys/zio.h>
#include <sys/zio_checksum.h>
#include <sys/zio_compress.h>
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/zap.h>
#include <sys/zil.h>
#include <sys/vdev_impl.h>
#include <sys/metaslab.h>
#include <sys/uberblock_impl.h>
#include <sys/txg.h>
#include <sys/avl.h>
#include <sys/unique.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_scan.h>
#include <sys/fs/zfs.h>
#include <sys/metaslab_impl.h>
#include <sys/arc.h>
#include <sys/ddt.h>
#include "zfs_prop.h"
#include <sys/zfeature.h>

/*
 * SPA locking
 *
 * There are four basic locks for managing spa_t structures:
 *
 * spa_namespace_lock (global mutex)
 *
 *	This lock must be acquired to do any of the following:
 *
 *	- Lookup a spa_t by name
 *	- Add or remove a spa_t from the namespace
 *	- Increase spa_refcount from non-zero
 *	- Check if spa_refcount is zero
 *	- Rename a spa_t
 *	- add/remove/attach/detach devices
 *	- Held for the duration of create/destroy/import/export
 *
 *	It does not need to handle recursion. A create or destroy may
 *	reference objects (files or zvols) in other pools, but by
 *	definition they must have an existing reference, and will never need
 *	to lookup a spa_t by name.
 *
 * spa_refcount (per-spa refcount_t protected by mutex)
 *
 *	This reference count keeps track of any active users of the spa_t.
 *	The spa_t cannot be destroyed or freed while this is non-zero.
 *	Internally, the refcount is never really 'zero' - opening a pool
 *	implicitly keeps some references in the DMU. Internally we check
 *	against spa_minref, but present the image of a zero/non-zero value
 *	to consumers.
 *
 * spa_config_lock[] (per-spa array of rwlocks)
 *
 *	This protects the spa_t from config changes, and must be held in
 *	the following circumstances:
 *
 *	- RW_READER to perform I/O to the spa
 *	- RW_WRITER to change the vdev config
 *
 * The locking order is fairly straightforward:
 *
 *		spa_namespace_lock	->	spa_refcount
 *
 *	The namespace lock must be acquired to increase the refcount from 0
 *	or to check if it is zero.
 *
 *		spa_refcount		->	spa_config_lock[]
 *
 *	There must be at least one valid reference on the spa_t to acquire
 *	the config lock.
 *
 *		spa_namespace_lock	->	spa_config_lock[]
 *
 *	The namespace lock must always be taken before the config lock.
 *
 *
 * The spa_namespace_lock can be acquired directly and is globally visible.
 *
 * The namespace is manipulated using the following functions, all of which
 * require the spa_namespace_lock to be held.
 *
 *	spa_lookup()		Lookup a spa_t by name.
 *
 *	spa_add()		Create a new spa_t in the namespace.
 *
 *	spa_remove()		Remove a spa_t from the namespace. This also
 *				frees up any memory associated with the spa_t.
 *
 *	spa_next()		Returns the next spa_t in the system, or the
 *				first if NULL is passed.
 *
 *	spa_evict_all()		Shutdown and remove all spa_t structures in
 *				the system.
 *
 *	spa_guid_exists()	Determine whether a pool/device guid exists.
 *
 * The spa_refcount is manipulated using the following functions:
 *
 *	spa_open_ref()		Adds a reference to the given spa_t. Must be
 *				called with spa_namespace_lock held if the
 *				refcount is currently zero.
 *
 *	spa_close()		Remove a reference from the spa_t. This will
 *				not free the spa_t or remove it from the
 *				namespace. No locking is required.
 *
 *	spa_refcount_zero()	Returns true if the refcount is currently
 *				zero. Must be called with spa_namespace_lock
 *				held.
 *
 * The spa_config_lock[] is an array of rwlocks, ordered as follows:
 * SCL_CONFIG > SCL_STATE > SCL_ALLOC > SCL_ZIO > SCL_FREE > SCL_VDEV.
 * spa_config_lock[] is manipulated with spa_config_{enter,exit,held}().
 *
 * To read the configuration, it suffices to hold one of these locks as reader.
 * To modify the configuration, you must hold all locks as writer. To modify
 * vdev state without altering the vdev tree's topology (e.g. online/offline),
 * you must hold SCL_STATE and SCL_ZIO as writer.
 *
 * We use these distinct config locks to avoid recursive lock entry.
 * For example, spa_sync() (which holds SCL_CONFIG as reader) induces
 * block allocations (SCL_ALLOC), which may require reading space maps
 * from disk (dmu_read() -> zio_read() -> SCL_ZIO).
 *
 * The spa config locks cannot be normal rwlocks because we need the
 * ability to hand off ownership. For example, SCL_ZIO is acquired
 * by the issuing thread and later released by an interrupt thread.
 * They do, however, obey the usual write-wanted semantics to prevent
 * writer (i.e. system administrator) starvation.
 *
 * The lock acquisition rules are as follows:
 *
 * SCL_CONFIG
 *	Protects changes to the vdev tree topology, such as vdev
 *	add/remove/attach/detach. Protects the dirty config list
 *	(spa_config_dirty_list) and the set of spares and l2arc devices.
 *
 * SCL_STATE
 *	Protects changes to pool state and vdev state, such as vdev
 *	online/offline/fault/degrade/clear. Protects the dirty state list
 *	(spa_state_dirty_list) and global pool state (spa_state).
 *
 * SCL_ALLOC
 *	Protects changes to metaslab groups and classes.
 *	Held as reader by metaslab_alloc() and metaslab_claim().
 *
 * SCL_ZIO
 *	Held by bp-level zios (those which have no io_vd upon entry)
 *	to prevent changes to the vdev tree. The bp-level zio implicitly
 *	protects all of its vdev child zios, which do not hold SCL_ZIO.
 *
 * SCL_FREE
 *	Protects changes to metaslab groups and classes.
 *	Held as reader by metaslab_free(). SCL_FREE is distinct from
 *	SCL_ALLOC, and lower than SCL_ZIO, so that we can safely free
 *	blocks in zio_done() while another i/o that holds either
 *	SCL_ALLOC or SCL_ZIO is waiting for this i/o to complete.
 *
 * SCL_VDEV
 *	Held as reader to prevent changes to the vdev tree during trivial
 *	inquiries such as bp_get_dsize(). SCL_VDEV is distinct from the
 *	other locks, and lower than all of them, to ensure that it's safe
 *	to acquire regardless of caller context.
 *
 * In addition, the following rules apply:
 *
 * (a)	spa_props_lock protects pool properties, spa_config and
 *	spa_config_list. The lock ordering is SCL_CONFIG > spa_props_lock.
 *
 * (b)	I/O operations on leaf vdevs. For any zio operation that takes
 *	an explicit vdev_t argument -- such as zio_ioctl(), zio_read_phys(),
 *	or zio_write_phys() -- the caller must ensure that the config cannot
 *	change in the interim, and that the vdev cannot be reopened.
 *	SCL_STATE as reader suffices for both.
 *
 * The vdev configuration is protected by spa_vdev_enter() / spa_vdev_exit().
 *
 *	spa_vdev_enter()	Acquire the namespace lock and the config lock
 *				for writing.
 *
 *	spa_vdev_exit()		Release the config lock, wait for all I/O
 *				to complete, sync the updated configs to the
 *				cache, and release the namespace lock.
 *
 * vdev state is protected by spa_vdev_state_enter() / spa_vdev_state_exit().
 * Like spa_vdev_enter/exit, these are convenience wrappers -- the actual
 * locking is, always, based on spa_namespace_lock and spa_config_lock[].
 *
 * spa_rename() is also implemented within this file since it requires
 * manipulation of the namespace.
 */
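
/*
 * For example (an illustrative sketch only, with a hypothetical caller;
 * FTAG is the usual tag convention), a trivial read-side inquiry takes a
 * single lock as reader, while a topology change takes all of them as
 * writer:
 *
 *	spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
 *	... examine the vdev tree ...
 *	spa_config_exit(spa, SCL_VDEV, FTAG);
 *
 *	spa_config_enter(spa, SCL_ALL, spa, RW_WRITER);
 *	... change the vdev tree ...
 *	spa_config_exit(spa, SCL_ALL, spa);
 */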

static avl_tree_t spa_namespace_avl;
kmutex_t spa_namespace_lock;
static kcondvar_t spa_namespace_cv;
static int spa_active_count;
int spa_max_replication_override = SPA_DVAS_PER_BP;

static kmutex_t spa_spare_lock;
static avl_tree_t spa_spare_avl;
static kmutex_t spa_l2cache_lock;
static avl_tree_t spa_l2cache_avl;

kmem_cache_t *spa_buffer_pool;
int spa_mode_global;

#ifdef ZFS_DEBUG
/* Everything except dprintf and spa is on by default in debug builds */
int zfs_flags = ~(ZFS_DEBUG_DPRINTF | ZFS_DEBUG_SPA);
#else
int zfs_flags = 0;
#endif

SYSCTL_DECL(_debug);
TUNABLE_INT("debug.zfs_flags", &zfs_flags);
SYSCTL_INT(_debug, OID_AUTO, zfs_flags, CTLFLAG_RWTUN, &zfs_flags, 0,
    "ZFS debug flags.");

/*
 * zfs_recover can be set to nonzero to attempt to recover from
 * otherwise-fatal errors, typically caused by on-disk corruption. When
 * set, calls to zfs_panic_recover() will turn into warning messages.
 * This should only be used as a last resort, as it typically results
 * in leaked space, or worse.
 */
boolean_t zfs_recover = B_FALSE;
SYSCTL_DECL(_vfs_zfs);
TUNABLE_INT("vfs.zfs.recover", &zfs_recover);
SYSCTL_INT(_vfs_zfs, OID_AUTO, recover, CTLFLAG_RWTUN, &zfs_recover, 0,
    "Try to recover from otherwise-fatal errors.");

static int
sysctl_vfs_zfs_debug_flags(SYSCTL_HANDLER_ARGS)
{
	int err, val;

	val = zfs_flags;
	err = sysctl_handle_int(oidp, &val, 0, req);
	if (err != 0 || req->newptr == NULL)
		return (err);

	/*
	 * ZFS_DEBUG_MODIFY must be enabled prior to boot so all
	 * arc buffers in the system have the necessary additional
	 * checksum data. However, it is safe to disable at any
	 * time.
	 */
	if (!(zfs_flags & ZFS_DEBUG_MODIFY))
		val &= ~ZFS_DEBUG_MODIFY;
	zfs_flags = val;

	return (0);
}

TUNABLE_INT("vfs.zfs.debug_flags", &zfs_flags);
SYSCTL_PROC(_vfs_zfs, OID_AUTO, debug_flags,
    CTLTYPE_UINT | CTLFLAG_MPSAFE | CTLFLAG_RW, 0, sizeof(int),
    sysctl_vfs_zfs_debug_flags, "IU", "Debug flags for ZFS testing.");

/*
 * If destroy encounters an EIO while reading metadata (e.g. indirect
 * blocks), space referenced by the missing metadata can not be freed.
 * Normally this causes the background destroy to become "stalled", as
 * it is unable to make forward progress. While in this stalled state,
 * all remaining space to free from the error-encountering filesystem is
 * "temporarily leaked". Set this flag to cause it to ignore the EIO,
 * permanently leak the space from indirect blocks that can not be read,
 * and continue to free everything else that it can.
 *
 * The default, "stalling" behavior is useful if the storage partially
 * fails (i.e. some but not all i/os fail), and then later recovers. In
 * this case, we will be able to continue pool operations while it is
 * partially failed, and when it recovers, we can continue to free the
 * space, with no leaks. However, note that this case is actually
 * fairly rare.
 *
 * Typically pools either (a) fail completely (but perhaps temporarily,
 * e.g. a top-level vdev going offline), or (b) have localized,
 * permanent errors (e.g. disk returns the wrong data due to bit flip or
 * firmware bug). In case (a), this setting does not matter because the
 * pool will be suspended and the sync thread will not be able to make
 * forward progress regardless. In case (b), because the error is
 * permanent, the best we can do is leak the minimum amount of space,
 * which is what setting this flag will do. Therefore, it is reasonable
 * for this flag to normally be set, but we chose the more conservative
 * approach of not setting it, so that there is no possibility of
 * leaking space in the "partial temporary" failure case.
 */
boolean_t zfs_free_leak_on_eio = B_FALSE;

/*
 * Expiration time in milliseconds. This value has two meanings. First it is
 * used to determine when the spa_deadman() logic should fire. By default the
 * spa_deadman() will fire if spa_sync() has not completed in 1000 seconds.
 * Secondly, the value determines if an I/O is considered "hung". Any I/O that
 * has not completed in zfs_deadman_synctime_ms is considered "hung" resulting
 * in a system panic.
 */
uint64_t zfs_deadman_synctime_ms = 1000000ULL;
TUNABLE_QUAD("vfs.zfs.deadman_synctime_ms", &zfs_deadman_synctime_ms);
SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, deadman_synctime_ms, CTLFLAG_RDTUN,
    &zfs_deadman_synctime_ms, 0,
    "Stalled ZFS I/O expiration time in milliseconds");

/*
 * Check time in milliseconds. This defines the frequency at which we check
 * for hung I/O.
 */
uint64_t zfs_deadman_checktime_ms = 5000ULL;
TUNABLE_QUAD("vfs.zfs.deadman_checktime_ms", &zfs_deadman_checktime_ms);
SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, deadman_checktime_ms, CTLFLAG_RDTUN,
    &zfs_deadman_checktime_ms, 0,
    "Period of checks for stalled ZFS I/O in milliseconds");

/*
 * The default value of -1 for zfs_deadman_enabled is resolved in
 * zfs_deadman_init().
 */
int zfs_deadman_enabled = -1;
TUNABLE_INT("vfs.zfs.deadman_enabled", &zfs_deadman_enabled);
SYSCTL_INT(_vfs_zfs, OID_AUTO, deadman_enabled, CTLFLAG_RDTUN,
    &zfs_deadman_enabled, 0, "Kernel panic on stalled ZFS I/O");

/*
 * The worst case is single-sector max-parity RAID-Z blocks, in which
 * case the space requirement is exactly (VDEV_RAIDZ_MAXPARITY + 1)
 * times the size; so just assume that. Add to this the fact that
 * we can have up to 3 DVAs per bp, and one more factor of 2 because
 * the block may be dittoed with up to 3 DVAs by ddt_sync(). All together,
 * the worst case is:
 *     (VDEV_RAIDZ_MAXPARITY + 1) * SPA_DVAS_PER_BP * 2 == 24
 */
int spa_asize_inflation = 24;
TUNABLE_INT("vfs.zfs.spa_asize_inflation", &spa_asize_inflation);
SYSCTL_INT(_vfs_zfs, OID_AUTO, spa_asize_inflation, CTLFLAG_RWTUN,
    &spa_asize_inflation, 0,
    "Worst case inflation factor for single sector writes");
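
/*
 * Worked example of the bound above: a single 512-byte (one-sector)
 * write can, in the worst case, allocate
 * 512 * (VDEV_RAIDZ_MAXPARITY + 1) * SPA_DVAS_PER_BP * 2 =
 * 512 * 4 * 3 * 2 = 12288 bytes, i.e. 24 times its logical size.
 */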

#ifndef illumos
#ifdef _KERNEL
static void
zfs_deadman_init()
{
	/*
	 * If we are not i386 or amd64 or in a virtual machine,
	 * disable the ZFS deadman thread by default.
	 */
	if (zfs_deadman_enabled == -1) {
#if defined(__amd64__) || defined(__i386__)
		zfs_deadman_enabled = (vm_guest == VM_GUEST_NO) ? 1 : 0;
#else
		zfs_deadman_enabled = 0;
#endif
	}
}
#endif	/* _KERNEL */
#endif	/* !illumos */

/*
 * Normally, we don't allow the last 3.1% (1/(2^spa_slop_shift)) of space in
 * the pool to be consumed. This ensures that we don't run the pool
 * completely out of space, due to unaccounted changes (e.g. to the MOS).
 * It also limits the worst-case time to allocate space. If we have
 * less than this amount of free space, most ZPL operations (e.g. write,
 * create) will return ENOSPC.
 *
 * Certain operations (e.g. file removal, most administrative actions) can
 * use half the slop space. They will only return ENOSPC if less than half
 * the slop space is free. Typically, once the pool has less than the slop
 * space free, the user will use these operations to free up space in the
 * pool. These are the operations that call dsl_pool_adjustedsize() with
 * the netfree argument set to TRUE.
 *
 * A very restricted set of operations are always permitted, regardless of
 * the amount of free space. These are the operations that call
 * dsl_sync_task(ZFS_SPACE_CHECK_NONE), e.g. "zfs destroy". If these
 * operations result in a net increase in the amount of space used,
 * it is possible to run the pool completely out of space, causing it to
 * be permanently read-only.
 *
 * Note that on very small pools, the slop space will be larger than
 * 3.1%, in an effort to have it be at least spa_min_slop (128MB),
 * but we never allow it to be more than half the pool size.
 *
 * See also the comments in zfs_space_check_t.
 */
int spa_slop_shift = 5;
SYSCTL_INT(_vfs_zfs, OID_AUTO, spa_slop_shift, CTLFLAG_RWTUN,
    &spa_slop_shift, 0,
    "Shift value of reserved space (1/(2^spa_slop_shift)).");
uint64_t spa_min_slop = 128 * 1024 * 1024;
SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, spa_min_slop, CTLFLAG_RWTUN,
    &spa_min_slop, 0,
    "Minimal value of reserved space");

/*
 * ==========================================================================
 * SPA config locking
 * ==========================================================================
 */
static void
spa_config_lock_init(spa_t *spa)
{
	for (int i = 0; i < SCL_LOCKS; i++) {
		spa_config_lock_t *scl = &spa->spa_config_lock[i];
		mutex_init(&scl->scl_lock, NULL, MUTEX_DEFAULT, NULL);
		cv_init(&scl->scl_cv, NULL, CV_DEFAULT, NULL);
		refcount_create_untracked(&scl->scl_count);
		scl->scl_writer = NULL;
		scl->scl_write_wanted = 0;
	}
}

static void
spa_config_lock_destroy(spa_t *spa)
{
	for (int i = 0; i < SCL_LOCKS; i++) {
		spa_config_lock_t *scl = &spa->spa_config_lock[i];
		mutex_destroy(&scl->scl_lock);
		cv_destroy(&scl->scl_cv);
		refcount_destroy(&scl->scl_count);
		ASSERT(scl->scl_writer == NULL);
		ASSERT(scl->scl_write_wanted == 0);
	}
}

int
spa_config_tryenter(spa_t *spa, int locks, void *tag, krw_t rw)
{
	for (int i = 0; i < SCL_LOCKS; i++) {
		spa_config_lock_t *scl = &spa->spa_config_lock[i];
		if (!(locks & (1 << i)))
			continue;
		mutex_enter(&scl->scl_lock);
		if (rw == RW_READER) {
			if (scl->scl_writer || scl->scl_write_wanted) {
				mutex_exit(&scl->scl_lock);
				spa_config_exit(spa, locks & ((1 << i) - 1),
				    tag);
				return (0);
			}
		} else {
			ASSERT(scl->scl_writer != curthread);
			if (!refcount_is_zero(&scl->scl_count)) {
				mutex_exit(&scl->scl_lock);
				spa_config_exit(spa, locks & ((1 << i) - 1),
				    tag);
				return (0);
			}
			scl->scl_writer = curthread;
		}
		(void) refcount_add(&scl->scl_count, tag);
		mutex_exit(&scl->scl_lock);
	}
	return (1);
}

void
spa_config_enter(spa_t *spa, int locks, void *tag, krw_t rw)
{
	int wlocks_held = 0;

	ASSERT3U(SCL_LOCKS, <, sizeof (wlocks_held) * NBBY);

	for (int i = 0; i < SCL_LOCKS; i++) {
		spa_config_lock_t *scl = &spa->spa_config_lock[i];
		if (scl->scl_writer == curthread)
			wlocks_held |= (1 << i);
		if (!(locks & (1 << i)))
			continue;
		mutex_enter(&scl->scl_lock);
		if (rw == RW_READER) {
			while (scl->scl_writer || scl->scl_write_wanted) {
				cv_wait(&scl->scl_cv, &scl->scl_lock);
			}
		} else {
			ASSERT(scl->scl_writer != curthread);
			while (!refcount_is_zero(&scl->scl_count)) {
				scl->scl_write_wanted++;
				cv_wait(&scl->scl_cv, &scl->scl_lock);
				scl->scl_write_wanted--;
			}
			scl->scl_writer = curthread;
		}
		(void) refcount_add(&scl->scl_count, tag);
		mutex_exit(&scl->scl_lock);
	}
	ASSERT(wlocks_held <= locks);
}

void
spa_config_exit(spa_t *spa, int locks, void *tag)
{
	for (int i = SCL_LOCKS - 1; i >= 0; i--) {
		spa_config_lock_t *scl = &spa->spa_config_lock[i];
		if (!(locks & (1 << i)))
			continue;
		mutex_enter(&scl->scl_lock);
		ASSERT(!refcount_is_zero(&scl->scl_count));
		if (refcount_remove(&scl->scl_count, tag) == 0) {
			ASSERT(scl->scl_writer == NULL ||
			    scl->scl_writer == curthread);
			scl->scl_writer = NULL;	/* OK in either case */
			cv_broadcast(&scl->scl_cv);
		}
		mutex_exit(&scl->scl_lock);
	}
}

int
spa_config_held(spa_t *spa, int locks, krw_t rw)
{
	int locks_held = 0;

	for (int i = 0; i < SCL_LOCKS; i++) {
		spa_config_lock_t *scl = &spa->spa_config_lock[i];
		if (!(locks & (1 << i)))
			continue;
		if ((rw == RW_READER && !refcount_is_zero(&scl->scl_count)) ||
		    (rw == RW_WRITER && scl->scl_writer == curthread))
			locks_held |= 1 << i;
	}

	return (locks_held);
}
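
/*
 * spa_config_held() is intended for ASSERTs rather than for flow control;
 * an illustrative use:
 *
 *	ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
 *
 * Note that for RW_READER it reports whether *anyone* holds the lock as
 * reader (the reader count is untracked), while for RW_WRITER it checks
 * that the current thread is the writer.
 */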

/*
 * ==========================================================================
 * SPA namespace functions
 * ==========================================================================
 */

/*
 * Lookup the named spa_t in the AVL tree. The spa_namespace_lock must be held.
 * Returns NULL if no matching spa_t is found.
 */
spa_t *
spa_lookup(const char *name)
{
	static spa_t search;	/* spa_t is large; don't allocate on stack */
	spa_t *spa;
	avl_index_t where;
	char *cp;

	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	(void) strlcpy(search.spa_name, name, sizeof (search.spa_name));

	/*
	 * If it's a full dataset name, figure out the pool name and
	 * just use that.
	 */
	cp = strpbrk(search.spa_name, "/@#");
	if (cp != NULL)
		*cp = '\0';

	spa = avl_find(&spa_namespace_avl, &search, &where);

	return (spa);
}

/*
 * Fires when spa_sync has not completed within zfs_deadman_synctime_ms.
 * If the zfs_deadman_enabled flag is set then it inspects all vdev queues
 * looking for potentially hung I/Os.
 */
void
spa_deadman(void *arg)
{
	spa_t *spa = arg;

	/*
	 * Disable the deadman timer if the pool is suspended.
	 */
	if (spa_suspended(spa)) {
#ifdef illumos
		VERIFY(cyclic_reprogram(spa->spa_deadman_cycid, CY_INFINITY));
#else
		/* Nothing; just don't schedule any future callouts. */
#endif
		return;
	}

	zfs_dbgmsg("slow spa_sync: started %llu seconds ago, calls %llu",
	    (gethrtime() - spa->spa_sync_starttime) / NANOSEC,
	    ++spa->spa_deadman_calls);
	if (zfs_deadman_enabled)
		vdev_deadman(spa->spa_root_vdev);
#ifdef __FreeBSD__
#ifdef _KERNEL
	callout_schedule(&spa->spa_deadman_cycid,
	    hz * zfs_deadman_checktime_ms / MILLISEC);
#endif
#endif
}

/*
 * Create an uninitialized spa_t with the given name. Requires
 * spa_namespace_lock. The caller must ensure that the spa_t doesn't already
 * exist by calling spa_lookup() first.
 */
spa_t *
spa_add(const char *name, nvlist_t *config, const char *altroot)
{
	spa_t *spa;
	spa_config_dirent_t *dp;
#ifdef illumos
	cyc_handler_t hdlr;
	cyc_time_t when;
#endif

	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	spa = kmem_zalloc(sizeof (spa_t), KM_SLEEP);

	mutex_init(&spa->spa_async_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_errlist_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_errlog_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_evicting_os_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_history_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_proc_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_props_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_cksum_tmpls_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_scrub_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_suspend_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_vdev_top_lock, NULL, MUTEX_DEFAULT, NULL);

	cv_init(&spa->spa_async_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&spa->spa_evicting_os_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&spa->spa_proc_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&spa->spa_scrub_io_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&spa->spa_suspend_cv, NULL, CV_DEFAULT, NULL);

	for (int t = 0; t < TXG_SIZE; t++)
		bplist_create(&spa->spa_free_bplist[t]);

	(void) strlcpy(spa->spa_name, name, sizeof (spa->spa_name));
	spa->spa_state = POOL_STATE_UNINITIALIZED;
	spa->spa_freeze_txg = UINT64_MAX;
	spa->spa_final_txg = UINT64_MAX;
	spa->spa_load_max_txg = UINT64_MAX;
	spa->spa_proc = &p0;
	spa->spa_proc_state = SPA_PROC_NONE;

#ifdef illumos
	hdlr.cyh_func = spa_deadman;
	hdlr.cyh_arg = spa;
	hdlr.cyh_level = CY_LOW_LEVEL;
#endif

	spa->spa_deadman_synctime = MSEC2NSEC(zfs_deadman_synctime_ms);

#ifdef illumos
	/*
	 * This determines how often we need to check for hung I/Os after
	 * the cyclic has already fired. Since checking for hung I/Os is
	 * an expensive operation we don't want to check too frequently.
	 * Instead wait for 5 seconds before checking again.
	 */
	when.cyt_interval = MSEC2NSEC(zfs_deadman_checktime_ms);
	when.cyt_when = CY_INFINITY;
	mutex_enter(&cpu_lock);
	spa->spa_deadman_cycid = cyclic_add(&hdlr, &when);
	mutex_exit(&cpu_lock);
#else	/* !illumos */
#ifdef _KERNEL
	callout_init(&spa->spa_deadman_cycid, CALLOUT_MPSAFE);
#endif
#endif
	refcount_create(&spa->spa_refcount);
	spa_config_lock_init(spa);

	avl_add(&spa_namespace_avl, spa);

	/*
	 * Set the alternate root, if there is one.
	 */
	if (altroot) {
		spa->spa_root = spa_strdup(altroot);
		spa_active_count++;
	}

	/*
	 * Every pool starts with the default cachefile.
	 */
	list_create(&spa->spa_config_list, sizeof (spa_config_dirent_t),
	    offsetof(spa_config_dirent_t, scd_link));

	dp = kmem_zalloc(sizeof (spa_config_dirent_t), KM_SLEEP);
	dp->scd_path = altroot ? NULL : spa_strdup(spa_config_path);
	list_insert_head(&spa->spa_config_list, dp);

	VERIFY(nvlist_alloc(&spa->spa_load_info, NV_UNIQUE_NAME,
	    KM_SLEEP) == 0);

	if (config != NULL) {
		nvlist_t *features;

		if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_FEATURES_FOR_READ,
		    &features) == 0) {
			VERIFY(nvlist_dup(features, &spa->spa_label_features,
			    0) == 0);
		}

		VERIFY(nvlist_dup(config, &spa->spa_config, 0) == 0);
	}

	if (spa->spa_label_features == NULL) {
		VERIFY(nvlist_alloc(&spa->spa_label_features, NV_UNIQUE_NAME,
		    KM_SLEEP) == 0);
	}

	spa->spa_debug = ((zfs_flags & ZFS_DEBUG_SPA) != 0);

	spa->spa_min_ashift = INT_MAX;
	spa->spa_max_ashift = 0;

	/*
	 * As a pool is being created, treat all features as disabled by
	 * setting SPA_FEATURE_DISABLED for all entries in the feature
	 * refcount cache.
	 */
	for (int i = 0; i < SPA_FEATURES; i++) {
		spa->spa_feat_refcount_cache[i] = SPA_FEATURE_DISABLED;
	}

	return (spa);
}

/*
 * Removes a spa_t from the namespace, freeing up any memory used. Requires
 * spa_namespace_lock. This is called only after the spa_t has been closed and
 * deactivated.
 */
void
spa_remove(spa_t *spa)
{
	spa_config_dirent_t *dp;

	ASSERT(MUTEX_HELD(&spa_namespace_lock));
	ASSERT(spa->spa_state == POOL_STATE_UNINITIALIZED);
	ASSERT3U(refcount_count(&spa->spa_refcount), ==, 0);

	nvlist_free(spa->spa_config_splitting);

	avl_remove(&spa_namespace_avl, spa);
	cv_broadcast(&spa_namespace_cv);

	if (spa->spa_root) {
		spa_strfree(spa->spa_root);
		spa_active_count--;
	}

	while ((dp = list_head(&spa->spa_config_list)) != NULL) {
		list_remove(&spa->spa_config_list, dp);
		if (dp->scd_path != NULL)
			spa_strfree(dp->scd_path);
		kmem_free(dp, sizeof (spa_config_dirent_t));
	}

	list_destroy(&spa->spa_config_list);

	nvlist_free(spa->spa_label_features);
	nvlist_free(spa->spa_load_info);
	spa_config_set(spa, NULL);

#ifdef illumos
	mutex_enter(&cpu_lock);
	if (spa->spa_deadman_cycid != CYCLIC_NONE)
		cyclic_remove(spa->spa_deadman_cycid);
	mutex_exit(&cpu_lock);
	spa->spa_deadman_cycid = CYCLIC_NONE;
#else	/* !illumos */
#ifdef _KERNEL
	callout_drain(&spa->spa_deadman_cycid);
#endif
#endif

	refcount_destroy(&spa->spa_refcount);

	spa_config_lock_destroy(spa);

	for (int t = 0; t < TXG_SIZE; t++)
		bplist_destroy(&spa->spa_free_bplist[t]);

	zio_checksum_templates_free(spa);

	cv_destroy(&spa->spa_async_cv);
	cv_destroy(&spa->spa_evicting_os_cv);
	cv_destroy(&spa->spa_proc_cv);
	cv_destroy(&spa->spa_scrub_io_cv);
	cv_destroy(&spa->spa_suspend_cv);

	mutex_destroy(&spa->spa_async_lock);
	mutex_destroy(&spa->spa_errlist_lock);
	mutex_destroy(&spa->spa_errlog_lock);
	mutex_destroy(&spa->spa_evicting_os_lock);
	mutex_destroy(&spa->spa_history_lock);
	mutex_destroy(&spa->spa_proc_lock);
	mutex_destroy(&spa->spa_props_lock);
	mutex_destroy(&spa->spa_cksum_tmpls_lock);
	mutex_destroy(&spa->spa_scrub_lock);
	mutex_destroy(&spa->spa_suspend_lock);
	mutex_destroy(&spa->spa_vdev_top_lock);

	kmem_free(spa, sizeof (spa_t));
}
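
/*
 * Sketch of the namespace life cycle implied by spa_lookup(), spa_add()
 * and spa_remove() (illustrative only; error handling, activation and
 * the intervening use of the pool are elided):
 *
 *	mutex_enter(&spa_namespace_lock);
 *	if (spa_lookup(name) != NULL) {
 *		mutex_exit(&spa_namespace_lock);
 *		return (SET_ERROR(EEXIST));
 *	}
 *	spa = spa_add(name, nvroot, altroot);
 *	...
 *	spa_remove(spa);	// once closed and deactivated
 *	mutex_exit(&spa_namespace_lock);
 */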

/*
 * Given a pool, return the next pool in the namespace, or NULL if there is
 * none. If 'prev' is NULL, return the first pool.
 */
spa_t *
spa_next(spa_t *prev)
{
	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	if (prev)
		return (AVL_NEXT(&spa_namespace_avl, prev));
	else
		return (avl_first(&spa_namespace_avl));
}

/*
 * ==========================================================================
 * SPA refcount functions
 * ==========================================================================
 */

/*
 * Add a reference to the given spa_t. Must have at least one reference, or
 * have the namespace lock held.
 */
void
spa_open_ref(spa_t *spa, void *tag)
{
	ASSERT(refcount_count(&spa->spa_refcount) >= spa->spa_minref ||
	    MUTEX_HELD(&spa_namespace_lock));
	(void) refcount_add(&spa->spa_refcount, tag);
}

/*
 * Remove a reference to the given spa_t. Must have at least one reference, or
 * have the namespace lock held.
 */
void
spa_close(spa_t *spa, void *tag)
{
	ASSERT(refcount_count(&spa->spa_refcount) > spa->spa_minref ||
	    MUTEX_HELD(&spa_namespace_lock));
	(void) refcount_remove(&spa->spa_refcount, tag);
}

/*
 * Remove a reference to the given spa_t held by a dsl dir that is
 * being asynchronously released. Async releases occur from a taskq
 * performing eviction of dsl datasets and dirs. The namespace lock
 * isn't held and the hold by the object being evicted may contribute to
 * spa_minref (e.g. dataset or directory released during pool export),
 * so the asserts in spa_close() do not apply.
 */
void
spa_async_close(spa_t *spa, void *tag)
{
	(void) refcount_remove(&spa->spa_refcount, tag);
}

/*
 * Check to see if the spa refcount is zero. Must be called with
 * spa_namespace_lock held. We really compare against spa_minref, which is
 * the number of references acquired when opening a pool.
 */
boolean_t
spa_refcount_zero(spa_t *spa)
{
	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	return (refcount_count(&spa->spa_refcount) == spa->spa_minref);
}
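
/*
 * An illustrative reference bracket using the FTAG convention; the tag
 * passed to spa_close() must be the same one passed to spa_open_ref():
 *
 *	spa_open_ref(spa, FTAG);
 *	... use the spa_t ...
 *	spa_close(spa, FTAG);
 */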

/*
 * ==========================================================================
 * SPA spare and l2cache tracking
 * ==========================================================================
 */

/*
 * Hot spares and cache devices are tracked using the same code below,
 * for 'auxiliary' devices.
 */

typedef struct spa_aux {
	uint64_t	aux_guid;
	uint64_t	aux_pool;
	avl_node_t	aux_avl;
	int		aux_count;
} spa_aux_t;

static int
spa_aux_compare(const void *a, const void *b)
{
	const spa_aux_t *sa = a;
	const spa_aux_t *sb = b;

	if (sa->aux_guid < sb->aux_guid)
		return (-1);
	else if (sa->aux_guid > sb->aux_guid)
		return (1);
	else
		return (0);
}

void
spa_aux_add(vdev_t *vd, avl_tree_t *avl)
{
	avl_index_t where;
	spa_aux_t search;
	spa_aux_t *aux;

	search.aux_guid = vd->vdev_guid;
	if ((aux = avl_find(avl, &search, &where)) != NULL) {
		aux->aux_count++;
	} else {
		aux = kmem_zalloc(sizeof (spa_aux_t), KM_SLEEP);
		aux->aux_guid = vd->vdev_guid;
		aux->aux_count = 1;
		avl_insert(avl, aux, where);
	}
}

void
spa_aux_remove(vdev_t *vd, avl_tree_t *avl)
{
	spa_aux_t search;
	spa_aux_t *aux;
	avl_index_t where;

	search.aux_guid = vd->vdev_guid;
	aux = avl_find(avl, &search, &where);

	ASSERT(aux != NULL);

	if (--aux->aux_count == 0) {
		avl_remove(avl, aux);
		kmem_free(aux, sizeof (spa_aux_t));
	} else if (aux->aux_pool == spa_guid(vd->vdev_spa)) {
		aux->aux_pool = 0ULL;
	}
}

boolean_t
spa_aux_exists(uint64_t guid, uint64_t *pool, int *refcnt, avl_tree_t *avl)
{
	spa_aux_t search, *found;

	search.aux_guid = guid;
	found = avl_find(avl, &search, NULL);

	if (pool) {
		if (found)
			*pool = found->aux_pool;
		else
			*pool = 0ULL;
	}

	if (refcnt) {
		if (found)
			*refcnt = found->aux_count;
		else
			*refcnt = 0;
	}

	return (found != NULL);
}

void
spa_aux_activate(vdev_t *vd, avl_tree_t *avl)
{
	spa_aux_t search, *found;
	avl_index_t where;

	search.aux_guid = vd->vdev_guid;
	found = avl_find(avl, &search, &where);
	ASSERT(found != NULL);
	ASSERT(found->aux_pool == 0ULL);

	found->aux_pool = spa_guid(vd->vdev_spa);
}

/*
 * Spares are tracked globally due to the following constraints:
 *
 *	- A spare may be part of multiple pools.
 *	- A spare may be added to a pool even if it's actively in use within
 *	  another pool.
 *	- A spare in use in any pool can only be the source of a replacement
 *	  if the target is a spare in the same pool.
 *
 * We keep track of all spares on the system through the use of a reference
 * counted AVL tree. When a vdev is added as a spare, or used as a replacement
 * spare, then we bump the reference count in the AVL tree. In addition, we
 * set the 'vdev_isspare' member to indicate that the device is a spare
 * (active or inactive). When a spare is made active (used to replace a device
 * in the pool), we also keep track of which pool it's been made a part of.
 *
 * The 'spa_spare_lock' protects the AVL tree. These functions are normally
 * called under the spa_namespace lock as part of vdev reconfiguration. The
 * separate spare lock exists for the status query path, which does not need
 * to be completely consistent with respect to other vdev configuration
 * changes.
 */

static int
spa_spare_compare(const void *a, const void *b)
{
	return (spa_aux_compare(a, b));
}

void
spa_spare_add(vdev_t *vd)
{
	mutex_enter(&spa_spare_lock);
	ASSERT(!vd->vdev_isspare);
	spa_aux_add(vd, &spa_spare_avl);
	vd->vdev_isspare = B_TRUE;
	mutex_exit(&spa_spare_lock);
}

void
spa_spare_remove(vdev_t *vd)
{
	mutex_enter(&spa_spare_lock);
	ASSERT(vd->vdev_isspare);
	spa_aux_remove(vd, &spa_spare_avl);
	vd->vdev_isspare = B_FALSE;
	mutex_exit(&spa_spare_lock);
}

boolean_t
spa_spare_exists(uint64_t guid, uint64_t *pool, int *refcnt)
{
	boolean_t found;

	mutex_enter(&spa_spare_lock);
	found = spa_aux_exists(guid, pool, refcnt, &spa_spare_avl);
	mutex_exit(&spa_spare_lock);

	return (found);
}

void
spa_spare_activate(vdev_t *vd)
{
	mutex_enter(&spa_spare_lock);
	ASSERT(vd->vdev_isspare);
	spa_aux_activate(vd, &spa_spare_avl);
	mutex_exit(&spa_spare_lock);
}

/*
 * Level 2 ARC devices are tracked globally for the same reasons as spares.
 * Cache devices currently only support one pool per cache device, and so
 * for these devices the aux reference count is currently unused beyond 1.
 */

static int
spa_l2cache_compare(const void *a, const void *b)
{
	return (spa_aux_compare(a, b));
}

void
spa_l2cache_add(vdev_t *vd)
{
	mutex_enter(&spa_l2cache_lock);
	ASSERT(!vd->vdev_isl2cache);
	spa_aux_add(vd, &spa_l2cache_avl);
	vd->vdev_isl2cache = B_TRUE;
	mutex_exit(&spa_l2cache_lock);
}

void
spa_l2cache_remove(vdev_t *vd)
{
	mutex_enter(&spa_l2cache_lock);
	ASSERT(vd->vdev_isl2cache);
	spa_aux_remove(vd, &spa_l2cache_avl);
	vd->vdev_isl2cache = B_FALSE;
	mutex_exit(&spa_l2cache_lock);
}

boolean_t
spa_l2cache_exists(uint64_t guid, uint64_t *pool)
{
	boolean_t found;

	mutex_enter(&spa_l2cache_lock);
	found = spa_aux_exists(guid, pool, NULL, &spa_l2cache_avl);
	mutex_exit(&spa_l2cache_lock);

	return (found);
}

void
spa_l2cache_activate(vdev_t *vd)
{
	mutex_enter(&spa_l2cache_lock);
	ASSERT(vd->vdev_isl2cache);
	spa_aux_activate(vd, &spa_l2cache_avl);
	mutex_exit(&spa_l2cache_lock);
}

/*
 * ==========================================================================
 * SPA vdev locking
 * ==========================================================================
 */

/*
 * Lock the given spa_t for the purpose of adding or removing a vdev.
 * Grabs the global spa_namespace_lock plus the spa config lock for writing.
 * It returns the next transaction group for the spa_t.
 */
uint64_t
spa_vdev_enter(spa_t *spa)
{
	mutex_enter(&spa->spa_vdev_top_lock);
	mutex_enter(&spa_namespace_lock);
	return (spa_vdev_config_enter(spa));
}
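
/*
 * Sketch of the usual pairing with spa_vdev_exit(), defined below
 * (illustrative only):
 *
 *	txg = spa_vdev_enter(spa);
 *	... add or remove vdevs, dirty the config ...
 *	return (spa_vdev_exit(spa, newvd, txg, error));
 */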

/*
 * Internal implementation for spa_vdev_enter(). Used when a vdev
 * operation requires multiple syncs (i.e. removing a device) while
 * keeping the spa_namespace_lock held.
 */
uint64_t
spa_vdev_config_enter(spa_t *spa)
{
	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	spa_config_enter(spa, SCL_ALL, spa, RW_WRITER);

	return (spa_last_synced_txg(spa) + 1);
}

/*
 * Used in combination with spa_vdev_config_enter() to allow the syncing
 * of multiple transactions without releasing the spa_namespace_lock.
 */
void
spa_vdev_config_exit(spa_t *spa, vdev_t *vd, uint64_t txg, int error,
    char *tag)
{
	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	int config_changed = B_FALSE;

	ASSERT(txg > spa_last_synced_txg(spa));

	spa->spa_pending_vdev = NULL;

	/*
	 * Reassess the DTLs.
	 */
	vdev_dtl_reassess(spa->spa_root_vdev, 0, 0, B_FALSE);

	if (error == 0 && !list_is_empty(&spa->spa_config_dirty_list)) {
		config_changed = B_TRUE;
		spa->spa_config_generation++;
	}

	/*
	 * Verify the metaslab classes.
	 */
	ASSERT(metaslab_class_validate(spa_normal_class(spa)) == 0);
	ASSERT(metaslab_class_validate(spa_log_class(spa)) == 0);

	spa_config_exit(spa, SCL_ALL, spa);

	/*
	 * Panic the system if the specified tag requires it. This
	 * is useful for ensuring that configurations are updated
	 * transactionally.
	 */
	if (zio_injection_enabled)
		zio_handle_panic_injection(spa, tag, 0);

	/*
	 * Note: this txg_wait_synced() is important because it ensures
	 * that there won't be more than one config change per txg.
	 * This allows us to use the txg as the generation number.
	 */
	if (error == 0)
		txg_wait_synced(spa->spa_dsl_pool, txg);

	if (vd != NULL) {
		ASSERT(!vd->vdev_detached || vd->vdev_dtl_sm == NULL);
		spa_config_enter(spa, SCL_ALL, spa, RW_WRITER);
		vdev_free(vd);
		spa_config_exit(spa, SCL_ALL, spa);
	}

	/*
	 * If the config changed, update the config cache.
	 */
	if (config_changed)
		spa_config_sync(spa, B_FALSE, B_TRUE);
}

/*
 * Unlock the spa_t after adding or removing a vdev. Besides undoing the
 * locking of spa_vdev_enter(), we also want to make sure the transactions
 * have synced to disk, and then update the global configuration cache with
 * the new information.
 */
int
spa_vdev_exit(spa_t *spa, vdev_t *vd, uint64_t txg, int error)
{
	spa_vdev_config_exit(spa, vd, txg, error, FTAG);
	mutex_exit(&spa_namespace_lock);
	mutex_exit(&spa->spa_vdev_top_lock);

	return (error);
}

/*
 * Lock the given spa_t for the purpose of changing vdev state.
 */
void
spa_vdev_state_enter(spa_t *spa, int oplocks)
{
	int locks = SCL_STATE_ALL | oplocks;

	/*
	 * Root pools may need to read from the underlying devfs filesystem
	 * when opening up a vdev. Unfortunately if we're holding the
	 * SCL_ZIO lock it will result in a deadlock when we try to issue
	 * the read from the root filesystem. Instead we "prefetch"
	 * the associated vnodes that we need prior to opening the
	 * underlying devices and cache them so that we can prevent
	 * any I/O when we are doing the actual open.
	 */
	if (spa_is_root(spa)) {
		int low = locks & ~(SCL_ZIO - 1);
		int high = locks & ~low;

		spa_config_enter(spa, high, spa, RW_WRITER);
		vdev_hold(spa->spa_root_vdev);
		spa_config_enter(spa, low, spa, RW_WRITER);
	} else {
		spa_config_enter(spa, locks, spa, RW_WRITER);
	}
	spa->spa_vdev_locks = locks;
}

int
spa_vdev_state_exit(spa_t *spa, vdev_t *vd, int error)
{
	boolean_t config_changed = B_FALSE;

	if (vd != NULL || error == 0)
		vdev_dtl_reassess(vd ? vd->vdev_top : spa->spa_root_vdev,
		    0, 0, B_FALSE);

	if (vd != NULL) {
		vdev_state_dirty(vd->vdev_top);
		config_changed = B_TRUE;
		spa->spa_config_generation++;
	}

	if (spa_is_root(spa))
		vdev_rele(spa->spa_root_vdev);

	ASSERT3U(spa->spa_vdev_locks, >=, SCL_STATE_ALL);
	spa_config_exit(spa, spa->spa_vdev_locks, spa);

	/*
	 * If anything changed, wait for it to sync. This ensures that,
	 * from the system administrator's perspective, zpool(1M) commands
	 * are synchronous. This is important for things like zpool offline:
	 * when the command completes, you expect no further I/O from ZFS.
	 */
	if (vd != NULL)
		txg_wait_synced(spa->spa_dsl_pool, 0);

	/*
	 * If the config changed, update the config cache.
	 */
	if (config_changed) {
		mutex_enter(&spa_namespace_lock);
		spa_config_sync(spa, B_FALSE, B_TRUE);
		mutex_exit(&spa_namespace_lock);
	}

	return (error);
}
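
/*
 * Sketch of the state-change pairing (illustrative only; the vdev
 * lookup is a placeholder):
 *
 *	spa_vdev_state_enter(spa, SCL_NONE);
 *	vd = ... look up the leaf vdev by guid ...
 *	if (vd == NULL)
 *		return (spa_vdev_state_exit(spa, NULL, ENODEV));
 *	... change vd's state ...
 *	return (spa_vdev_state_exit(spa, vd, 0));
 */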

/*
 * ==========================================================================
 * Miscellaneous functions
 * ==========================================================================
 */

void
spa_activate_mos_feature(spa_t *spa, const char *feature, dmu_tx_t *tx)
{
	if (!nvlist_exists(spa->spa_label_features, feature)) {
		fnvlist_add_boolean(spa->spa_label_features, feature);
		/*
		 * When we are creating the pool (tx_txg==TXG_INITIAL), we
		 * can't dirty the vdev config because lock SCL_CONFIG is not
		 * held. Thankfully, in this case we don't need to dirty the
		 * config because it will be written out anyway when we finish
		 * creating the pool.
		 */
		if (tx->tx_txg != TXG_INITIAL)
			vdev_config_dirty(spa->spa_root_vdev);
	}
}

void
spa_deactivate_mos_feature(spa_t *spa, const char *feature)
{
	if (nvlist_remove_all(spa->spa_label_features, feature) == 0)
		vdev_config_dirty(spa->spa_root_vdev);
}

/*
 * Rename a spa_t.
 */
int
spa_rename(const char *name, const char *newname)
{
	spa_t *spa;
	int err;

	/*
	 * Lookup the spa_t and grab the config lock for writing. We need to
	 * actually open the pool so that we can sync out the necessary labels.
	 * It's OK to call spa_open() with the namespace lock held because we
	 * allow recursive calls for other reasons.
	 */
	mutex_enter(&spa_namespace_lock);
	if ((err = spa_open(name, &spa, FTAG)) != 0) {
		mutex_exit(&spa_namespace_lock);
		return (err);
	}

	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);

	avl_remove(&spa_namespace_avl, spa);
	(void) strlcpy(spa->spa_name, newname, sizeof (spa->spa_name));
	avl_add(&spa_namespace_avl, spa);

	/*
	 * Sync all labels to disk with the new names by marking the root vdev
	 * dirty and waiting for it to sync. It will pick up the new pool name
	 * during the sync.
	 */
	vdev_config_dirty(spa->spa_root_vdev);

	spa_config_exit(spa, SCL_ALL, FTAG);

	txg_wait_synced(spa->spa_dsl_pool, 0);

	/*
	 * Sync the updated config cache.
	 */
	spa_config_sync(spa, B_FALSE, B_TRUE);

	spa_close(spa, FTAG);

	mutex_exit(&spa_namespace_lock);

	return (0);
}

/*
 * Return the spa_t associated with given pool_guid, if it exists. If
 * device_guid is non-zero, determine whether the pool exists *and* contains
 * a device with the specified device_guid.
 */
spa_t *
spa_by_guid(uint64_t pool_guid, uint64_t device_guid)
{
	spa_t *spa;
	avl_tree_t *t = &spa_namespace_avl;

	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	for (spa = avl_first(t); spa != NULL; spa = AVL_NEXT(t, spa)) {
		if (spa->spa_state == POOL_STATE_UNINITIALIZED)
			continue;
		if (spa->spa_root_vdev == NULL)
			continue;
		if (spa_guid(spa) == pool_guid) {
			if (device_guid == 0)
				break;

			if (vdev_lookup_by_guid(spa->spa_root_vdev,
			    device_guid) != NULL)
				break;

			/*
			 * Check any devices we may be in the process of
			 * adding.
			 */
			if (spa->spa_pending_vdev) {
				if (vdev_lookup_by_guid(spa->spa_pending_vdev,
				    device_guid) != NULL)
					break;
			}
		}
	}

	return (spa);
}

/*
 * Determine whether a pool with the given pool_guid exists.
 */
boolean_t
spa_guid_exists(uint64_t pool_guid, uint64_t device_guid)
{
	return (spa_by_guid(pool_guid, device_guid) != NULL);
}

char *
spa_strdup(const char *s)
{
	size_t len;
	char *new;

	len = strlen(s);
	new = kmem_alloc(len + 1, KM_SLEEP);
	bcopy(s, new, len);
	new[len] = '\0';

	return (new);
}

void
spa_strfree(char *s)
{
	kmem_free(s, strlen(s) + 1);
}

uint64_t
spa_get_random(uint64_t range)
{
	uint64_t r;

	ASSERT(range != 0);

	(void) random_get_pseudo_bytes((void *)&r, sizeof (uint64_t));

	return (r % range);
}

uint64_t
spa_generate_guid(spa_t *spa)
{
	uint64_t guid = spa_get_random(-1ULL);

	if (spa != NULL) {
		while (guid == 0 || spa_guid_exists(spa_guid(spa), guid))
			guid = spa_get_random(-1ULL);
	} else {
		while (guid == 0 || spa_guid_exists(guid, 0))
			guid = spa_get_random(-1ULL);
	}

	return (guid);
}
1520 "metadata" : "data", 1521 dmu_ot_byteswap[bswap].ob_name); 1522 } else { 1523 (void) strlcpy(type, dmu_ot[BP_GET_TYPE(bp)].ot_name, 1524 sizeof (type)); 1525 } 1526 if (!BP_IS_EMBEDDED(bp)) { 1527 checksum = 1528 zio_checksum_table[BP_GET_CHECKSUM(bp)].ci_name; 1529 } 1530 compress = zio_compress_table[BP_GET_COMPRESS(bp)].ci_name; 1531 } 1532 1533 SNPRINTF_BLKPTR(snprintf, ' ', buf, buflen, bp, type, checksum, 1534 compress); 1535} 1536 1537void 1538spa_freeze(spa_t *spa) 1539{ 1540 uint64_t freeze_txg = 0; 1541 1542 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 1543 if (spa->spa_freeze_txg == UINT64_MAX) { 1544 freeze_txg = spa_last_synced_txg(spa) + TXG_SIZE; 1545 spa->spa_freeze_txg = freeze_txg; 1546 } 1547 spa_config_exit(spa, SCL_ALL, FTAG); 1548 if (freeze_txg != 0) 1549 txg_wait_synced(spa_get_dsl(spa), freeze_txg); 1550} 1551 1552void 1553zfs_panic_recover(const char *fmt, ...) 1554{ 1555 va_list adx; 1556 1557 va_start(adx, fmt); 1558 vcmn_err(zfs_recover ? CE_WARN : CE_PANIC, fmt, adx); 1559 va_end(adx); 1560} 1561 1562/* 1563 * This is a stripped-down version of strtoull, suitable only for converting 1564 * lowercase hexadecimal numbers that don't overflow. 1565 */ 1566uint64_t 1567zfs_strtonum(const char *str, char **nptr) 1568{ 1569 uint64_t val = 0; 1570 char c; 1571 int digit; 1572 1573 while ((c = *str) != '\0') { 1574 if (c >= '0' && c <= '9') 1575 digit = c - '0'; 1576 else if (c >= 'a' && c <= 'f') 1577 digit = 10 + c - 'a'; 1578 else 1579 break; 1580 1581 val *= 16; 1582 val += digit; 1583 1584 str++; 1585 } 1586 1587 if (nptr) 1588 *nptr = (char *)str; 1589 1590 return (val); 1591} 1592 1593/* 1594 * ========================================================================== 1595 * Accessor functions 1596 * ========================================================================== 1597 */ 1598 1599boolean_t 1600spa_shutting_down(spa_t *spa) 1601{ 1602 return (spa->spa_async_suspended); 1603} 1604 1605dsl_pool_t * 1606spa_get_dsl(spa_t *spa) 1607{ 1608 return (spa->spa_dsl_pool); 1609} 1610 1611boolean_t 1612spa_is_initializing(spa_t *spa) 1613{ 1614 return (spa->spa_is_initializing); 1615} 1616 1617blkptr_t * 1618spa_get_rootblkptr(spa_t *spa) 1619{ 1620 return (&spa->spa_ubsync.ub_rootbp); 1621} 1622 1623void 1624spa_set_rootblkptr(spa_t *spa, const blkptr_t *bp) 1625{ 1626 spa->spa_uberblock.ub_rootbp = *bp; 1627} 1628 1629void 1630spa_altroot(spa_t *spa, char *buf, size_t buflen) 1631{ 1632 if (spa->spa_root == NULL) 1633 buf[0] = '\0'; 1634 else 1635 (void) strncpy(buf, spa->spa_root, buflen); 1636} 1637 1638int 1639spa_sync_pass(spa_t *spa) 1640{ 1641 return (spa->spa_sync_pass); 1642} 1643 1644char * 1645spa_name(spa_t *spa) 1646{ 1647 return (spa->spa_name); 1648} 1649 1650uint64_t 1651spa_guid(spa_t *spa) 1652{ 1653 dsl_pool_t *dp = spa_get_dsl(spa); 1654 uint64_t guid; 1655 1656 /* 1657 * If we fail to parse the config during spa_load(), we can go through 1658 * the error path (which posts an ereport) and end up here with no root 1659 * vdev. We stash the original pool guid in 'spa_config_guid' to handle 1660 * this case. 1661 */ 1662 if (spa->spa_root_vdev == NULL) 1663 return (spa->spa_config_guid); 1664 1665 guid = spa->spa_last_synced_guid != 0 ? 1666 spa->spa_last_synced_guid : spa->spa_root_vdev->vdev_guid; 1667 1668 /* 1669 * Return the most recently synced out guid unless we're 1670 * in syncing context. 

uint64_t
spa_load_guid(spa_t *spa)
{
	/*
	 * This is a GUID that exists solely as a reference for the
	 * purposes of the arc. It is generated at load time, and
	 * is never written to persistent storage.
	 */
	return (spa->spa_load_guid);
}

uint64_t
spa_last_synced_txg(spa_t *spa)
{
	return (spa->spa_ubsync.ub_txg);
}

uint64_t
spa_first_txg(spa_t *spa)
{
	return (spa->spa_first_txg);
}

uint64_t
spa_syncing_txg(spa_t *spa)
{
	return (spa->spa_syncing_txg);
}

pool_state_t
spa_state(spa_t *spa)
{
	return (spa->spa_state);
}

spa_load_state_t
spa_load_state(spa_t *spa)
{
	return (spa->spa_load_state);
}

uint64_t
spa_freeze_txg(spa_t *spa)
{
	return (spa->spa_freeze_txg);
}

/* ARGSUSED */
uint64_t
spa_get_asize(spa_t *spa, uint64_t lsize)
{
	return (lsize * spa_asize_inflation);
}

/*
 * Return the amount of slop space in bytes. It is 1/32 of the pool (3.1%),
 * or at least 128MB, unless that would cause it to be more than half the
 * pool size.
 *
 * See the comment above spa_slop_shift for details.
 */
uint64_t
spa_get_slop_space(spa_t *spa)
{
	uint64_t space = spa_get_dspace(spa);
	return (MAX(space >> spa_slop_shift, MIN(space >> 1, spa_min_slop)));
}
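
/*
 * Worked examples with the defaults (spa_slop_shift = 5, spa_min_slop =
 * 128MB): a 1TB pool reserves 1TB >> 5 = 32GB; a 1GB pool would get only
 * 32MB from the shift, so the 128MB floor applies; and for a 200MB pool
 * the floor is capped at half the pool, i.e. 100MB.
 */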

uint64_t
spa_get_dspace(spa_t *spa)
{
	return (spa->spa_dspace);
}

void
spa_update_dspace(spa_t *spa)
{
	spa->spa_dspace = metaslab_class_get_dspace(spa_normal_class(spa)) +
	    ddt_get_dedup_dspace(spa);
}

/*
 * Return the failure mode that has been set to this pool. The default
 * behavior will be to block all I/Os when a complete failure occurs.
 */
uint8_t
spa_get_failmode(spa_t *spa)
{
	return (spa->spa_failmode);
}

boolean_t
spa_suspended(spa_t *spa)
{
	return (spa->spa_suspended);
}

uint64_t
spa_version(spa_t *spa)
{
	return (spa->spa_ubsync.ub_version);
}

boolean_t
spa_deflate(spa_t *spa)
{
	return (spa->spa_deflate);
}

metaslab_class_t *
spa_normal_class(spa_t *spa)
{
	return (spa->spa_normal_class);
}

metaslab_class_t *
spa_log_class(spa_t *spa)
{
	return (spa->spa_log_class);
}

void
spa_evicting_os_register(spa_t *spa, objset_t *os)
{
	mutex_enter(&spa->spa_evicting_os_lock);
	list_insert_head(&spa->spa_evicting_os_list, os);
	mutex_exit(&spa->spa_evicting_os_lock);
}

void
spa_evicting_os_deregister(spa_t *spa, objset_t *os)
{
	mutex_enter(&spa->spa_evicting_os_lock);
	list_remove(&spa->spa_evicting_os_list, os);
	cv_broadcast(&spa->spa_evicting_os_cv);
	mutex_exit(&spa->spa_evicting_os_lock);
}

void
spa_evicting_os_wait(spa_t *spa)
{
	mutex_enter(&spa->spa_evicting_os_lock);
	while (!list_is_empty(&spa->spa_evicting_os_list))
		cv_wait(&spa->spa_evicting_os_cv, &spa->spa_evicting_os_lock);
	mutex_exit(&spa->spa_evicting_os_lock);

	dmu_buf_user_evict_wait();
}

int
spa_max_replication(spa_t *spa)
{
	/*
	 * As of SPA_VERSION == SPA_VERSION_DITTO_BLOCKS, we are able to
	 * handle BPs with more than one DVA allocated. Set our max
	 * replication level accordingly.
	 */
	if (spa_version(spa) < SPA_VERSION_DITTO_BLOCKS)
		return (1);
	return (MIN(SPA_DVAS_PER_BP, spa_max_replication_override));
}

int
spa_prev_software_version(spa_t *spa)
{
	return (spa->spa_prev_software_version);
}

uint64_t
spa_deadman_synctime(spa_t *spa)
{
	return (spa->spa_deadman_synctime);
}

uint64_t
dva_get_dsize_sync(spa_t *spa, const dva_t *dva)
{
	uint64_t asize = DVA_GET_ASIZE(dva);
	uint64_t dsize = asize;

	ASSERT(spa_config_held(spa, SCL_ALL, RW_READER) != 0);

	if (asize != 0 && spa->spa_deflate) {
		uint64_t vdev = DVA_GET_VDEV(dva);
		vdev_t *vd = vdev_lookup_top(spa, vdev);
		if (vd == NULL) {
			panic(
			    "dva_get_dsize_sync(): bad DVA %llu:%llu",
			    (u_longlong_t)vdev, (u_longlong_t)asize);
		}
		dsize = (asize >> SPA_MINBLOCKSHIFT) * vd->vdev_deflate_ratio;
	}

	return (dsize);
}

uint64_t
bp_get_dsize_sync(spa_t *spa, const blkptr_t *bp)
{
	uint64_t dsize = 0;

	for (int d = 0; d < BP_GET_NDVAS(bp); d++)
		dsize += dva_get_dsize_sync(spa, &bp->blk_dva[d]);

	return (dsize);
}

uint64_t
bp_get_dsize(spa_t *spa, const blkptr_t *bp)
{
	uint64_t dsize = 0;

	spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);

	for (int d = 0; d < BP_GET_NDVAS(bp); d++)
		dsize += dva_get_dsize_sync(spa, &bp->blk_dva[d]);

	spa_config_exit(spa, SCL_VDEV, FTAG);

	return (dsize);
}
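
/*
 * For example, with SPA_MINBLOCKSHIFT of 9 (512-byte sectors), a DVA
 * with an allocated size of 8192 bytes on a deflated pool contributes
 * dsize = (8192 >> 9) * vdev_deflate_ratio = 16 * vdev_deflate_ratio.
 */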

/*
 * ==========================================================================
 * Initialization and Termination
 * ==========================================================================
 */

static int
spa_name_compare(const void *a1, const void *a2)
{
	const spa_t *s1 = a1;
	const spa_t *s2 = a2;
	int s;

	s = strcmp(s1->spa_name, s2->spa_name);
	if (s > 0)
		return (1);
	if (s < 0)
		return (-1);
	return (0);
}

int
spa_busy(void)
{
	return (spa_active_count);
}

void
spa_boot_init()
{
	spa_config_load();
}

#ifdef _KERNEL
EVENTHANDLER_DEFINE(mountroot, spa_boot_init, NULL, 0);
#endif

void
spa_init(int mode)
{
	mutex_init(&spa_namespace_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa_spare_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa_l2cache_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&spa_namespace_cv, NULL, CV_DEFAULT, NULL);

	avl_create(&spa_namespace_avl, spa_name_compare, sizeof (spa_t),
	    offsetof(spa_t, spa_avl));

	avl_create(&spa_spare_avl, spa_spare_compare, sizeof (spa_aux_t),
	    offsetof(spa_aux_t, aux_avl));

	avl_create(&spa_l2cache_avl, spa_l2cache_compare, sizeof (spa_aux_t),
	    offsetof(spa_aux_t, aux_avl));

	spa_mode_global = mode;

#ifdef illumos
#ifdef _KERNEL
	spa_arch_init();
#else
	if (spa_mode_global != FREAD && dprintf_find_string("watch")) {
		arc_procfd = open("/proc/self/ctl", O_WRONLY);
		if (arc_procfd == -1) {
			perror("could not enable watchpoints: "
			    "opening /proc/self/ctl failed: ");
		} else {
			arc_watch = B_TRUE;
		}
	}
#endif
#endif /* illumos */
	refcount_sysinit();
	unique_init();
	range_tree_init();
	zio_init();
	lz4_init();
	dmu_init();
	zil_init();
	vdev_cache_stat_init();
	zfs_prop_init();
	zpool_prop_init();
	zpool_feature_init();
	spa_config_load();
	l2arc_start();
#ifndef illumos
#ifdef _KERNEL
	zfs_deadman_init();
#endif
#endif	/* !illumos */
}

void
spa_fini(void)
{
	l2arc_stop();

	spa_evict_all();

	vdev_cache_stat_fini();
	zil_fini();
	dmu_fini();
	lz4_fini();
	zio_fini();
	range_tree_fini();
	unique_fini();
	refcount_fini();

	avl_destroy(&spa_namespace_avl);
	avl_destroy(&spa_spare_avl);
	avl_destroy(&spa_l2cache_avl);

	cv_destroy(&spa_namespace_cv);
	mutex_destroy(&spa_namespace_lock);
	mutex_destroy(&spa_spare_lock);
	mutex_destroy(&spa_l2cache_lock);
}
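
/*
 * spa_init() is typically called once at module load (with
 * FREAD | FWRITE in the kernel) and spa_fini() at unload; note that
 * spa_fini() tears the subsystems down in the reverse of the order in
 * which spa_init() brought them up.
 */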

/*
 * Return whether this pool has slogs. No locking needed.
 * It's not a problem if the wrong answer is returned as it's only for
 * performance and not correctness.
 */
boolean_t
spa_has_slogs(spa_t *spa)
{
	return (spa->spa_log_class->mc_rotor != NULL);
}

spa_log_state_t
spa_get_log_state(spa_t *spa)
{
	return (spa->spa_log_state);
}

void
spa_set_log_state(spa_t *spa, spa_log_state_t state)
{
	spa->spa_log_state = state;
}

boolean_t
spa_is_root(spa_t *spa)
{
	return (spa->spa_is_root);
}

boolean_t
spa_writeable(spa_t *spa)
{
	return (!!(spa->spa_mode & FWRITE));
}

/*
 * Returns true if there is a pending sync task in any of the current
 * syncing txg, the current quiescing txg, or the current open txg.
 */
boolean_t
spa_has_pending_synctask(spa_t *spa)
{
	return (!txg_all_lists_empty(&spa->spa_dsl_pool->dp_sync_tasks));
}

int
spa_mode(spa_t *spa)
{
	return (spa->spa_mode);
}

uint64_t
spa_bootfs(spa_t *spa)
{
	return (spa->spa_bootfs);
}

uint64_t
spa_delegation(spa_t *spa)
{
	return (spa->spa_delegation);
}

objset_t *
spa_meta_objset(spa_t *spa)
{
	return (spa->spa_meta_objset);
}

enum zio_checksum
spa_dedup_checksum(spa_t *spa)
{
	return (spa->spa_dedup_checksum);
}

/*
 * Reset pool scan stats per scan pass (or reboot).
 */
void
spa_scan_stat_init(spa_t *spa)
{
	/* data not stored on disk */
	spa->spa_scan_pass_start = gethrestime_sec();
	spa->spa_scan_pass_exam = 0;
	vdev_scan_stat_init(spa->spa_root_vdev);
}

/*
 * Get scan stats for zpool status reports.
 */
int
spa_scan_get_stats(spa_t *spa, pool_scan_stat_t *ps)
{
	dsl_scan_t *scn = spa->spa_dsl_pool ?
	    spa->spa_dsl_pool->dp_scan : NULL;

	if (scn == NULL || scn->scn_phys.scn_func == POOL_SCAN_NONE)
		return (SET_ERROR(ENOENT));
	bzero(ps, sizeof (pool_scan_stat_t));

	/* data stored on disk */
	ps->pss_func = scn->scn_phys.scn_func;
	ps->pss_start_time = scn->scn_phys.scn_start_time;
	ps->pss_end_time = scn->scn_phys.scn_end_time;
	ps->pss_to_examine = scn->scn_phys.scn_to_examine;
	ps->pss_examined = scn->scn_phys.scn_examined;
	ps->pss_to_process = scn->scn_phys.scn_to_process;
	ps->pss_processed = scn->scn_phys.scn_processed;
	ps->pss_errors = scn->scn_phys.scn_errors;
	ps->pss_state = scn->scn_phys.scn_state;

	/* data not stored on disk */
	ps->pss_pass_start = spa->spa_scan_pass_start;
	ps->pss_pass_exam = spa->spa_scan_pass_exam;

	return (0);
}

boolean_t
spa_debug_enabled(spa_t *spa)
{
	return (spa->spa_debug);
}

int
spa_maxblocksize(spa_t *spa)
{
	if (spa_feature_is_enabled(spa, SPA_FEATURE_LARGE_BLOCKS))
		return (SPA_MAXBLOCKSIZE);
	else
		return (SPA_OLD_MAXBLOCKSIZE);
}