spa_misc.c revision 297092
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011, 2015 by Delphix. All rights reserved.
 * Copyright 2015 Nexenta Systems, Inc. All rights reserved.
 * Copyright 2013 Martin Matuska <mm@FreeBSD.org>. All rights reserved.
 * Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
 * Copyright 2013 Saso Kiselkov. All rights reserved.
 */

#include <sys/zfs_context.h>
#include <sys/spa_impl.h>
#include <sys/spa_boot.h>
#include <sys/zio.h>
#include <sys/zio_checksum.h>
#include <sys/zio_compress.h>
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/zap.h>
#include <sys/zil.h>
#include <sys/vdev_impl.h>
#include <sys/metaslab.h>
#include <sys/uberblock_impl.h>
#include <sys/txg.h>
#include <sys/avl.h>
#include <sys/unique.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_scan.h>
#include <sys/fs/zfs.h>
#include <sys/metaslab_impl.h>
#include <sys/arc.h>
#include <sys/ddt.h>
#include "zfs_prop.h"
#include <sys/zfeature.h>

/*
 * SPA locking
 *
 * There are four basic locks for managing spa_t structures:
 *
 * spa_namespace_lock (global mutex)
 *
 *	This lock must be acquired to do any of the following:
 *
 *		- Lookup a spa_t by name
 *		- Add or remove a spa_t from the namespace
 *		- Increase spa_refcount from non-zero
 *		- Check if spa_refcount is zero
 *		- Rename a spa_t
 *		- add/remove/attach/detach devices
 *		- Held for the duration of create/destroy/import/export
 *
 *	It does not need to handle recursion.  A create or destroy may
 *	reference objects (files or zvols) in other pools, but by
 *	definition they must have an existing reference, and will never need
 *	to lookup a spa_t by name.
 *
 * spa_refcount (per-spa refcount_t protected by mutex)
 *
 *	This reference count keeps track of any active users of the spa_t.  The
 *	spa_t cannot be destroyed or freed while this is non-zero.  Internally,
 *	the refcount is never really 'zero' - opening a pool implicitly keeps
 *	some references in the DMU.  Internally we check against spa_minref, but
 *	present the image of a zero/non-zero value to consumers.
 *
 * spa_config_lock[] (per-spa array of rwlocks)
 *
 *	This protects the spa_t from config changes, and must be held in
 *	the following circumstances:
 *
 *		- RW_READER to perform I/O to the spa
 *		- RW_WRITER to change the vdev config
 *
 * The locking order is fairly straightforward:
 *
 *		spa_namespace_lock	->	spa_refcount
 *
 *	The namespace lock must be acquired to increase the refcount from 0
 *	or to check if it is zero.
 *
 *		spa_refcount		->	spa_config_lock[]
 *
 *	There must be at least one valid reference on the spa_t to acquire
 *	the config lock.
 *
 *		spa_namespace_lock	->	spa_config_lock[]
 *
 *	The namespace lock must always be taken before the config lock.
 *
 *
 * The spa_namespace_lock can be acquired directly and is globally visible.
 *
 * The namespace is manipulated using the following functions, all of which
 * require the spa_namespace_lock to be held.
 *
 *	spa_lookup()		Lookup a spa_t by name.
 *
 *	spa_add()		Create a new spa_t in the namespace.
 *
 *	spa_remove()		Remove a spa_t from the namespace.  This also
 *				frees up any memory associated with the spa_t.
 *
 *	spa_next()		Returns the next spa_t in the system, or the
 *				first if NULL is passed.
 *
 *	spa_evict_all()		Shutdown and remove all spa_t structures in
 *				the system.
 *
 *	spa_guid_exists()	Determine whether a pool/device guid exists.
 *
 * The spa_refcount is manipulated using the following functions:
 *
 *	spa_open_ref()		Adds a reference to the given spa_t.  Must be
 *				called with spa_namespace_lock held if the
 *				refcount is currently zero.
 *
 *	spa_close()		Remove a reference from the spa_t.  This will
 *				not free the spa_t or remove it from the
 *				namespace.  No locking is required.
 *
 *	spa_refcount_zero()	Returns true if the refcount is currently
 *				zero.  Must be called with spa_namespace_lock
 *				held.
 *
 * The spa_config_lock[] is an array of rwlocks, ordered as follows:
 * SCL_CONFIG > SCL_STATE > SCL_ALLOC > SCL_ZIO > SCL_FREE > SCL_VDEV.
 * spa_config_lock[] is manipulated with spa_config_{enter,exit,held}().
 *
 * To read the configuration, it suffices to hold one of these locks as reader.
 * To modify the configuration, you must hold all locks as writer.  To modify
 * vdev state without altering the vdev tree's topology (e.g. online/offline),
 * you must hold SCL_STATE and SCL_ZIO as writer.
 *
 * We use these distinct config locks to avoid recursive lock entry.
 * For example, spa_sync() (which holds SCL_CONFIG as reader) induces
 * block allocations (SCL_ALLOC), which may require reading space maps
 * from disk (dmu_read() -> zio_read() -> SCL_ZIO).
 *
 * The spa config locks cannot be normal rwlocks because we need the
 * ability to hand off ownership.  For example, SCL_ZIO is acquired
 * by the issuing thread and later released by an interrupt thread.
 * They do, however, obey the usual write-wanted semantics to prevent
 * writer (i.e. system administrator) starvation.
 *
 * The lock acquisition rules are as follows:
 *
 * SCL_CONFIG
 *	Protects changes to the vdev tree topology, such as vdev
 *	add/remove/attach/detach.  Protects the dirty config list
 *	(spa_config_dirty_list) and the set of spares and l2arc devices.
 *
 * SCL_STATE
 *	Protects changes to pool state and vdev state, such as vdev
 *	online/offline/fault/degrade/clear.  Protects the dirty state list
 *	(spa_state_dirty_list) and global pool state (spa_state).
 *
 * SCL_ALLOC
 *	Protects changes to metaslab groups and classes.
 *	Held as reader by metaslab_alloc() and metaslab_claim().
 *
 * SCL_ZIO
 *	Held by bp-level zios (those which have no io_vd upon entry)
 *	to prevent changes to the vdev tree.  The bp-level zio implicitly
 *	protects all of its vdev child zios, which do not hold SCL_ZIO.
 *
 * SCL_FREE
 *	Protects changes to metaslab groups and classes.
 *	Held as reader by metaslab_free().  SCL_FREE is distinct from
 *	SCL_ALLOC, and lower than SCL_ZIO, so that we can safely free
 *	blocks in zio_done() while another i/o that holds either
 *	SCL_ALLOC or SCL_ZIO is waiting for this i/o to complete.
 *
 * SCL_VDEV
 *	Held as reader to prevent changes to the vdev tree during trivial
 *	inquiries such as bp_get_dsize().  SCL_VDEV is distinct from the
 *	other locks, and lower than all of them, to ensure that it's safe
 *	to acquire regardless of caller context.
 *
 * In addition, the following rules apply:
 *
 * (a)	spa_props_lock protects pool properties, spa_config and spa_config_list.
 *	The lock ordering is SCL_CONFIG > spa_props_lock.
 *
 * (b)	I/O operations on leaf vdevs.  For any zio operation that takes
 *	an explicit vdev_t argument -- such as zio_ioctl(), zio_read_phys(),
 *	or zio_write_phys() -- the caller must ensure that the config cannot
 *	change in the interim, and that the vdev cannot be reopened.
 *	SCL_STATE as reader suffices for both.
 *
 * The vdev configuration is protected by spa_vdev_enter() / spa_vdev_exit().
 *
 *	spa_vdev_enter()	Acquire the namespace lock and the config lock
 *				for writing.
 *
 *	spa_vdev_exit()		Release the config lock, wait for all I/O
 *				to complete, sync the updated configs to the
 *				cache, and release the namespace lock.
 *
 * vdev state is protected by spa_vdev_state_enter() / spa_vdev_state_exit().
 * Like spa_vdev_enter/exit, these are convenience wrappers -- the actual
 * locking is, always, based on spa_namespace_lock and spa_config_lock[].
 *
 * spa_rename() is also implemented within this file since it requires
 * manipulation of the namespace.
 */
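
/*
 * Illustrative sketch (not part of the original file): the common
 * reader-side pattern for the config locks described above, using this
 * codebase's FTAG tagging convention.  A trivial inquiry only needs
 * SCL_VDEV as reader:
 *
 *	spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
 *	dsize = bp_get_dsize_sync(spa, bp);
 *	spa_config_exit(spa, SCL_VDEV, FTAG);
 */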

static avl_tree_t spa_namespace_avl;
kmutex_t spa_namespace_lock;
static kcondvar_t spa_namespace_cv;
static int spa_active_count;
int spa_max_replication_override = SPA_DVAS_PER_BP;

static kmutex_t spa_spare_lock;
static avl_tree_t spa_spare_avl;
static kmutex_t spa_l2cache_lock;
static avl_tree_t spa_l2cache_avl;

kmem_cache_t *spa_buffer_pool;
int spa_mode_global;

#ifdef ZFS_DEBUG
/* Everything except dprintf and spa is on by default in debug builds */
int zfs_flags = ~(ZFS_DEBUG_DPRINTF | ZFS_DEBUG_SPA);
#else
int zfs_flags = 0;
#endif
SYSCTL_DECL(_debug);
TUNABLE_INT("debug.zfs_flags", &zfs_flags);
SYSCTL_INT(_debug, OID_AUTO, zfs_flags, CTLFLAG_RWTUN, &zfs_flags, 0,
    "ZFS debug flags.");

/*
 * zfs_recover can be set to nonzero to attempt to recover from
 * otherwise-fatal errors, typically caused by on-disk corruption.  When
 * set, calls to zfs_panic_recover() will turn into warning messages.
 * This should only be used as a last resort, as it typically results
 * in leaked space, or worse.
 */
boolean_t zfs_recover = B_FALSE;
SYSCTL_DECL(_vfs_zfs);
TUNABLE_INT("vfs.zfs.recover", &zfs_recover);
SYSCTL_INT(_vfs_zfs, OID_AUTO, recover, CTLFLAG_RWTUN, &zfs_recover, 0,
    "Try to recover from otherwise-fatal errors.");

static int
sysctl_vfs_zfs_debug_flags(SYSCTL_HANDLER_ARGS)
{
	int err, val;

	val = zfs_flags;
	err = sysctl_handle_int(oidp, &val, 0, req);
	if (err != 0 || req->newptr == NULL)
		return (err);

	/*
	 * ZFS_DEBUG_MODIFY must be enabled prior to boot so all
	 * arc buffers in the system have the necessary additional
	 * checksum data.  However, it is safe to disable at any
	 * time.
	 */
	if (!(zfs_flags & ZFS_DEBUG_MODIFY))
		val &= ~ZFS_DEBUG_MODIFY;
	zfs_flags = val;

	return (0);
}
TUNABLE_INT("vfs.zfs.debug_flags", &zfs_flags);
SYSCTL_PROC(_vfs_zfs, OID_AUTO, debug_flags,
    CTLTYPE_UINT | CTLFLAG_MPSAFE | CTLFLAG_RW, 0, sizeof(int),
    sysctl_vfs_zfs_debug_flags, "IU", "Debug flags for ZFS testing.");

/*
 * If destroy encounters an EIO while reading metadata (e.g. indirect
 * blocks), space referenced by the missing metadata can not be freed.
 * Normally this causes the background destroy to become "stalled", as
 * it is unable to make forward progress.  While in this stalled state,
 * all remaining space to free from the error-encountering filesystem is
 * "temporarily leaked".  Set this flag to cause it to ignore the EIO,
 * permanently leak the space from indirect blocks that can not be read,
 * and continue to free everything else that it can.
 *
 * The default, "stalling" behavior is useful if the storage partially
 * fails (i.e. some but not all i/os fail), and then later recovers.  In
 * this case, we will be able to continue pool operations while it is
 * partially failed, and when it recovers, we can continue to free the
 * space, with no leaks.  However, note that this case is actually
 * fairly rare.
 *
 * Typically pools either (a) fail completely (but perhaps temporarily,
 * e.g. a top-level vdev going offline), or (b) have localized,
 * permanent errors (e.g. disk returns the wrong data due to bit flip or
 * firmware bug).  In case (a), this setting does not matter because the
 * pool will be suspended and the sync thread will not be able to make
 * forward progress regardless.  In case (b), because the error is
 * permanent, the best we can do is leak the minimum amount of space,
 * which is what setting this flag will do.  Therefore, it is reasonable
 * for this flag to normally be set, but we chose the more conservative
 * approach of not setting it, so that there is no possibility of
 * leaking space in the "partial temporary" failure case.
 */
boolean_t zfs_free_leak_on_eio = B_FALSE;

/*
 * Expiration time in milliseconds. This value has two meanings. First it is
 * used to determine when the spa_deadman() logic should fire. By default the
 * spa_deadman() will fire if spa_sync() has not completed in 1000 seconds.
 * Secondly, the value determines if an I/O is considered "hung". Any I/O that
 * has not completed in zfs_deadman_synctime_ms is considered "hung" resulting
 * in a system panic.
 */
uint64_t zfs_deadman_synctime_ms = 1000000ULL;
TUNABLE_QUAD("vfs.zfs.deadman_synctime_ms", &zfs_deadman_synctime_ms);
SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, deadman_synctime_ms, CTLFLAG_RDTUN,
    &zfs_deadman_synctime_ms, 0,
    "Stalled ZFS I/O expiration time in milliseconds");

/*
 * Check time in milliseconds. This defines the frequency at which we check
 * for hung I/O.
 */
uint64_t zfs_deadman_checktime_ms = 5000ULL;
TUNABLE_QUAD("vfs.zfs.deadman_checktime_ms", &zfs_deadman_checktime_ms);
SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, deadman_checktime_ms, CTLFLAG_RDTUN,
    &zfs_deadman_checktime_ms, 0,
    "Period of checks for stalled ZFS I/O in milliseconds");

/*
 * Default value of -1 for zfs_deadman_enabled is resolved in
 * zfs_deadman_init()
 */
int zfs_deadman_enabled = -1;
TUNABLE_INT("vfs.zfs.deadman_enabled", &zfs_deadman_enabled);
SYSCTL_INT(_vfs_zfs, OID_AUTO, deadman_enabled, CTLFLAG_RDTUN,
    &zfs_deadman_enabled, 0, "Kernel panic on stalled ZFS I/O");

/*
 * The worst case is single-sector max-parity RAID-Z blocks, in which
 * case the space requirement is exactly (VDEV_RAIDZ_MAXPARITY + 1)
 * times the size; so just assume that.  Add to this the fact that
 * we can have up to 3 DVAs per bp, and one more factor of 2 because
 * the block may be dittoed with up to 3 DVAs by ddt_sync().  All together,
 * the worst case is:
 *     (VDEV_RAIDZ_MAXPARITY + 1) * SPA_DVAS_PER_BP * 2 == 24
 */
int spa_asize_inflation = 24;
TUNABLE_INT("vfs.zfs.spa_asize_inflation", &spa_asize_inflation);
SYSCTL_INT(_vfs_zfs, OID_AUTO, spa_asize_inflation, CTLFLAG_RWTUN,
    &spa_asize_inflation, 0,
    "Worst case inflation factor for single sector writes");

#ifndef illumos
#ifdef _KERNEL
static void
zfs_deadman_init()
{
	/*
	 * If we are not i386 or amd64 or in a virtual machine,
	 * disable ZFS deadman thread by default
	 */
	if (zfs_deadman_enabled == -1) {
#if defined(__amd64__) || defined(__i386__)
		zfs_deadman_enabled = (vm_guest == VM_GUEST_NO) ? 1 : 0;
#else
		zfs_deadman_enabled = 0;
#endif
	}
}
#endif	/* _KERNEL */
#endif	/* !illumos */

/*
 * Normally, we don't allow the last 3.2% (1/(2^spa_slop_shift)) of space in
 * the pool to be consumed.  This ensures that we don't run the pool
 * completely out of space, due to unaccounted changes (e.g. to the MOS).
 * It also limits the worst-case time to allocate space.  If we have
 * less than this amount of free space, most ZPL operations (e.g. write,
 * create) will return ENOSPC.
 *
 * Certain operations (e.g. file removal, most administrative actions) can
 * use half the slop space.  They will only return ENOSPC if less than half
 * the slop space is free.  Typically, once the pool has less than the slop
 * space free, the user will use these operations to free up space in the pool.
 * These are the operations that call dsl_pool_adjustedsize() with the netfree
 * argument set to TRUE.
 *
 * A very restricted set of operations are always permitted, regardless of
 * the amount of free space.  These are the operations that call
 * dsl_sync_task(ZFS_SPACE_CHECK_NONE), e.g. "zfs destroy".  If these
 * operations result in a net increase in the amount of space used,
 * it is possible to run the pool completely out of space, causing it to
 * be permanently read-only.
 *
 * See also the comments in zfs_space_check_t.
 */
int spa_slop_shift = 5;
SYSCTL_INT(_vfs_zfs, OID_AUTO, spa_slop_shift, CTLFLAG_RWTUN,
    &spa_slop_shift, 0,
    "Shift value of reserved space (1/(2^spa_slop_shift)).");
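
/*
 * Worked example (illustrative, not part of the original file): with the
 * default spa_slop_shift of 5, a 1TB pool holds back 1TB >> 5 = 32GB of
 * slop.  Most ZPL operations start failing with ENOSPC once less than
 * 32GB is free; "netfree" operations once less than 16GB is free.
 */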

/*
 * ==========================================================================
 * SPA config locking
 * ==========================================================================
 */
static void
spa_config_lock_init(spa_t *spa)
{
	for (int i = 0; i < SCL_LOCKS; i++) {
		spa_config_lock_t *scl = &spa->spa_config_lock[i];
		mutex_init(&scl->scl_lock, NULL, MUTEX_DEFAULT, NULL);
		cv_init(&scl->scl_cv, NULL, CV_DEFAULT, NULL);
		refcount_create_untracked(&scl->scl_count);
		scl->scl_writer = NULL;
		scl->scl_write_wanted = 0;
	}
}

static void
spa_config_lock_destroy(spa_t *spa)
{
	for (int i = 0; i < SCL_LOCKS; i++) {
		spa_config_lock_t *scl = &spa->spa_config_lock[i];
		mutex_destroy(&scl->scl_lock);
		cv_destroy(&scl->scl_cv);
		refcount_destroy(&scl->scl_count);
		ASSERT(scl->scl_writer == NULL);
		ASSERT(scl->scl_write_wanted == 0);
	}
}

int
spa_config_tryenter(spa_t *spa, int locks, void *tag, krw_t rw)
{
	for (int i = 0; i < SCL_LOCKS; i++) {
		spa_config_lock_t *scl = &spa->spa_config_lock[i];
		if (!(locks & (1 << i)))
			continue;
		mutex_enter(&scl->scl_lock);
		if (rw == RW_READER) {
			if (scl->scl_writer || scl->scl_write_wanted) {
				mutex_exit(&scl->scl_lock);
				spa_config_exit(spa, locks & ((1 << i) - 1),
				    tag);
				return (0);
			}
		} else {
			ASSERT(scl->scl_writer != curthread);
			if (!refcount_is_zero(&scl->scl_count)) {
				mutex_exit(&scl->scl_lock);
				spa_config_exit(spa, locks & ((1 << i) - 1),
				    tag);
				return (0);
			}
			scl->scl_writer = curthread;
		}
		(void) refcount_add(&scl->scl_count, tag);
		mutex_exit(&scl->scl_lock);
	}
	return (1);
}

void
spa_config_enter(spa_t *spa, int locks, void *tag, krw_t rw)
{
	int wlocks_held = 0;

	ASSERT3U(SCL_LOCKS, <, sizeof (wlocks_held) * NBBY);

	for (int i = 0; i < SCL_LOCKS; i++) {
		spa_config_lock_t *scl = &spa->spa_config_lock[i];
		if (scl->scl_writer == curthread)
			wlocks_held |= (1 << i);
		if (!(locks & (1 << i)))
			continue;
		mutex_enter(&scl->scl_lock);
		if (rw == RW_READER) {
			while (scl->scl_writer || scl->scl_write_wanted) {
				cv_wait(&scl->scl_cv, &scl->scl_lock);
			}
		} else {
			ASSERT(scl->scl_writer != curthread);
			while (!refcount_is_zero(&scl->scl_count)) {
				scl->scl_write_wanted++;
				cv_wait(&scl->scl_cv, &scl->scl_lock);
				scl->scl_write_wanted--;
			}
			scl->scl_writer = curthread;
		}
		(void) refcount_add(&scl->scl_count, tag);
		mutex_exit(&scl->scl_lock);
	}
	ASSERT(wlocks_held <= locks);
}

void
spa_config_exit(spa_t *spa, int locks, void *tag)
{
	for (int i = SCL_LOCKS - 1; i >= 0; i--) {
		spa_config_lock_t *scl = &spa->spa_config_lock[i];
		if (!(locks & (1 << i)))
			continue;
		mutex_enter(&scl->scl_lock);
		ASSERT(!refcount_is_zero(&scl->scl_count));
		if (refcount_remove(&scl->scl_count, tag) == 0) {
			ASSERT(scl->scl_writer == NULL ||
			    scl->scl_writer == curthread);
			scl->scl_writer = NULL;	/* OK in either case */
			cv_broadcast(&scl->scl_cv);
		}
		mutex_exit(&scl->scl_lock);
	}
}

int
spa_config_held(spa_t *spa, int locks, krw_t rw)
{
	int locks_held = 0;

	for (int i = 0; i < SCL_LOCKS; i++) {
		spa_config_lock_t *scl = &spa->spa_config_lock[i];
		if (!(locks & (1 << i)))
			continue;
		if ((rw == RW_READER && !refcount_is_zero(&scl->scl_count)) ||
		    (rw == RW_WRITER && scl->scl_writer == curthread))
			locks_held |= 1 << i;
	}

	return (locks_held);
}

/*
 * ==========================================================================
 * SPA namespace functions
 * ==========================================================================
 */

/*
 * Lookup the named spa_t in the AVL tree.  The spa_namespace_lock must be held.
 * Returns NULL if no matching spa_t is found.
 */
spa_t *
spa_lookup(const char *name)
{
	static spa_t search;	/* spa_t is large; don't allocate on stack */
	spa_t *spa;
	avl_index_t where;
	char *cp;

	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	(void) strlcpy(search.spa_name, name, sizeof (search.spa_name));

	/*
	 * If it's a full dataset name, figure out the pool name and
	 * just use that.
	 */
	cp = strpbrk(search.spa_name, "/@#");
	if (cp != NULL)
		*cp = '\0';

	spa = avl_find(&spa_namespace_avl, &search, &where);

	return (spa);
}
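
/*
 * Illustrative sketch (not part of the original file): a typical
 * lookup-and-hold sequence built on the namespace functions above.
 * The pool name "tank" is a hypothetical example.
 *
 *	mutex_enter(&spa_namespace_lock);
 *	spa_t *spa = spa_lookup("tank");
 *	if (spa != NULL)
 *		spa_open_ref(spa, FTAG);
 *	mutex_exit(&spa_namespace_lock);
 */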

/*
 * Fires when spa_sync has not completed within zfs_deadman_synctime_ms.
 * If the zfs_deadman_enabled flag is set then it inspects all vdev queues
 * looking for potentially hung I/Os.
 */
void
spa_deadman(void *arg)
{
	spa_t *spa = arg;

	/*
	 * Disable the deadman timer if the pool is suspended.
	 */
	if (spa_suspended(spa)) {
#ifdef illumos
		VERIFY(cyclic_reprogram(spa->spa_deadman_cycid, CY_INFINITY));
#else
		/* Nothing.  Just don't schedule any future callouts. */
#endif
		return;
	}

	zfs_dbgmsg("slow spa_sync: started %llu seconds ago, calls %llu",
	    (gethrtime() - spa->spa_sync_starttime) / NANOSEC,
	    ++spa->spa_deadman_calls);
	if (zfs_deadman_enabled)
		vdev_deadman(spa->spa_root_vdev);
#ifdef __FreeBSD__
#ifdef _KERNEL
	callout_schedule(&spa->spa_deadman_cycid,
	    hz * zfs_deadman_checktime_ms / MILLISEC);
#endif
#endif
}

/*
 * Create an uninitialized spa_t with the given name.  Requires
 * spa_namespace_lock.  The caller must ensure that the spa_t doesn't already
 * exist by calling spa_lookup() first.
 */
spa_t *
spa_add(const char *name, nvlist_t *config, const char *altroot)
{
	spa_t *spa;
	spa_config_dirent_t *dp;
#ifdef illumos
	cyc_handler_t hdlr;
	cyc_time_t when;
#endif

	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	spa = kmem_zalloc(sizeof (spa_t), KM_SLEEP);

	mutex_init(&spa->spa_async_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_errlist_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_errlog_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_evicting_os_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_history_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_proc_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_props_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_cksum_tmpls_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_scrub_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_suspend_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_vdev_top_lock, NULL, MUTEX_DEFAULT, NULL);

	cv_init(&spa->spa_async_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&spa->spa_evicting_os_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&spa->spa_proc_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&spa->spa_scrub_io_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&spa->spa_suspend_cv, NULL, CV_DEFAULT, NULL);

	for (int t = 0; t < TXG_SIZE; t++)
		bplist_create(&spa->spa_free_bplist[t]);

	(void) strlcpy(spa->spa_name, name, sizeof (spa->spa_name));
	spa->spa_state = POOL_STATE_UNINITIALIZED;
	spa->spa_freeze_txg = UINT64_MAX;
	spa->spa_final_txg = UINT64_MAX;
	spa->spa_load_max_txg = UINT64_MAX;
	spa->spa_proc = &p0;
	spa->spa_proc_state = SPA_PROC_NONE;

#ifdef illumos
	hdlr.cyh_func = spa_deadman;
	hdlr.cyh_arg = spa;
	hdlr.cyh_level = CY_LOW_LEVEL;
#endif

	spa->spa_deadman_synctime = MSEC2NSEC(zfs_deadman_synctime_ms);

#ifdef illumos
	/*
	 * This determines how often we need to check for hung I/Os after
	 * the cyclic has already fired. Since checking for hung I/Os is
	 * an expensive operation we don't want to check too frequently.
	 * Instead wait for 5 seconds before checking again.
	 */
	when.cyt_interval = MSEC2NSEC(zfs_deadman_checktime_ms);
	when.cyt_when = CY_INFINITY;
	mutex_enter(&cpu_lock);
	spa->spa_deadman_cycid = cyclic_add(&hdlr, &when);
	mutex_exit(&cpu_lock);
#else	/* !illumos */
#ifdef _KERNEL
	callout_init(&spa->spa_deadman_cycid, CALLOUT_MPSAFE);
#endif
#endif
	refcount_create(&spa->spa_refcount);
	spa_config_lock_init(spa);

	avl_add(&spa_namespace_avl, spa);

	/*
	 * Set the alternate root, if there is one.
	 */
	if (altroot) {
		spa->spa_root = spa_strdup(altroot);
		spa_active_count++;
	}

	/*
	 * Every pool starts with the default cachefile
	 */
	list_create(&spa->spa_config_list, sizeof (spa_config_dirent_t),
	    offsetof(spa_config_dirent_t, scd_link));

	dp = kmem_zalloc(sizeof (spa_config_dirent_t), KM_SLEEP);
	dp->scd_path = altroot ? NULL : spa_strdup(spa_config_path);
	list_insert_head(&spa->spa_config_list, dp);

	VERIFY(nvlist_alloc(&spa->spa_load_info, NV_UNIQUE_NAME,
	    KM_SLEEP) == 0);

	if (config != NULL) {
		nvlist_t *features;

		if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_FEATURES_FOR_READ,
		    &features) == 0) {
			VERIFY(nvlist_dup(features, &spa->spa_label_features,
			    0) == 0);
		}

		VERIFY(nvlist_dup(config, &spa->spa_config, 0) == 0);
	}

	if (spa->spa_label_features == NULL) {
		VERIFY(nvlist_alloc(&spa->spa_label_features, NV_UNIQUE_NAME,
		    KM_SLEEP) == 0);
	}

	spa->spa_debug = ((zfs_flags & ZFS_DEBUG_SPA) != 0);

	spa->spa_min_ashift = INT_MAX;
	spa->spa_max_ashift = 0;

	/*
	 * As a pool is being created, treat all features as disabled by
	 * setting SPA_FEATURE_DISABLED for all entries in the feature
	 * refcount cache.
	 */
	for (int i = 0; i < SPA_FEATURES; i++) {
		spa->spa_feat_refcount_cache[i] = SPA_FEATURE_DISABLED;
	}

	return (spa);
}

/*
 * Removes a spa_t from the namespace, freeing up any memory used.  Requires
 * spa_namespace_lock.  This is called only after the spa_t has been closed and
 * deactivated.
 */
void
spa_remove(spa_t *spa)
{
	spa_config_dirent_t *dp;

	ASSERT(MUTEX_HELD(&spa_namespace_lock));
	ASSERT(spa->spa_state == POOL_STATE_UNINITIALIZED);
	ASSERT3U(refcount_count(&spa->spa_refcount), ==, 0);

	nvlist_free(spa->spa_config_splitting);

	avl_remove(&spa_namespace_avl, spa);
	cv_broadcast(&spa_namespace_cv);

	if (spa->spa_root) {
		spa_strfree(spa->spa_root);
		spa_active_count--;
	}

	while ((dp = list_head(&spa->spa_config_list)) != NULL) {
		list_remove(&spa->spa_config_list, dp);
		if (dp->scd_path != NULL)
			spa_strfree(dp->scd_path);
		kmem_free(dp, sizeof (spa_config_dirent_t));
	}

	list_destroy(&spa->spa_config_list);

	nvlist_free(spa->spa_label_features);
	nvlist_free(spa->spa_load_info);
	spa_config_set(spa, NULL);

#ifdef illumos
	mutex_enter(&cpu_lock);
	if (spa->spa_deadman_cycid != CYCLIC_NONE)
		cyclic_remove(spa->spa_deadman_cycid);
	mutex_exit(&cpu_lock);
	spa->spa_deadman_cycid = CYCLIC_NONE;
#else	/* !illumos */
#ifdef _KERNEL
	callout_drain(&spa->spa_deadman_cycid);
#endif
#endif

	refcount_destroy(&spa->spa_refcount);

	spa_config_lock_destroy(spa);

	for (int t = 0; t < TXG_SIZE; t++)
		bplist_destroy(&spa->spa_free_bplist[t]);

	zio_checksum_templates_free(spa);

	cv_destroy(&spa->spa_async_cv);
	cv_destroy(&spa->spa_evicting_os_cv);
	cv_destroy(&spa->spa_proc_cv);
	cv_destroy(&spa->spa_scrub_io_cv);
	cv_destroy(&spa->spa_suspend_cv);

	mutex_destroy(&spa->spa_async_lock);
	mutex_destroy(&spa->spa_errlist_lock);
	mutex_destroy(&spa->spa_errlog_lock);
	mutex_destroy(&spa->spa_evicting_os_lock);
	mutex_destroy(&spa->spa_history_lock);
	mutex_destroy(&spa->spa_proc_lock);
	mutex_destroy(&spa->spa_props_lock);
	mutex_destroy(&spa->spa_cksum_tmpls_lock);
	mutex_destroy(&spa->spa_scrub_lock);
	mutex_destroy(&spa->spa_suspend_lock);
	mutex_destroy(&spa->spa_vdev_top_lock);

	kmem_free(spa, sizeof (spa_t));
}

/*
 * Given a pool, return the next pool in the namespace, or NULL if there is
 * none.  If 'prev' is NULL, return the first pool.
 */
spa_t *
spa_next(spa_t *prev)
{
	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	if (prev)
		return (AVL_NEXT(&spa_namespace_avl, prev));
	else
		return (avl_first(&spa_namespace_avl));
}

/*
 * ==========================================================================
 * SPA refcount functions
 * ==========================================================================
 */

/*
 * Add a reference to the given spa_t.  Must have at least one reference, or
 * have the namespace lock held.
 */
void
spa_open_ref(spa_t *spa, void *tag)
{
	ASSERT(refcount_count(&spa->spa_refcount) >= spa->spa_minref ||
	    MUTEX_HELD(&spa_namespace_lock));
	(void) refcount_add(&spa->spa_refcount, tag);
}

/*
 * Remove a reference to the given spa_t.  Must have at least one reference, or
 * have the namespace lock held.
 */
void
spa_close(spa_t *spa, void *tag)
{
	ASSERT(refcount_count(&spa->spa_refcount) > spa->spa_minref ||
	    MUTEX_HELD(&spa_namespace_lock));
	(void) refcount_remove(&spa->spa_refcount, tag);
}

/*
 * Remove a reference to the given spa_t held by a dsl dir that is
 * being asynchronously released.  Async releases occur from a taskq
 * performing eviction of dsl datasets and dirs.  The namespace lock
 * isn't held and the hold by the object being evicted may contribute to
 * spa_minref (e.g. dataset or directory released during pool export),
 * so the asserts in spa_close() do not apply.
 */
void
spa_async_close(spa_t *spa, void *tag)
{
	(void) refcount_remove(&spa->spa_refcount, tag);
}

/*
 * Check to see if the spa refcount is zero.  Must be called with
 * spa_namespace_lock held.  We really compare against spa_minref, which is the
 * number of references acquired when opening a pool
 */
boolean_t
spa_refcount_zero(spa_t *spa)
{
	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	return (refcount_count(&spa->spa_refcount) == spa->spa_minref);
}

/*
 * ==========================================================================
 * SPA spare and l2cache tracking
 * ==========================================================================
 */

/*
 * Hot spares and cache devices are tracked using the same code below,
 * for 'auxiliary' devices.
 */

typedef struct spa_aux {
	uint64_t	aux_guid;
	uint64_t	aux_pool;
	avl_node_t	aux_avl;
	int		aux_count;
} spa_aux_t;

static int
spa_aux_compare(const void *a, const void *b)
{
	const spa_aux_t *sa = a;
	const spa_aux_t *sb = b;

	if (sa->aux_guid < sb->aux_guid)
		return (-1);
	else if (sa->aux_guid > sb->aux_guid)
		return (1);
	else
		return (0);
}

void
spa_aux_add(vdev_t *vd, avl_tree_t *avl)
{
	avl_index_t where;
	spa_aux_t search;
	spa_aux_t *aux;

	search.aux_guid = vd->vdev_guid;
	if ((aux = avl_find(avl, &search, &where)) != NULL) {
		aux->aux_count++;
	} else {
		aux = kmem_zalloc(sizeof (spa_aux_t), KM_SLEEP);
		aux->aux_guid = vd->vdev_guid;
		aux->aux_count = 1;
		avl_insert(avl, aux, where);
	}
}

void
spa_aux_remove(vdev_t *vd, avl_tree_t *avl)
{
	spa_aux_t search;
	spa_aux_t *aux;
	avl_index_t where;

	search.aux_guid = vd->vdev_guid;
	aux = avl_find(avl, &search, &where);

	ASSERT(aux != NULL);

	if (--aux->aux_count == 0) {
		avl_remove(avl, aux);
		kmem_free(aux, sizeof (spa_aux_t));
	} else if (aux->aux_pool == spa_guid(vd->vdev_spa)) {
		aux->aux_pool = 0ULL;
	}
}

boolean_t
spa_aux_exists(uint64_t guid, uint64_t *pool, int *refcnt, avl_tree_t *avl)
{
	spa_aux_t search, *found;

	search.aux_guid = guid;
	found = avl_find(avl, &search, NULL);

	if (pool) {
		if (found)
			*pool = found->aux_pool;
		else
			*pool = 0ULL;
	}

	if (refcnt) {
		if (found)
			*refcnt = found->aux_count;
		else
			*refcnt = 0;
	}

	return (found != NULL);
}

void
spa_aux_activate(vdev_t *vd, avl_tree_t *avl)
{
	spa_aux_t search, *found;
	avl_index_t where;

	search.aux_guid = vd->vdev_guid;
	found = avl_find(avl, &search, &where);
	ASSERT(found != NULL);
	ASSERT(found->aux_pool == 0ULL);

	found->aux_pool = spa_guid(vd->vdev_spa);
}

/*
 * Spares are tracked globally due to the following constraints:
 *
 *	- A spare may be part of multiple pools.
 *	- A spare may be added to a pool even if it's actively in use within
 *	  another pool.
 *	- A spare in use in any pool can only be the source of a replacement if
 *	  the target is a spare in the same pool.
 *
 * We keep track of all spares on the system through the use of a reference
 * counted AVL tree.  When a vdev is added as a spare, or used as a replacement
 * spare, then we bump the reference count in the AVL tree.  In addition, we set
 * the 'vdev_isspare' member to indicate that the device is a spare (active or
 * inactive).  When a spare is made active (used to replace a device in the
 * pool), we also keep track of which pool it's been made a part of.
 *
 * The 'spa_spare_lock' protects the AVL tree.  These functions are normally
 * called under the spa_namespace lock as part of vdev reconfiguration.  The
 * separate spare lock exists for the status query path, which does not need to
 * be completely consistent with respect to other vdev configuration changes.
 */
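
/*
 * Illustrative sketch (not part of the original file): querying the
 * spare tracking described above to learn whether a guid is a known
 * spare, and which pool, if any, it is actively protecting.
 *
 *	uint64_t pool;
 *	int refcnt;
 *	if (spa_spare_exists(guid, &pool, &refcnt) && pool != 0ULL) {
 *		... the spare is in active use by 'pool', and is
 *		... referenced by 'refcnt' pools in total
 *	}
 */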

static int
spa_spare_compare(const void *a, const void *b)
{
	return (spa_aux_compare(a, b));
}

void
spa_spare_add(vdev_t *vd)
{
	mutex_enter(&spa_spare_lock);
	ASSERT(!vd->vdev_isspare);
	spa_aux_add(vd, &spa_spare_avl);
	vd->vdev_isspare = B_TRUE;
	mutex_exit(&spa_spare_lock);
}

void
spa_spare_remove(vdev_t *vd)
{
	mutex_enter(&spa_spare_lock);
	ASSERT(vd->vdev_isspare);
	spa_aux_remove(vd, &spa_spare_avl);
	vd->vdev_isspare = B_FALSE;
	mutex_exit(&spa_spare_lock);
}

boolean_t
spa_spare_exists(uint64_t guid, uint64_t *pool, int *refcnt)
{
	boolean_t found;

	mutex_enter(&spa_spare_lock);
	found = spa_aux_exists(guid, pool, refcnt, &spa_spare_avl);
	mutex_exit(&spa_spare_lock);

	return (found);
}

void
spa_spare_activate(vdev_t *vd)
{
	mutex_enter(&spa_spare_lock);
	ASSERT(vd->vdev_isspare);
	spa_aux_activate(vd, &spa_spare_avl);
	mutex_exit(&spa_spare_lock);
}

/*
 * Level 2 ARC devices are tracked globally for the same reasons as spares.
 * Cache devices currently only support one pool per cache device, and so
 * for these devices the aux reference count is currently unused beyond 1.
 */

static int
spa_l2cache_compare(const void *a, const void *b)
{
	return (spa_aux_compare(a, b));
}

void
spa_l2cache_add(vdev_t *vd)
{
	mutex_enter(&spa_l2cache_lock);
	ASSERT(!vd->vdev_isl2cache);
	spa_aux_add(vd, &spa_l2cache_avl);
	vd->vdev_isl2cache = B_TRUE;
	mutex_exit(&spa_l2cache_lock);
}

void
spa_l2cache_remove(vdev_t *vd)
{
	mutex_enter(&spa_l2cache_lock);
	ASSERT(vd->vdev_isl2cache);
	spa_aux_remove(vd, &spa_l2cache_avl);
	vd->vdev_isl2cache = B_FALSE;
	mutex_exit(&spa_l2cache_lock);
}

boolean_t
spa_l2cache_exists(uint64_t guid, uint64_t *pool)
{
	boolean_t found;

	mutex_enter(&spa_l2cache_lock);
	found = spa_aux_exists(guid, pool, NULL, &spa_l2cache_avl);
	mutex_exit(&spa_l2cache_lock);

	return (found);
}

void
spa_l2cache_activate(vdev_t *vd)
{
	mutex_enter(&spa_l2cache_lock);
	ASSERT(vd->vdev_isl2cache);
	spa_aux_activate(vd, &spa_l2cache_avl);
	mutex_exit(&spa_l2cache_lock);
}

/*
 * ==========================================================================
 * SPA vdev locking
 * ==========================================================================
 */

/*
 * Lock the given spa_t for the purpose of adding or removing a vdev.
 * Grabs the global spa_namespace_lock plus the spa config lock for writing.
 * It returns the next transaction group for the spa_t.
 */
uint64_t
spa_vdev_enter(spa_t *spa)
{
	mutex_enter(&spa->spa_vdev_top_lock);
	mutex_enter(&spa_namespace_lock);
	return (spa_vdev_config_enter(spa));
}
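
/*
 * Illustrative sketch (not part of the original file): the enter/exit
 * bracket used by vdev add/remove operations such as spa_vdev_add()
 * in spa.c.
 *
 *	uint64_t txg = spa_vdev_enter(spa);
 *	... add or remove vdevs ...
 *	return (spa_vdev_exit(spa, NULL, txg, error));
 */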

/*
 * Internal implementation for spa_vdev_enter().  Used when a vdev
 * operation requires multiple syncs (i.e. removing a device) while
 * keeping the spa_namespace_lock held.
 */
uint64_t
spa_vdev_config_enter(spa_t *spa)
{
	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	spa_config_enter(spa, SCL_ALL, spa, RW_WRITER);

	return (spa_last_synced_txg(spa) + 1);
}

/*
 * Used in combination with spa_vdev_config_enter() to allow the syncing
 * of multiple transactions without releasing the spa_namespace_lock.
 */
void
spa_vdev_config_exit(spa_t *spa, vdev_t *vd, uint64_t txg, int error, char *tag)
{
	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	int config_changed = B_FALSE;

	ASSERT(txg > spa_last_synced_txg(spa));

	spa->spa_pending_vdev = NULL;

	/*
	 * Reassess the DTLs.
	 */
	vdev_dtl_reassess(spa->spa_root_vdev, 0, 0, B_FALSE);

	if (error == 0 && !list_is_empty(&spa->spa_config_dirty_list)) {
		config_changed = B_TRUE;
		spa->spa_config_generation++;
	}

	/*
	 * Verify the metaslab classes.
	 */
	ASSERT(metaslab_class_validate(spa_normal_class(spa)) == 0);
	ASSERT(metaslab_class_validate(spa_log_class(spa)) == 0);

	spa_config_exit(spa, SCL_ALL, spa);

	/*
	 * Panic the system if the specified tag requires it.  This
	 * is useful for ensuring that configurations are updated
	 * transactionally.
	 */
	if (zio_injection_enabled)
		zio_handle_panic_injection(spa, tag, 0);

	/*
	 * Note: this txg_wait_synced() is important because it ensures
	 * that there won't be more than one config change per txg.
	 * This allows us to use the txg as the generation number.
	 */
	if (error == 0)
		txg_wait_synced(spa->spa_dsl_pool, txg);

	if (vd != NULL) {
		ASSERT(!vd->vdev_detached || vd->vdev_dtl_sm == NULL);
		spa_config_enter(spa, SCL_ALL, spa, RW_WRITER);
		vdev_free(vd);
		spa_config_exit(spa, SCL_ALL, spa);
	}

	/*
	 * If the config changed, update the config cache.
	 */
	if (config_changed)
		spa_config_sync(spa, B_FALSE, B_TRUE);
}

/*
 * Unlock the spa_t after adding or removing a vdev.  Besides undoing the
 * locking of spa_vdev_enter(), we also want to make sure the transactions
 * have synced to disk, and then update the global configuration cache with
 * the new information.
 */
int
spa_vdev_exit(spa_t *spa, vdev_t *vd, uint64_t txg, int error)
{
	spa_vdev_config_exit(spa, vd, txg, error, FTAG);
	mutex_exit(&spa_namespace_lock);
	mutex_exit(&spa->spa_vdev_top_lock);

	return (error);
}

/*
 * Lock the given spa_t for the purpose of changing vdev state.
 */
void
spa_vdev_state_enter(spa_t *spa, int oplocks)
{
	int locks = SCL_STATE_ALL | oplocks;

	/*
	 * Root pools may need to read from the underlying devfs filesystem
	 * when opening up a vdev.  Unfortunately if we're holding the
	 * SCL_ZIO lock it will result in a deadlock when we try to issue
	 * the read from the root filesystem.  Instead we "prefetch"
	 * the associated vnodes that we need prior to opening the
	 * underlying devices and cache them so that we can prevent
	 * any I/O when we are doing the actual open.
	 */
	if (spa_is_root(spa)) {
		int low = locks & ~(SCL_ZIO - 1);
		int high = locks & ~low;

		spa_config_enter(spa, high, spa, RW_WRITER);
		vdev_hold(spa->spa_root_vdev);
		spa_config_enter(spa, low, spa, RW_WRITER);
	} else {
		spa_config_enter(spa, locks, spa, RW_WRITER);
	}
	spa->spa_vdev_locks = locks;
}

int
spa_vdev_state_exit(spa_t *spa, vdev_t *vd, int error)
{
	boolean_t config_changed = B_FALSE;

	if (vd != NULL || error == 0)
		vdev_dtl_reassess(vd ? vd->vdev_top : spa->spa_root_vdev,
		    0, 0, B_FALSE);

	if (vd != NULL) {
		vdev_state_dirty(vd->vdev_top);
		config_changed = B_TRUE;
		spa->spa_config_generation++;
	}

	if (spa_is_root(spa))
		vdev_rele(spa->spa_root_vdev);

	ASSERT3U(spa->spa_vdev_locks, >=, SCL_STATE_ALL);
	spa_config_exit(spa, spa->spa_vdev_locks, spa);

	/*
	 * If anything changed, wait for it to sync.  This ensures that,
	 * from the system administrator's perspective, zpool(1M) commands
	 * are synchronous.  This is important for things like zpool offline:
	 * when the command completes, you expect no further I/O from ZFS.
	 */
	if (vd != NULL)
		txg_wait_synced(spa->spa_dsl_pool, 0);

	/*
	 * If the config changed, update the config cache.
	 */
	if (config_changed) {
		mutex_enter(&spa_namespace_lock);
		spa_config_sync(spa, B_FALSE, B_TRUE);
		mutex_exit(&spa_namespace_lock);
	}

	return (error);
}
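
/*
 * Illustrative sketch (not part of the original file): the state-change
 * bracket used by operations such as zpool online/offline.
 *
 *	spa_vdev_state_enter(spa, SCL_NONE);
 *	... change the state of a leaf vdev ...
 *	return (spa_vdev_state_exit(spa, vd, error));
 */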

/*
 * ==========================================================================
 * Miscellaneous functions
 * ==========================================================================
 */

void
spa_activate_mos_feature(spa_t *spa, const char *feature, dmu_tx_t *tx)
{
	if (!nvlist_exists(spa->spa_label_features, feature)) {
		fnvlist_add_boolean(spa->spa_label_features, feature);
		/*
		 * When we are creating the pool (tx_txg==TXG_INITIAL), we can't
		 * dirty the vdev config because lock SCL_CONFIG is not held.
		 * Thankfully, in this case we don't need to dirty the config
		 * because it will be written out anyway when we finish
		 * creating the pool.
		 */
		if (tx->tx_txg != TXG_INITIAL)
			vdev_config_dirty(spa->spa_root_vdev);
	}
}

void
spa_deactivate_mos_feature(spa_t *spa, const char *feature)
{
	if (nvlist_remove_all(spa->spa_label_features, feature) == 0)
		vdev_config_dirty(spa->spa_root_vdev);
}

/*
 * Rename a spa_t.
 */
int
spa_rename(const char *name, const char *newname)
{
	spa_t *spa;
	int err;

	/*
	 * Lookup the spa_t and grab the config lock for writing.  We need to
	 * actually open the pool so that we can sync out the necessary labels.
	 * It's OK to call spa_open() with the namespace lock held because we
	 * allow recursive calls for other reasons.
	 */
	mutex_enter(&spa_namespace_lock);
	if ((err = spa_open(name, &spa, FTAG)) != 0) {
		mutex_exit(&spa_namespace_lock);
		return (err);
	}

	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);

	avl_remove(&spa_namespace_avl, spa);
	(void) strlcpy(spa->spa_name, newname, sizeof (spa->spa_name));
	avl_add(&spa_namespace_avl, spa);

	/*
	 * Sync all labels to disk with the new names by marking the root vdev
	 * dirty and waiting for it to sync.  It will pick up the new pool name
	 * during the sync.
	 */
	vdev_config_dirty(spa->spa_root_vdev);

	spa_config_exit(spa, SCL_ALL, FTAG);

	txg_wait_synced(spa->spa_dsl_pool, 0);

	/*
	 * Sync the updated config cache.
	 */
	spa_config_sync(spa, B_FALSE, B_TRUE);

	spa_close(spa, FTAG);

	mutex_exit(&spa_namespace_lock);

	return (0);
}

/*
 * Return the spa_t associated with given pool_guid, if it exists.  If
 * device_guid is non-zero, determine whether the pool exists *and* contains
 * a device with the specified device_guid.
 */
spa_t *
spa_by_guid(uint64_t pool_guid, uint64_t device_guid)
{
	spa_t *spa;
	avl_tree_t *t = &spa_namespace_avl;

	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	for (spa = avl_first(t); spa != NULL; spa = AVL_NEXT(t, spa)) {
		if (spa->spa_state == POOL_STATE_UNINITIALIZED)
			continue;
		if (spa->spa_root_vdev == NULL)
			continue;
		if (spa_guid(spa) == pool_guid) {
			if (device_guid == 0)
				break;

			if (vdev_lookup_by_guid(spa->spa_root_vdev,
			    device_guid) != NULL)
				break;

			/*
			 * Check any devices we may be in the process of adding.
			 */
			if (spa->spa_pending_vdev) {
				if (vdev_lookup_by_guid(spa->spa_pending_vdev,
				    device_guid) != NULL)
					break;
			}
		}
	}

	return (spa);
}

/*
 * Determine whether a pool with the given pool_guid exists.
 */
boolean_t
spa_guid_exists(uint64_t pool_guid, uint64_t device_guid)
{
	return (spa_by_guid(pool_guid, device_guid) != NULL);
}

char *
spa_strdup(const char *s)
{
	size_t len;
	char *new;

	len = strlen(s);
	new = kmem_alloc(len + 1, KM_SLEEP);
	bcopy(s, new, len);
	new[len] = '\0';

	return (new);
}

void
spa_strfree(char *s)
{
	kmem_free(s, strlen(s) + 1);
}

uint64_t
spa_get_random(uint64_t range)
{
	uint64_t r;

	ASSERT(range != 0);

	(void) random_get_pseudo_bytes((void *)&r, sizeof (uint64_t));

	return (r % range);
}

uint64_t
spa_generate_guid(spa_t *spa)
{
	uint64_t guid = spa_get_random(-1ULL);

	if (spa != NULL) {
		while (guid == 0 || spa_guid_exists(spa_guid(spa), guid))
			guid = spa_get_random(-1ULL);
	} else {
		while (guid == 0 || spa_guid_exists(guid, 0))
			guid = spa_get_random(-1ULL);
	}

	return (guid);
}

void
snprintf_blkptr(char *buf, size_t buflen, const blkptr_t *bp)
{
	char type[256];
	char *checksum = NULL;
	char *compress = NULL;

	if (bp != NULL) {
		if (BP_GET_TYPE(bp) & DMU_OT_NEWTYPE) {
			dmu_object_byteswap_t bswap =
			    DMU_OT_BYTESWAP(BP_GET_TYPE(bp));
			(void) snprintf(type, sizeof (type), "bswap %s %s",
			    DMU_OT_IS_METADATA(BP_GET_TYPE(bp)) ?
			    "metadata" : "data",
			    dmu_ot_byteswap[bswap].ob_name);
		} else {
			(void) strlcpy(type, dmu_ot[BP_GET_TYPE(bp)].ot_name,
			    sizeof (type));
		}
		if (!BP_IS_EMBEDDED(bp)) {
			checksum =
			    zio_checksum_table[BP_GET_CHECKSUM(bp)].ci_name;
		}
		compress = zio_compress_table[BP_GET_COMPRESS(bp)].ci_name;
	}

	SNPRINTF_BLKPTR(snprintf, ' ', buf, buflen, bp, type, checksum,
	    compress);
}

void
spa_freeze(spa_t *spa)
{
	uint64_t freeze_txg = 0;

	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
	if (spa->spa_freeze_txg == UINT64_MAX) {
		freeze_txg = spa_last_synced_txg(spa) + TXG_SIZE;
		spa->spa_freeze_txg = freeze_txg;
	}
	spa_config_exit(spa, SCL_ALL, FTAG);
	if (freeze_txg != 0)
		txg_wait_synced(spa_get_dsl(spa), freeze_txg);
}

void
zfs_panic_recover(const char *fmt, ...)
{
	va_list adx;

	va_start(adx, fmt);
	vcmn_err(zfs_recover ? CE_WARN : CE_PANIC, fmt, adx);
	va_end(adx);
}

/*
 * This is a stripped-down version of strtoull, suitable only for converting
 * lowercase hexadecimal numbers that don't overflow.
 */
uint64_t
zfs_strtonum(const char *str, char **nptr)
{
	uint64_t val = 0;
	char c;
	int digit;

	while ((c = *str) != '\0') {
		if (c >= '0' && c <= '9')
			digit = c - '0';
		else if (c >= 'a' && c <= 'f')
			digit = 10 + c - 'a';
		else
			break;

		val *= 16;
		val += digit;

		str++;
	}

	if (nptr)
		*nptr = (char *)str;

	return (val);
}

/*
 * ==========================================================================
 * Accessor functions
 * ==========================================================================
 */

boolean_t
spa_shutting_down(spa_t *spa)
{
	return (spa->spa_async_suspended);
}

dsl_pool_t *
spa_get_dsl(spa_t *spa)
{
	return (spa->spa_dsl_pool);
}

boolean_t
spa_is_initializing(spa_t *spa)
{
	return (spa->spa_is_initializing);
}

blkptr_t *
spa_get_rootblkptr(spa_t *spa)
{
	return (&spa->spa_ubsync.ub_rootbp);
}

void
spa_set_rootblkptr(spa_t *spa, const blkptr_t *bp)
{
	spa->spa_uberblock.ub_rootbp = *bp;
}

void
spa_altroot(spa_t *spa, char *buf, size_t buflen)
{
	if (spa->spa_root == NULL)
		buf[0] = '\0';
	else
		(void) strncpy(buf, spa->spa_root, buflen);
}

int
spa_sync_pass(spa_t *spa)
{
	return (spa->spa_sync_pass);
}

char *
spa_name(spa_t *spa)
{
	return (spa->spa_name);
}

uint64_t
spa_guid(spa_t *spa)
{
	dsl_pool_t *dp = spa_get_dsl(spa);
	uint64_t guid;

	/*
	 * If we fail to parse the config during spa_load(), we can go through
	 * the error path (which posts an ereport) and end up here with no root
	 * vdev.  We stash the original pool guid in 'spa_config_guid' to handle
	 * this case.
	 */
	if (spa->spa_root_vdev == NULL)
		return (spa->spa_config_guid);

	guid = spa->spa_last_synced_guid != 0 ?
	    spa->spa_last_synced_guid : spa->spa_root_vdev->vdev_guid;

	/*
	 * Return the most recently synced out guid unless we're
	 * in syncing context.
	 */
	if (dp && dsl_pool_sync_context(dp))
		return (spa->spa_root_vdev->vdev_guid);
	else
		return (guid);
}

uint64_t
spa_load_guid(spa_t *spa)
{
	/*
	 * This is a GUID that exists solely as a reference for the
	 * purposes of the arc.  It is generated at load time, and
	 * is never written to persistent storage.
	 */
	return (spa->spa_load_guid);
}

uint64_t
spa_last_synced_txg(spa_t *spa)
{
	return (spa->spa_ubsync.ub_txg);
}

uint64_t
spa_first_txg(spa_t *spa)
{
	return (spa->spa_first_txg);
}

uint64_t
spa_syncing_txg(spa_t *spa)
{
	return (spa->spa_syncing_txg);
}

pool_state_t
spa_state(spa_t *spa)
{
	return (spa->spa_state);
}

spa_load_state_t
spa_load_state(spa_t *spa)
{
	return (spa->spa_load_state);
}

uint64_t
spa_freeze_txg(spa_t *spa)
{
	return (spa->spa_freeze_txg);
}

/* ARGSUSED */
uint64_t
spa_get_asize(spa_t *spa, uint64_t lsize)
{
	return (lsize * spa_asize_inflation);
}

/*
 * Return the amount of slop space in bytes.  It is 1/32 of the pool (3.2%),
 * or at least 32MB.
 *
 * See the comment above spa_slop_shift for details.
 */
uint64_t
spa_get_slop_space(spa_t *spa)
{
	uint64_t space = spa_get_dspace(spa);
	return (MAX(space >> spa_slop_shift, SPA_MINDEVSIZE >> 1));
}

uint64_t
spa_get_dspace(spa_t *spa)
{
	return (spa->spa_dspace);
}

void
spa_update_dspace(spa_t *spa)
{
	spa->spa_dspace = metaslab_class_get_dspace(spa_normal_class(spa)) +
	    ddt_get_dedup_dspace(spa);
}

/*
 * Return the failure mode that has been set for this pool.  The default
 * behavior will be to block all I/Os when a complete failure occurs.
 */
uint8_t
spa_get_failmode(spa_t *spa)
{
	return (spa->spa_failmode);
}

boolean_t
spa_suspended(spa_t *spa)
{
	return (spa->spa_suspended);
}

uint64_t
spa_version(spa_t *spa)
{
	return (spa->spa_ubsync.ub_version);
}

boolean_t
spa_deflate(spa_t *spa)
{
	return (spa->spa_deflate);
}

metaslab_class_t *
spa_normal_class(spa_t *spa)
{
	return (spa->spa_normal_class);
}

metaslab_class_t *
spa_log_class(spa_t *spa)
{
	return (spa->spa_log_class);
}

void
spa_evicting_os_register(spa_t *spa, objset_t *os)
{
	mutex_enter(&spa->spa_evicting_os_lock);
	list_insert_head(&spa->spa_evicting_os_list, os);
	mutex_exit(&spa->spa_evicting_os_lock);
}

void
spa_evicting_os_deregister(spa_t *spa, objset_t *os)
{
	mutex_enter(&spa->spa_evicting_os_lock);
	list_remove(&spa->spa_evicting_os_list, os);
	cv_broadcast(&spa->spa_evicting_os_cv);
	mutex_exit(&spa->spa_evicting_os_lock);
}

void
spa_evicting_os_wait(spa_t *spa)
{
	mutex_enter(&spa->spa_evicting_os_lock);
	while (!list_is_empty(&spa->spa_evicting_os_list))
		cv_wait(&spa->spa_evicting_os_cv, &spa->spa_evicting_os_lock);
	mutex_exit(&spa->spa_evicting_os_lock);

	dmu_buf_user_evict_wait();
}

int
spa_max_replication(spa_t *spa)
{
	/*
	 * As of SPA_VERSION == SPA_VERSION_DITTO_BLOCKS, we are able to
	 * handle BPs with more than one DVA allocated.  Set our max
	 * replication level accordingly.
	 */
	if (spa_version(spa) < SPA_VERSION_DITTO_BLOCKS)
		return (1);
	return (MIN(SPA_DVAS_PER_BP, spa_max_replication_override));
}

int
spa_prev_software_version(spa_t *spa)
{
	return (spa->spa_prev_software_version);
}

uint64_t
spa_deadman_synctime(spa_t *spa)
{
	return (spa->spa_deadman_synctime);
}

uint64_t
dva_get_dsize_sync(spa_t *spa, const dva_t *dva)
{
	uint64_t asize = DVA_GET_ASIZE(dva);
	uint64_t dsize = asize;

	ASSERT(spa_config_held(spa, SCL_ALL, RW_READER) != 0);

	if (asize != 0 && spa->spa_deflate) {
		uint64_t vdev = DVA_GET_VDEV(dva);
		vdev_t *vd = vdev_lookup_top(spa, vdev);
		if (vd == NULL) {
			panic(
			    "dva_get_dsize_sync(): bad DVA %llu:%llu",
			    (u_longlong_t)vdev, (u_longlong_t)asize);
		}
		dsize = (asize >> SPA_MINBLOCKSHIFT) * vd->vdev_deflate_ratio;
	}

	return (dsize);
}

uint64_t
bp_get_dsize_sync(spa_t *spa, const blkptr_t *bp)
{
	uint64_t dsize = 0;

	for (int d = 0; d < BP_GET_NDVAS(bp); d++)
		dsize += dva_get_dsize_sync(spa, &bp->blk_dva[d]);

	return (dsize);
}

uint64_t
bp_get_dsize(spa_t *spa, const blkptr_t *bp)
{
	uint64_t dsize = 0;

	spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);

	for (int d = 0; d < BP_GET_NDVAS(bp); d++)
		dsize += dva_get_dsize_sync(spa, &bp->blk_dva[d]);

	spa_config_exit(spa, SCL_VDEV, FTAG);

	return (dsize);
}

/*
 * ==========================================================================
 * Initialization and Termination
 * ==========================================================================
 */

static int
spa_name_compare(const void *a1, const void *a2)
{
	const spa_t *s1 = a1;
	const spa_t *s2 = a2;
	int s;

	s = strcmp(s1->spa_name, s2->spa_name);
	if (s > 0)
		return (1);
	if (s < 0)
		return (-1);
	return (0);
}

int
spa_busy(void)
{
	return (spa_active_count);
}

void
spa_boot_init()
{
	spa_config_load();
}

#ifdef _KERNEL
EVENTHANDLER_DEFINE(mountroot, spa_boot_init, NULL, 0);
#endif

void
spa_init(int mode)
{
	mutex_init(&spa_namespace_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa_spare_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa_l2cache_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&spa_namespace_cv, NULL, CV_DEFAULT, NULL);

	avl_create(&spa_namespace_avl, spa_name_compare, sizeof (spa_t),
	    offsetof(spa_t, spa_avl));

	avl_create(&spa_spare_avl, spa_spare_compare, sizeof (spa_aux_t),
	    offsetof(spa_aux_t, aux_avl));

	avl_create(&spa_l2cache_avl, spa_l2cache_compare, sizeof (spa_aux_t),
	    offsetof(spa_aux_t, aux_avl));

	spa_mode_global = mode;

#ifdef illumos
#ifdef _KERNEL
	spa_arch_init();
#else
	if (spa_mode_global != FREAD && dprintf_find_string("watch")) {
		arc_procfd = open("/proc/self/ctl", O_WRONLY);
		if (arc_procfd == -1) {
			perror("could not enable watchpoints: "
			    "opening /proc/self/ctl failed: ");
		} else {
			arc_watch = B_TRUE;
		}
	}
#endif
#endif /* illumos */
	refcount_sysinit();
	unique_init();
	range_tree_init();
	zio_init();
	lz4_init();
	dmu_init();
	zil_init();
	vdev_cache_stat_init();
	zfs_prop_init();
	zpool_prop_init();
	zpool_feature_init();
	spa_config_load();
	l2arc_start();
#ifndef illumos
#ifdef _KERNEL
	zfs_deadman_init();
#endif
#endif	/* !illumos */
}

void
spa_fini(void)
{
	l2arc_stop();

	spa_evict_all();

	vdev_cache_stat_fini();
	zil_fini();
	dmu_fini();
	lz4_fini();
	zio_fini();
	range_tree_fini();
	unique_fini();
	refcount_fini();

	avl_destroy(&spa_namespace_avl);
	avl_destroy(&spa_spare_avl);
	avl_destroy(&spa_l2cache_avl);

	cv_destroy(&spa_namespace_cv);
	mutex_destroy(&spa_namespace_lock);
	mutex_destroy(&spa_spare_lock);
	mutex_destroy(&spa_l2cache_lock);
}

/*
 * Return whether this pool has slogs.  No locking needed.
 * It's not a problem if the wrong answer is returned as it's only for
 * performance and not correctness
 */
boolean_t
spa_has_slogs(spa_t *spa)
{
	return (spa->spa_log_class->mc_rotor != NULL);
}

spa_log_state_t
spa_get_log_state(spa_t *spa)
{
	return (spa->spa_log_state);
}

void
spa_set_log_state(spa_t *spa, spa_log_state_t state)
{
	spa->spa_log_state = state;
}

boolean_t
spa_is_root(spa_t *spa)
{
	return (spa->spa_is_root);
}

boolean_t
spa_writeable(spa_t *spa)
{
	return (!!(spa->spa_mode & FWRITE));
}

/*
 * Returns true if there is a pending sync task in any of the current
 * syncing txg, the current quiescing txg, or the current open txg.
 */
boolean_t
spa_has_pending_synctask(spa_t *spa)
{
	return (!txg_all_lists_empty(&spa->spa_dsl_pool->dp_sync_tasks));
}

int
spa_mode(spa_t *spa)
{
	return (spa->spa_mode);
}

uint64_t
spa_bootfs(spa_t *spa)
{
	return (spa->spa_bootfs);
}

uint64_t
spa_delegation(spa_t *spa)
{
	return (spa->spa_delegation);
}

objset_t *
spa_meta_objset(spa_t *spa)
{
	return (spa->spa_meta_objset);
}

enum zio_checksum
spa_dedup_checksum(spa_t *spa)
{
	return (spa->spa_dedup_checksum);
}

/*
 * Reset pool scan stat per scan pass (or reboot).
 */
void
spa_scan_stat_init(spa_t *spa)
{
	/* data not stored on disk */
	spa->spa_scan_pass_start = gethrestime_sec();
	spa->spa_scan_pass_exam = 0;
	vdev_scan_stat_init(spa->spa_root_vdev);
}

/*
 * Get scan stats for zpool status reports
 */
int
spa_scan_get_stats(spa_t *spa, pool_scan_stat_t *ps)
{
	dsl_scan_t *scn = spa->spa_dsl_pool ?
	    spa->spa_dsl_pool->dp_scan : NULL;

	if (scn == NULL || scn->scn_phys.scn_func == POOL_SCAN_NONE)
		return (SET_ERROR(ENOENT));
	bzero(ps, sizeof (pool_scan_stat_t));

	/* data stored on disk */
	ps->pss_func = scn->scn_phys.scn_func;
	ps->pss_start_time = scn->scn_phys.scn_start_time;
	ps->pss_end_time = scn->scn_phys.scn_end_time;
	ps->pss_to_examine = scn->scn_phys.scn_to_examine;
	ps->pss_examined = scn->scn_phys.scn_examined;
	ps->pss_to_process = scn->scn_phys.scn_to_process;
	ps->pss_processed = scn->scn_phys.scn_processed;
	ps->pss_errors = scn->scn_phys.scn_errors;
	ps->pss_state = scn->scn_phys.scn_state;

	/* data not stored on disk */
	ps->pss_pass_start = spa->spa_scan_pass_start;
	ps->pss_pass_exam = spa->spa_scan_pass_exam;

	return (0);
}

boolean_t
spa_debug_enabled(spa_t *spa)
{
	return (spa->spa_debug);
}

int
spa_maxblocksize(spa_t *spa)
{
	if (spa_feature_is_enabled(spa, SPA_FEATURE_LARGE_BLOCKS))
		return (SPA_MAXBLOCKSIZE);
	else
		return (SPA_OLD_MAXBLOCKSIZE);
}