spa_misc.c revision 297075
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011, 2015 by Delphix. All rights reserved.
 * Copyright 2011 Nexenta Systems, Inc.  All rights reserved.
 * Copyright 2013 Martin Matuska <mm@FreeBSD.org>. All rights reserved.
 * Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
 * Copyright 2013 Saso Kiselkov. All rights reserved.
 */

#include <sys/zfs_context.h>
#include <sys/spa_impl.h>
#include <sys/spa_boot.h>
#include <sys/zio.h>
#include <sys/zio_checksum.h>
#include <sys/zio_compress.h>
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/zap.h>
#include <sys/zil.h>
#include <sys/vdev_impl.h>
#include <sys/metaslab.h>
#include <sys/uberblock_impl.h>
#include <sys/txg.h>
#include <sys/avl.h>
#include <sys/unique.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_scan.h>
#include <sys/fs/zfs.h>
#include <sys/metaslab_impl.h>
#include <sys/arc.h>
#include <sys/ddt.h>
#include "zfs_prop.h"
#include <sys/zfeature.h>

/*
 * SPA locking
 *
 * There are three basic locks for managing spa_t structures:
 *
 * spa_namespace_lock (global mutex)
 *
 *	This lock must be acquired to do any of the following:
 *
 *		- Lookup a spa_t by name
 *		- Add or remove a spa_t from the namespace
 *		- Increase spa_refcount from non-zero
 *		- Check if spa_refcount is zero
 *		- Rename a spa_t
 *		- Add/remove/attach/detach devices
 *		- Held for the duration of create/destroy/import/export
 *
 *	It does not need to handle recursion.  A create or destroy may
 *	reference objects (files or zvols) in other pools, but by
 *	definition they must have an existing reference, and will never need
 *	to lookup a spa_t by name.
 *
 * spa_refcount (per-spa refcount_t protected by mutex)
 *
 *	This reference count keeps track of any active users of the spa_t.
 *	The spa_t cannot be destroyed or freed while this is non-zero.
 *	Internally, the refcount is never really 'zero' - opening a pool
 *	implicitly keeps some references in the DMU.  Internally we check
 *	against spa_minref, but present the image of a zero/non-zero value
 *	to consumers.
 *
 * spa_config_lock[] (per-spa array of rwlocks)
 *
 *	This protects the spa_t from config changes, and must be held in
 *	the following circumstances:
 *
 *		- RW_READER to perform I/O to the spa
 *		- RW_WRITER to change the vdev config
 *
 * The locking order is fairly straightforward:
 *
 *		spa_namespace_lock	->	spa_refcount
 *
 *	The namespace lock must be acquired to increase the refcount from 0
 *	or to check if it is zero.
 *
 *		spa_refcount		->	spa_config_lock[]
 *
 *	There must be at least one valid reference on the spa_t to acquire
 *	the config lock.
 *
 *		spa_namespace_lock	->	spa_config_lock[]
 *
 *	The namespace lock must always be taken before the config lock.
 *
 *
 * The spa_namespace_lock can be acquired directly and is globally visible.
 *
 * The namespace is manipulated using the following functions, all of which
 * require the spa_namespace_lock to be held.
 *
 *	spa_lookup()		Lookup a spa_t by name.
 *
 *	spa_add()		Create a new spa_t in the namespace.
 *
 *	spa_remove()		Remove a spa_t from the namespace.  This also
 *				frees up any memory associated with the spa_t.
 *
 *	spa_next()		Returns the next spa_t in the system, or the
 *				first if NULL is passed.
 *
 *	spa_evict_all()		Shutdown and remove all spa_t structures in
 *				the system.
 *
 *	spa_guid_exists()	Determine whether a pool/device guid exists.
 *
 * The spa_refcount is manipulated using the following functions:
 *
 *	spa_open_ref()		Adds a reference to the given spa_t.  Must be
 *				called with spa_namespace_lock held if the
 *				refcount is currently zero.
 *
 *	spa_close()		Remove a reference from the spa_t.  This will
 *				not free the spa_t or remove it from the
 *				namespace.  No locking is required.
 *
 *	spa_refcount_zero()	Returns true if the refcount is currently
 *				zero.  Must be called with spa_namespace_lock
 *				held.
 *
 * The spa_config_lock[] is an array of rwlocks, ordered as follows:
 * SCL_CONFIG > SCL_STATE > SCL_ALLOC > SCL_ZIO > SCL_FREE > SCL_VDEV.
 * spa_config_lock[] is manipulated with spa_config_{enter,exit,held}().
 *
 * To read the configuration, it suffices to hold one of these locks as reader.
 * To modify the configuration, you must hold all locks as writer.  To modify
 * vdev state without altering the vdev tree's topology (e.g. online/offline),
 * you must hold SCL_STATE and SCL_ZIO as writer.
 *
 * We use these distinct config locks to avoid recursive lock entry.
 * For example, spa_sync() (which holds SCL_CONFIG as reader) induces
 * block allocations (SCL_ALLOC), which may require reading space maps
 * from disk (dmu_read() -> zio_read() -> SCL_ZIO).
 *
 * The spa config locks cannot be normal rwlocks because we need the
 * ability to hand off ownership.  For example, SCL_ZIO is acquired
 * by the issuing thread and later released by an interrupt thread.
 * They do, however, obey the usual write-wanted semantics to prevent
 * writer (i.e. system administrator) starvation.
 *
 * The lock acquisition rules are as follows:
 *
 * SCL_CONFIG
 *	Protects changes to the vdev tree topology, such as vdev
 *	add/remove/attach/detach.  Protects the dirty config list
 *	(spa_config_dirty_list) and the set of spares and l2arc devices.
 *
 * SCL_STATE
 *	Protects changes to pool state and vdev state, such as vdev
 *	online/offline/fault/degrade/clear.  Protects the dirty state list
 *	(spa_state_dirty_list) and global pool state (spa_state).
 *
 * SCL_ALLOC
 *	Protects changes to metaslab groups and classes.
 *	Held as reader by metaslab_alloc() and metaslab_claim().
 *
 * SCL_ZIO
 *	Held by bp-level zios (those which have no io_vd upon entry)
 *	to prevent changes to the vdev tree.  The bp-level zio implicitly
 *	protects all of its vdev child zios, which do not hold SCL_ZIO.
 *
 * SCL_FREE
 *	Protects changes to metaslab groups and classes.
 *	Held as reader by metaslab_free().  SCL_FREE is distinct from
 *	SCL_ALLOC, and lower than SCL_ZIO, so that we can safely free
 *	blocks in zio_done() while another i/o that holds either
 *	SCL_ALLOC or SCL_ZIO is waiting for this i/o to complete.
 *
 * SCL_VDEV
 *	Held as reader to prevent changes to the vdev tree during trivial
 *	inquiries such as bp_get_dsize().  SCL_VDEV is distinct from the
 *	other locks, and lower than all of them, to ensure that it's safe
 *	to acquire regardless of caller context.
 *
 * In addition, the following rules apply:
 *
 * (a)	spa_props_lock protects pool properties, spa_config and
 *	spa_config_list.  The lock ordering is SCL_CONFIG > spa_props_lock.
 *
 * (b)	I/O operations on leaf vdevs.  For any zio operation that takes
 *	an explicit vdev_t argument -- such as zio_ioctl(), zio_read_phys(),
 *	or zio_write_phys() -- the caller must ensure that the config cannot
 *	change in the interim, and that the vdev cannot be reopened.
 *	SCL_STATE as reader suffices for both.
 *
 * The vdev configuration is protected by spa_vdev_enter() / spa_vdev_exit().
 *
 *	spa_vdev_enter()	Acquire the namespace lock and the config lock
 *				for writing.
 *
 *	spa_vdev_exit()		Release the config lock, wait for all I/O
 *				to complete, sync the updated configs to the
 *				cache, and release the namespace lock.
 *
 * vdev state is protected by spa_vdev_state_enter() / spa_vdev_state_exit().
 * Like spa_vdev_enter/exit, these are convenience wrappers -- the actual
 * locking is, always, based on spa_namespace_lock and spa_config_lock[].
 *
 * spa_rename() is also implemented within this file since it requires
 * manipulation of the namespace.
 */
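/*
 * For example, a hypothetical read-only inquiry takes a single config
 * lock as reader around the access, while reconfiguration must take all
 * of them as writer; "example_inquire" is an illustrative name only,
 * not part of the pool code:
 *
 *	static void
 *	example_inquire(spa_t *spa)
 *	{
 *		spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
 *		(void) spa->spa_root_vdev->vdev_children;
 *		spa_config_exit(spa, SCL_VDEV, FTAG);
 *	}
 */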
static avl_tree_t spa_namespace_avl;
kmutex_t spa_namespace_lock;
static kcondvar_t spa_namespace_cv;
static int spa_active_count;
int spa_max_replication_override = SPA_DVAS_PER_BP;

static kmutex_t spa_spare_lock;
static avl_tree_t spa_spare_avl;
static kmutex_t spa_l2cache_lock;
static avl_tree_t spa_l2cache_avl;

kmem_cache_t *spa_buffer_pool;
int spa_mode_global;

#ifdef ZFS_DEBUG
/* Everything except dprintf and spa is on by default in debug builds */
int zfs_flags = ~(ZFS_DEBUG_DPRINTF | ZFS_DEBUG_SPA);
#else
int zfs_flags = 0;
#endif
SYSCTL_DECL(_debug);
TUNABLE_INT("debug.zfs_flags", &zfs_flags);
SYSCTL_INT(_debug, OID_AUTO, zfs_flags, CTLFLAG_RWTUN, &zfs_flags, 0,
    "ZFS debug flags.");

/*
 * zfs_recover can be set to nonzero to attempt to recover from
 * otherwise-fatal errors, typically caused by on-disk corruption.  When
 * set, calls to zfs_panic_recover() will turn into warning messages.
 * This should only be used as a last resort, as it typically results
 * in leaked space, or worse.
 */
boolean_t zfs_recover = B_FALSE;
SYSCTL_DECL(_vfs_zfs);
TUNABLE_INT("vfs.zfs.recover", &zfs_recover);
SYSCTL_INT(_vfs_zfs, OID_AUTO, recover, CTLFLAG_RWTUN, &zfs_recover, 0,
    "Try to recover from otherwise-fatal errors.");

static int
sysctl_vfs_zfs_debug_flags(SYSCTL_HANDLER_ARGS)
{
	int err, val;

	val = zfs_flags;
	err = sysctl_handle_int(oidp, &val, 0, req);
	if (err != 0 || req->newptr == NULL)
		return (err);

	/*
	 * ZFS_DEBUG_MODIFY must be enabled prior to boot so all
	 * arc buffers in the system have the necessary additional
	 * checksum data.  However, it is safe to disable at any
	 * time.
	 */
	if (!(zfs_flags & ZFS_DEBUG_MODIFY))
		val &= ~ZFS_DEBUG_MODIFY;
	zfs_flags = val;

	return (0);
}
TUNABLE_INT("vfs.zfs.debug_flags", &zfs_flags);
SYSCTL_PROC(_vfs_zfs, OID_AUTO, debug_flags,
    CTLTYPE_UINT | CTLFLAG_MPSAFE | CTLFLAG_RW, 0, sizeof(int),
    sysctl_vfs_zfs_debug_flags, "IU", "Debug flags for ZFS testing.");

/*
 * If destroy encounters an EIO while reading metadata (e.g. indirect
 * blocks), space referenced by the missing metadata cannot be freed.
 * Normally this causes the background destroy to become "stalled", as
 * it is unable to make forward progress.  While in this stalled state,
 * all remaining space to free from the error-encountering filesystem is
 * "temporarily leaked".  Set this flag to cause it to ignore the EIO,
 * permanently leak the space from indirect blocks that cannot be read,
 * and continue to free everything else that it can.
 *
 * The default, "stalling" behavior is useful if the storage partially
 * fails (i.e. some but not all i/os fail), and then later recovers.  In
 * this case, we will be able to continue pool operations while it is
 * partially failed, and when it recovers, we can continue to free the
 * space, with no leaks.  However, note that this case is actually
 * fairly rare.
 *
 * Typically pools either (a) fail completely (but perhaps temporarily,
 * e.g. a top-level vdev going offline), or (b) have localized,
 * permanent errors (e.g. disk returns the wrong data due to bit flip or
 * firmware bug).  In case (a), this setting does not matter because the
 * pool will be suspended and the sync thread will not be able to make
 * forward progress regardless.  In case (b), because the error is
 * permanent, the best we can do is leak the minimum amount of space,
 * which is what setting this flag will do.  Therefore, it is reasonable
 * for this flag to normally be set, but we chose the more conservative
 * approach of not setting it, so that there is no possibility of
 * leaking space in the "partial temporary" failure case.
 */
boolean_t zfs_free_leak_on_eio = B_FALSE;

/*
 * Expiration time in milliseconds.  This value has two meanings.  First,
 * it is used to determine when the spa_deadman() logic should fire.  By
 * default the spa_deadman() will fire if spa_sync() has not completed
 * in 1000 seconds.  Secondly, the value determines if an I/O is
 * considered "hung".  Any I/O that has not completed in
 * zfs_deadman_synctime_ms is considered "hung", resulting in a system
 * panic.
 */
uint64_t zfs_deadman_synctime_ms = 1000000ULL;
TUNABLE_QUAD("vfs.zfs.deadman_synctime_ms", &zfs_deadman_synctime_ms);
SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, deadman_synctime_ms, CTLFLAG_RDTUN,
    &zfs_deadman_synctime_ms, 0,
    "Stalled ZFS I/O expiration time in milliseconds");

/*
 * Check time in milliseconds.  This defines the frequency at which we check
 * for hung I/O.
 */
uint64_t zfs_deadman_checktime_ms = 5000ULL;
TUNABLE_QUAD("vfs.zfs.deadman_checktime_ms", &zfs_deadman_checktime_ms);
SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, deadman_checktime_ms, CTLFLAG_RDTUN,
    &zfs_deadman_checktime_ms, 0,
    "Period of checks for stalled ZFS I/O in milliseconds");

/*
 * Default value of -1 for zfs_deadman_enabled is resolved in
 * zfs_deadman_init().
 */
int zfs_deadman_enabled = -1;
TUNABLE_INT("vfs.zfs.deadman_enabled", &zfs_deadman_enabled);
SYSCTL_INT(_vfs_zfs, OID_AUTO, deadman_enabled, CTLFLAG_RDTUN,
    &zfs_deadman_enabled, 0, "Kernel panic on stalled ZFS I/O");

/*
 * The worst case is single-sector max-parity RAID-Z blocks, in which
 * case the space requirement is exactly (VDEV_RAIDZ_MAXPARITY + 1)
 * times the size; so just assume that.  Add to this the fact that
 * we can have up to 3 DVAs per bp, and one more factor of 2 because
 * the block may be dittoed with up to 3 DVAs by ddt_sync().  All together,
 * the worst case is:
 *     (VDEV_RAIDZ_MAXPARITY + 1) * SPA_DVAS_PER_BP * 2 == 24
 */
int spa_asize_inflation = 24;
TUNABLE_INT("vfs.zfs.spa_asize_inflation", &spa_asize_inflation);
SYSCTL_INT(_vfs_zfs, OID_AUTO, spa_asize_inflation, CTLFLAG_RWTUN,
    &spa_asize_inflation, 0,
    "Worst case inflation factor for single sector writes");
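/*
 * For example, with the default inflation factor of 24, a transaction
 * writing a 128KB logical block reserves up to 128KB * 24 = 3MB of
 * allocated space for it in the worst case (see spa_get_asize() below).
 */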
#ifndef illumos
#ifdef _KERNEL
static void
zfs_deadman_init()
{
	/*
	 * If we are not on i386 or amd64, or are running in a virtual
	 * machine, disable the ZFS deadman thread by default.
	 */
	if (zfs_deadman_enabled == -1) {
#if defined(__amd64__) || defined(__i386__)
		zfs_deadman_enabled = (vm_guest == VM_GUEST_NO) ? 1 : 0;
#else
		zfs_deadman_enabled = 0;
#endif
	}
}
#endif	/* _KERNEL */
#endif	/* !illumos */

/*
 * Normally, we don't allow the last 3.1% (1/(2^spa_slop_shift)) of space in
 * the pool to be consumed.  This ensures that we don't run the pool
 * completely out of space, due to unaccounted changes (e.g. to the MOS).
 * It also limits the worst-case time to allocate space.  If we have
 * less than this amount of free space, most ZPL operations (e.g. write,
 * create) will return ENOSPC.
 *
 * Certain operations (e.g. file removal, most administrative actions) can
 * use half the slop space.  They will only return ENOSPC if less than half
 * the slop space is free.  Typically, once the pool has less than the slop
 * space free, the user will use these operations to free up space in the
 * pool.  These are the operations that call dsl_pool_adjustedsize() with
 * the netfree argument set to TRUE.
 *
 * A very restricted set of operations are always permitted, regardless of
 * the amount of free space.  These are the operations that call
 * dsl_sync_task(ZFS_SPACE_CHECK_NONE), e.g. "zfs destroy".  If these
 * operations result in a net increase in the amount of space used,
 * it is possible to run the pool completely out of space, causing it to
 * be permanently read-only.
 *
 * See also the comments in zfs_space_check_t.
 */
int spa_slop_shift = 5;
SYSCTL_INT(_vfs_zfs, OID_AUTO, spa_slop_shift, CTLFLAG_RWTUN,
    &spa_slop_shift, 0,
    "Shift value of reserved space (1/(2^spa_slop_shift)).");
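/*
 * For example, on a pool with 1TB of dspace and the default
 * spa_slop_shift of 5, the reserved slop space is
 * MAX(1TB >> 5, 32MB) = 32GB, so ordinary ZPL writes begin returning
 * ENOSPC once less than 32GB remains free (see spa_get_slop_space()
 * below).
 */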
/*
 * ==========================================================================
 * SPA config locking
 * ==========================================================================
 */
static void
spa_config_lock_init(spa_t *spa)
{
	for (int i = 0; i < SCL_LOCKS; i++) {
		spa_config_lock_t *scl = &spa->spa_config_lock[i];
		mutex_init(&scl->scl_lock, NULL, MUTEX_DEFAULT, NULL);
		cv_init(&scl->scl_cv, NULL, CV_DEFAULT, NULL);
		refcount_create_untracked(&scl->scl_count);
		scl->scl_writer = NULL;
		scl->scl_write_wanted = 0;
	}
}

static void
spa_config_lock_destroy(spa_t *spa)
{
	for (int i = 0; i < SCL_LOCKS; i++) {
		spa_config_lock_t *scl = &spa->spa_config_lock[i];
		mutex_destroy(&scl->scl_lock);
		cv_destroy(&scl->scl_cv);
		refcount_destroy(&scl->scl_count);
		ASSERT(scl->scl_writer == NULL);
		ASSERT(scl->scl_write_wanted == 0);
	}
}

int
spa_config_tryenter(spa_t *spa, int locks, void *tag, krw_t rw)
{
	for (int i = 0; i < SCL_LOCKS; i++) {
		spa_config_lock_t *scl = &spa->spa_config_lock[i];
		if (!(locks & (1 << i)))
			continue;
		mutex_enter(&scl->scl_lock);
		if (rw == RW_READER) {
			if (scl->scl_writer || scl->scl_write_wanted) {
				mutex_exit(&scl->scl_lock);
				spa_config_exit(spa, locks ^ (1 << i), tag);
				return (0);
			}
		} else {
			ASSERT(scl->scl_writer != curthread);
			if (!refcount_is_zero(&scl->scl_count)) {
				mutex_exit(&scl->scl_lock);
				spa_config_exit(spa, locks ^ (1 << i), tag);
				return (0);
			}
			scl->scl_writer = curthread;
		}
		(void) refcount_add(&scl->scl_count, tag);
		mutex_exit(&scl->scl_lock);
	}
	return (1);
}

void
spa_config_enter(spa_t *spa, int locks, void *tag, krw_t rw)
{
	int wlocks_held = 0;

	ASSERT3U(SCL_LOCKS, <, sizeof (wlocks_held) * NBBY);

	for (int i = 0; i < SCL_LOCKS; i++) {
		spa_config_lock_t *scl = &spa->spa_config_lock[i];
		if (scl->scl_writer == curthread)
			wlocks_held |= (1 << i);
		if (!(locks & (1 << i)))
			continue;
		mutex_enter(&scl->scl_lock);
		if (rw == RW_READER) {
			while (scl->scl_writer || scl->scl_write_wanted) {
				cv_wait(&scl->scl_cv, &scl->scl_lock);
			}
		} else {
			ASSERT(scl->scl_writer != curthread);
			while (!refcount_is_zero(&scl->scl_count)) {
				scl->scl_write_wanted++;
				cv_wait(&scl->scl_cv, &scl->scl_lock);
				scl->scl_write_wanted--;
			}
			scl->scl_writer = curthread;
		}
		(void) refcount_add(&scl->scl_count, tag);
		mutex_exit(&scl->scl_lock);
	}
	ASSERT(wlocks_held <= locks);
}

void
spa_config_exit(spa_t *spa, int locks, void *tag)
{
	for (int i = SCL_LOCKS - 1; i >= 0; i--) {
		spa_config_lock_t *scl = &spa->spa_config_lock[i];
		if (!(locks & (1 << i)))
			continue;
		mutex_enter(&scl->scl_lock);
		ASSERT(!refcount_is_zero(&scl->scl_count));
		if (refcount_remove(&scl->scl_count, tag) == 0) {
			ASSERT(scl->scl_writer == NULL ||
			    scl->scl_writer == curthread);
			scl->scl_writer = NULL;	/* OK in either case */
			cv_broadcast(&scl->scl_cv);
		}
		mutex_exit(&scl->scl_lock);
	}
}

int
spa_config_held(spa_t *spa, int locks, krw_t rw)
{
	int locks_held = 0;

	for (int i = 0; i < SCL_LOCKS; i++) {
		spa_config_lock_t *scl = &spa->spa_config_lock[i];
		if (!(locks & (1 << i)))
			continue;
		if ((rw == RW_READER && !refcount_is_zero(&scl->scl_count)) ||
		    (rw == RW_WRITER && scl->scl_writer == curthread))
			locks_held |= 1 << i;
	}

	return (locks_held);
}
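/*
 * spa_config_held() is typically used in assertions; e.g., a function
 * that requires a stable vdev tree might begin with (illustrative):
 *
 *	ASSERT(spa_config_held(spa, SCL_VDEV, RW_READER) != 0);
 *
 * as dva_get_dsize_sync() below does for SCL_ALL.
 */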
/*
 * ==========================================================================
 * SPA namespace functions
 * ==========================================================================
 */

/*
 * Lookup the named spa_t in the AVL tree.  The spa_namespace_lock must be
 * held.  Returns NULL if no matching spa_t is found.
 */
spa_t *
spa_lookup(const char *name)
{
	static spa_t search;	/* spa_t is large; don't allocate on stack */
	spa_t *spa;
	avl_index_t where;
	char *cp;

	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	(void) strlcpy(search.spa_name, name, sizeof (search.spa_name));

	/*
	 * If it's a full dataset name, figure out the pool name and
	 * just use that.
	 */
	cp = strpbrk(search.spa_name, "/@#");
	if (cp != NULL)
		*cp = '\0';

	spa = avl_find(&spa_namespace_avl, &search, &where);

	return (spa);
}
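/*
 * For example, spa_lookup("tank/home@yesterday") truncates its search
 * key at the first '/', '@' or '#' and therefore looks up the pool
 * named "tank".
 */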
/*
 * Fires when spa_sync has not completed within zfs_deadman_synctime_ms.
 * If the zfs_deadman_enabled flag is set then it inspects all vdev queues
 * looking for potentially hung I/Os.
 */
void
spa_deadman(void *arg)
{
	spa_t *spa = arg;

	/*
	 * Disable the deadman timer if the pool is suspended.
	 */
	if (spa_suspended(spa)) {
#ifdef illumos
		VERIFY(cyclic_reprogram(spa->spa_deadman_cycid, CY_INFINITY));
#else
		/* Nothing; just don't schedule any future callouts. */
#endif
		return;
	}

	zfs_dbgmsg("slow spa_sync: started %llu seconds ago, calls %llu",
	    (gethrtime() - spa->spa_sync_starttime) / NANOSEC,
	    ++spa->spa_deadman_calls);
	if (zfs_deadman_enabled)
		vdev_deadman(spa->spa_root_vdev);
#ifdef __FreeBSD__
#ifdef _KERNEL
	callout_schedule(&spa->spa_deadman_cycid,
	    hz * zfs_deadman_checktime_ms / MILLISEC);
#endif
#endif
}

/*
 * Create an uninitialized spa_t with the given name.  Requires
 * spa_namespace_lock.  The caller must ensure that the spa_t doesn't already
 * exist by calling spa_lookup() first.
 */
spa_t *
spa_add(const char *name, nvlist_t *config, const char *altroot)
{
	spa_t *spa;
	spa_config_dirent_t *dp;
#ifdef illumos
	cyc_handler_t hdlr;
	cyc_time_t when;
#endif

	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	spa = kmem_zalloc(sizeof (spa_t), KM_SLEEP);

	mutex_init(&spa->spa_async_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_errlist_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_errlog_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_evicting_os_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_history_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_proc_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_props_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_cksum_tmpls_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_scrub_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_suspend_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_vdev_top_lock, NULL, MUTEX_DEFAULT, NULL);

	cv_init(&spa->spa_async_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&spa->spa_evicting_os_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&spa->spa_proc_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&spa->spa_scrub_io_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&spa->spa_suspend_cv, NULL, CV_DEFAULT, NULL);

	for (int t = 0; t < TXG_SIZE; t++)
		bplist_create(&spa->spa_free_bplist[t]);

	(void) strlcpy(spa->spa_name, name, sizeof (spa->spa_name));
	spa->spa_state = POOL_STATE_UNINITIALIZED;
	spa->spa_freeze_txg = UINT64_MAX;
	spa->spa_final_txg = UINT64_MAX;
	spa->spa_load_max_txg = UINT64_MAX;
	spa->spa_proc = &p0;
	spa->spa_proc_state = SPA_PROC_NONE;

#ifdef illumos
	hdlr.cyh_func = spa_deadman;
	hdlr.cyh_arg = spa;
	hdlr.cyh_level = CY_LOW_LEVEL;
#endif

	spa->spa_deadman_synctime = MSEC2NSEC(zfs_deadman_synctime_ms);

#ifdef illumos
	/*
	 * This determines how often we need to check for hung I/Os after
	 * the cyclic has already fired.  Since checking for hung I/Os is
	 * an expensive operation we don't want to check too frequently.
	 * Instead wait for 5 seconds before checking again.
	 */
	when.cyt_interval = MSEC2NSEC(zfs_deadman_checktime_ms);
	when.cyt_when = CY_INFINITY;
	mutex_enter(&cpu_lock);
	spa->spa_deadman_cycid = cyclic_add(&hdlr, &when);
	mutex_exit(&cpu_lock);
#else	/* !illumos */
#ifdef _KERNEL
	callout_init(&spa->spa_deadman_cycid, CALLOUT_MPSAFE);
#endif
#endif
	refcount_create(&spa->spa_refcount);
	spa_config_lock_init(spa);

	avl_add(&spa_namespace_avl, spa);

	/*
	 * Set the alternate root, if there is one.
	 */
	if (altroot) {
		spa->spa_root = spa_strdup(altroot);
		spa_active_count++;
	}

	/*
	 * Every pool starts with the default cachefile.
	 */
	list_create(&spa->spa_config_list, sizeof (spa_config_dirent_t),
	    offsetof(spa_config_dirent_t, scd_link));

	dp = kmem_zalloc(sizeof (spa_config_dirent_t), KM_SLEEP);
	dp->scd_path = altroot ? NULL : spa_strdup(spa_config_path);
	list_insert_head(&spa->spa_config_list, dp);

	VERIFY(nvlist_alloc(&spa->spa_load_info, NV_UNIQUE_NAME,
	    KM_SLEEP) == 0);

	if (config != NULL) {
		nvlist_t *features;

		if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_FEATURES_FOR_READ,
		    &features) == 0) {
			VERIFY(nvlist_dup(features, &spa->spa_label_features,
			    0) == 0);
		}

		VERIFY(nvlist_dup(config, &spa->spa_config, 0) == 0);
	}

	if (spa->spa_label_features == NULL) {
		VERIFY(nvlist_alloc(&spa->spa_label_features, NV_UNIQUE_NAME,
		    KM_SLEEP) == 0);
	}

	spa->spa_debug = ((zfs_flags & ZFS_DEBUG_SPA) != 0);

	spa->spa_min_ashift = INT_MAX;
	spa->spa_max_ashift = 0;

	/*
	 * As a pool is being created, treat all features as disabled by
	 * setting SPA_FEATURE_DISABLED for all entries in the feature
	 * refcount cache.
	 */
	for (int i = 0; i < SPA_FEATURES; i++) {
		spa->spa_feat_refcount_cache[i] = SPA_FEATURE_DISABLED;
	}

	return (spa);
}

/*
 * Removes a spa_t from the namespace, freeing up any memory used.  Requires
 * spa_namespace_lock.  This is called only after the spa_t has been closed and
 * deactivated.
 */
void
spa_remove(spa_t *spa)
{
	spa_config_dirent_t *dp;

	ASSERT(MUTEX_HELD(&spa_namespace_lock));
	ASSERT(spa->spa_state == POOL_STATE_UNINITIALIZED);
	ASSERT3U(refcount_count(&spa->spa_refcount), ==, 0);

	nvlist_free(spa->spa_config_splitting);

	avl_remove(&spa_namespace_avl, spa);
	cv_broadcast(&spa_namespace_cv);

	if (spa->spa_root) {
		spa_strfree(spa->spa_root);
		spa_active_count--;
	}

	while ((dp = list_head(&spa->spa_config_list)) != NULL) {
		list_remove(&spa->spa_config_list, dp);
		if (dp->scd_path != NULL)
			spa_strfree(dp->scd_path);
		kmem_free(dp, sizeof (spa_config_dirent_t));
	}

	list_destroy(&spa->spa_config_list);

	nvlist_free(spa->spa_label_features);
	nvlist_free(spa->spa_load_info);
	spa_config_set(spa, NULL);

#ifdef illumos
	mutex_enter(&cpu_lock);
	if (spa->spa_deadman_cycid != CYCLIC_NONE)
		cyclic_remove(spa->spa_deadman_cycid);
	mutex_exit(&cpu_lock);
	spa->spa_deadman_cycid = CYCLIC_NONE;
#else	/* !illumos */
#ifdef _KERNEL
	callout_drain(&spa->spa_deadman_cycid);
#endif
#endif

	refcount_destroy(&spa->spa_refcount);

	spa_config_lock_destroy(spa);

	for (int t = 0; t < TXG_SIZE; t++)
		bplist_destroy(&spa->spa_free_bplist[t]);

	zio_checksum_templates_free(spa);

	cv_destroy(&spa->spa_async_cv);
	cv_destroy(&spa->spa_evicting_os_cv);
	cv_destroy(&spa->spa_proc_cv);
	cv_destroy(&spa->spa_scrub_io_cv);
	cv_destroy(&spa->spa_suspend_cv);

	mutex_destroy(&spa->spa_async_lock);
	mutex_destroy(&spa->spa_errlist_lock);
	mutex_destroy(&spa->spa_errlog_lock);
	mutex_destroy(&spa->spa_evicting_os_lock);
	mutex_destroy(&spa->spa_history_lock);
	mutex_destroy(&spa->spa_proc_lock);
	mutex_destroy(&spa->spa_props_lock);
	mutex_destroy(&spa->spa_cksum_tmpls_lock);
	mutex_destroy(&spa->spa_scrub_lock);
	mutex_destroy(&spa->spa_suspend_lock);
	mutex_destroy(&spa->spa_vdev_top_lock);

	kmem_free(spa, sizeof (spa_t));
}

/*
 * Given a pool, return the next pool in the namespace, or NULL if there is
 * none.  If 'prev' is NULL, return the first pool.
 */
spa_t *
spa_next(spa_t *prev)
{
	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	if (prev)
		return (AVL_NEXT(&spa_namespace_avl, prev));
	else
		return (avl_first(&spa_namespace_avl));
}
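/*
 * A hypothetical walk over every imported pool would start spa_next()
 * with NULL and hold the namespace lock across the iteration, e.g.:
 *
 *	spa_t *spa = NULL;
 *
 *	mutex_enter(&spa_namespace_lock);
 *	while ((spa = spa_next(spa)) != NULL)
 *		(void) printf("%s\n", spa_name(spa));
 *	mutex_exit(&spa_namespace_lock);
 */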
/*
 * ==========================================================================
 * SPA refcount functions
 * ==========================================================================
 */

/*
 * Add a reference to the given spa_t.  Must have at least one reference, or
 * have the namespace lock held.
 */
void
spa_open_ref(spa_t *spa, void *tag)
{
	ASSERT(refcount_count(&spa->spa_refcount) >= spa->spa_minref ||
	    MUTEX_HELD(&spa_namespace_lock));
	(void) refcount_add(&spa->spa_refcount, tag);
}

/*
 * Remove a reference to the given spa_t.  Must have at least one reference, or
 * have the namespace lock held.
 */
void
spa_close(spa_t *spa, void *tag)
{
	ASSERT(refcount_count(&spa->spa_refcount) > spa->spa_minref ||
	    MUTEX_HELD(&spa_namespace_lock));
	(void) refcount_remove(&spa->spa_refcount, tag);
}

/*
 * Remove a reference to the given spa_t held by a dsl dir that is
 * being asynchronously released.  Async releases occur from a taskq
 * performing eviction of dsl datasets and dirs.  The namespace lock
 * isn't held and the hold by the object being evicted may contribute to
 * spa_minref (e.g. dataset or directory released during pool export),
 * so the asserts in spa_close() do not apply.
 */
void
spa_async_close(spa_t *spa, void *tag)
{
	(void) refcount_remove(&spa->spa_refcount, tag);
}

/*
 * Check to see if the spa refcount is zero.  Must be called with
 * spa_namespace_lock held.  We really compare against spa_minref, which is
 * the number of references acquired when opening a pool.
 */
boolean_t
spa_refcount_zero(spa_t *spa)
{
	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	return (refcount_count(&spa->spa_refcount) == spa->spa_minref);
}
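/*
 * The usual pattern pairs spa_open_ref() and spa_close() on the same
 * tag, taking the namespace lock only for the lookup (illustrative):
 *
 *	mutex_enter(&spa_namespace_lock);
 *	if ((spa = spa_lookup(name)) != NULL)
 *		spa_open_ref(spa, FTAG);
 *	mutex_exit(&spa_namespace_lock);
 *	...
 *	spa_close(spa, FTAG);
 */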
/*
 * ==========================================================================
 * SPA spare and l2cache tracking
 * ==========================================================================
 */

/*
 * Hot spares and cache devices are tracked using the same code below,
 * for 'auxiliary' devices.
 */

typedef struct spa_aux {
	uint64_t	aux_guid;
	uint64_t	aux_pool;
	avl_node_t	aux_avl;
	int		aux_count;
} spa_aux_t;

static int
spa_aux_compare(const void *a, const void *b)
{
	const spa_aux_t *sa = a;
	const spa_aux_t *sb = b;

	if (sa->aux_guid < sb->aux_guid)
		return (-1);
	else if (sa->aux_guid > sb->aux_guid)
		return (1);
	else
		return (0);
}

void
spa_aux_add(vdev_t *vd, avl_tree_t *avl)
{
	avl_index_t where;
	spa_aux_t search;
	spa_aux_t *aux;

	search.aux_guid = vd->vdev_guid;
	if ((aux = avl_find(avl, &search, &where)) != NULL) {
		aux->aux_count++;
	} else {
		aux = kmem_zalloc(sizeof (spa_aux_t), KM_SLEEP);
		aux->aux_guid = vd->vdev_guid;
		aux->aux_count = 1;
		avl_insert(avl, aux, where);
	}
}

void
spa_aux_remove(vdev_t *vd, avl_tree_t *avl)
{
	spa_aux_t search;
	spa_aux_t *aux;
	avl_index_t where;

	search.aux_guid = vd->vdev_guid;
	aux = avl_find(avl, &search, &where);

	ASSERT(aux != NULL);

	if (--aux->aux_count == 0) {
		avl_remove(avl, aux);
		kmem_free(aux, sizeof (spa_aux_t));
	} else if (aux->aux_pool == spa_guid(vd->vdev_spa)) {
		aux->aux_pool = 0ULL;
	}
}

boolean_t
spa_aux_exists(uint64_t guid, uint64_t *pool, int *refcnt, avl_tree_t *avl)
{
	spa_aux_t search, *found;

	search.aux_guid = guid;
	found = avl_find(avl, &search, NULL);

	if (pool) {
		if (found)
			*pool = found->aux_pool;
		else
			*pool = 0ULL;
	}

	if (refcnt) {
		if (found)
			*refcnt = found->aux_count;
		else
			*refcnt = 0;
	}

	return (found != NULL);
}

void
spa_aux_activate(vdev_t *vd, avl_tree_t *avl)
{
	spa_aux_t search, *found;
	avl_index_t where;

	search.aux_guid = vd->vdev_guid;
	found = avl_find(avl, &search, &where);
	ASSERT(found != NULL);
	ASSERT(found->aux_pool == 0ULL);

	found->aux_pool = spa_guid(vd->vdev_spa);
}

/*
 * Spares are tracked globally due to the following constraints:
 *
 *	- A spare may be part of multiple pools.
 *	- A spare may be added to a pool even if it's actively in use within
 *	  another pool.
 *	- A spare in use in any pool can only be the source of a replacement
 *	  if the target is a spare in the same pool.
 *
 * We keep track of all spares on the system through the use of a reference
 * counted AVL tree.  When a vdev is added as a spare, or used as a replacement
 * spare, then we bump the reference count in the AVL tree.  In addition, we
 * set the 'vdev_isspare' member to indicate that the device is a spare (active
 * or inactive).  When a spare is made active (used to replace a device in the
 * pool), we also keep track of which pool it's been made a part of.
 *
 * The 'spa_spare_lock' protects the AVL tree.  These functions are normally
 * called under the spa_namespace lock as part of vdev reconfiguration.  The
 * separate spare lock exists for the status query path, which does not need to
 * be completely consistent with respect to other vdev configuration changes.
 */
static int
spa_spare_compare(const void *a, const void *b)
{
	return (spa_aux_compare(a, b));
}

void
spa_spare_add(vdev_t *vd)
{
	mutex_enter(&spa_spare_lock);
	ASSERT(!vd->vdev_isspare);
	spa_aux_add(vd, &spa_spare_avl);
	vd->vdev_isspare = B_TRUE;
	mutex_exit(&spa_spare_lock);
}

void
spa_spare_remove(vdev_t *vd)
{
	mutex_enter(&spa_spare_lock);
	ASSERT(vd->vdev_isspare);
	spa_aux_remove(vd, &spa_spare_avl);
	vd->vdev_isspare = B_FALSE;
	mutex_exit(&spa_spare_lock);
}

boolean_t
spa_spare_exists(uint64_t guid, uint64_t *pool, int *refcnt)
{
	boolean_t found;

	mutex_enter(&spa_spare_lock);
	found = spa_aux_exists(guid, pool, refcnt, &spa_spare_avl);
	mutex_exit(&spa_spare_lock);

	return (found);
}

void
spa_spare_activate(vdev_t *vd)
{
	mutex_enter(&spa_spare_lock);
	ASSERT(vd->vdev_isspare);
	spa_aux_activate(vd, &spa_spare_avl);
	mutex_exit(&spa_spare_lock);
}

/*
 * Level 2 ARC devices are tracked globally for the same reasons as spares.
 * Cache devices currently only support one pool per cache device, and so
 * for these devices the aux reference count is currently unused beyond 1.
 */

static int
spa_l2cache_compare(const void *a, const void *b)
{
	return (spa_aux_compare(a, b));
}

void
spa_l2cache_add(vdev_t *vd)
{
	mutex_enter(&spa_l2cache_lock);
	ASSERT(!vd->vdev_isl2cache);
	spa_aux_add(vd, &spa_l2cache_avl);
	vd->vdev_isl2cache = B_TRUE;
	mutex_exit(&spa_l2cache_lock);
}

void
spa_l2cache_remove(vdev_t *vd)
{
	mutex_enter(&spa_l2cache_lock);
	ASSERT(vd->vdev_isl2cache);
	spa_aux_remove(vd, &spa_l2cache_avl);
	vd->vdev_isl2cache = B_FALSE;
	mutex_exit(&spa_l2cache_lock);
}

boolean_t
spa_l2cache_exists(uint64_t guid, uint64_t *pool)
{
	boolean_t found;

	mutex_enter(&spa_l2cache_lock);
	found = spa_aux_exists(guid, pool, NULL, &spa_l2cache_avl);
	mutex_exit(&spa_l2cache_lock);

	return (found);
}

void
spa_l2cache_activate(vdev_t *vd)
{
	mutex_enter(&spa_l2cache_lock);
	ASSERT(vd->vdev_isl2cache);
	spa_aux_activate(vd, &spa_l2cache_avl);
	mutex_exit(&spa_l2cache_lock);
}

/*
 * ==========================================================================
 * SPA vdev locking
 * ==========================================================================
 */

/*
 * Lock the given spa_t for the purpose of adding or removing a vdev.
 * Grabs the global spa_namespace_lock plus the spa config lock for writing.
 * It returns the next transaction group for the spa_t.
 */
uint64_t
spa_vdev_enter(spa_t *spa)
{
	mutex_enter(&spa->spa_vdev_top_lock);
	mutex_enter(&spa_namespace_lock);
	return (spa_vdev_config_enter(spa));
}

/*
 * Internal implementation for spa_vdev_enter().  Used when a vdev
 * operation requires multiple syncs (i.e. removing a device) while
 * keeping the spa_namespace_lock held.
 */
uint64_t
spa_vdev_config_enter(spa_t *spa)
{
	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	spa_config_enter(spa, SCL_ALL, spa, RW_WRITER);

	return (spa_last_synced_txg(spa) + 1);
}

/*
 * Used in combination with spa_vdev_config_enter() to allow the syncing
 * of multiple transactions without releasing the spa_namespace_lock.
 */
void
spa_vdev_config_exit(spa_t *spa, vdev_t *vd, uint64_t txg, int error, char *tag)
{
	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	int config_changed = B_FALSE;

	ASSERT(txg > spa_last_synced_txg(spa));

	spa->spa_pending_vdev = NULL;

	/*
	 * Reassess the DTLs.
	 */
	vdev_dtl_reassess(spa->spa_root_vdev, 0, 0, B_FALSE);

	if (error == 0 && !list_is_empty(&spa->spa_config_dirty_list)) {
		config_changed = B_TRUE;
		spa->spa_config_generation++;
	}

	/*
	 * Verify the metaslab classes.
	 */
	ASSERT(metaslab_class_validate(spa_normal_class(spa)) == 0);
	ASSERT(metaslab_class_validate(spa_log_class(spa)) == 0);

	spa_config_exit(spa, SCL_ALL, spa);

	/*
	 * Panic the system if the specified tag requires it.  This
	 * is useful for ensuring that configurations are updated
	 * transactionally.
	 */
	if (zio_injection_enabled)
		zio_handle_panic_injection(spa, tag, 0);

	/*
	 * Note: this txg_wait_synced() is important because it ensures
	 * that there won't be more than one config change per txg.
	 * This allows us to use the txg as the generation number.
	 */
	if (error == 0)
		txg_wait_synced(spa->spa_dsl_pool, txg);

	if (vd != NULL) {
		ASSERT(!vd->vdev_detached || vd->vdev_dtl_sm == NULL);
		spa_config_enter(spa, SCL_ALL, spa, RW_WRITER);
		vdev_free(vd);
		spa_config_exit(spa, SCL_ALL, spa);
	}

	/*
	 * If the config changed, update the config cache.
	 */
	if (config_changed)
		spa_config_sync(spa, B_FALSE, B_TRUE);
}

/*
 * Unlock the spa_t after adding or removing a vdev.  Besides undoing the
 * locking of spa_vdev_enter(), we also want to make sure the transactions
 * have synced to disk, and then update the global configuration cache with
 * the new information.
 */
int
spa_vdev_exit(spa_t *spa, vdev_t *vd, uint64_t txg, int error)
{
	spa_vdev_config_exit(spa, vd, txg, error, FTAG);
	mutex_exit(&spa_namespace_lock);
	mutex_exit(&spa->spa_vdev_top_lock);

	return (error);
}
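/*
 * Callers typically bracket a vdev configuration change as follows
 * (illustrative; the actual work is elided):
 *
 *	txg = spa_vdev_enter(spa);
 *	... modify the vdev tree ...
 *	return (spa_vdev_exit(spa, vd, txg, error));
 *
 * so that the change syncs in a single txg and the config cache is
 * updated before the namespace lock is dropped.
 */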
/*
 * Lock the given spa_t for the purpose of changing vdev state.
 */
void
spa_vdev_state_enter(spa_t *spa, int oplocks)
{
	int locks = SCL_STATE_ALL | oplocks;

	/*
	 * Root pools may need to read from the underlying devfs filesystem
	 * when opening up a vdev.  Unfortunately if we're holding the
	 * SCL_ZIO lock it will result in a deadlock when we try to issue
	 * the read from the root filesystem.  Instead we "prefetch"
	 * the associated vnodes that we need prior to opening the
	 * underlying devices and cache them so that we can prevent
	 * any I/O when we are doing the actual open.
	 */
	if (spa_is_root(spa)) {
		int low = locks & ~(SCL_ZIO - 1);
		int high = locks & ~low;

		spa_config_enter(spa, high, spa, RW_WRITER);
		vdev_hold(spa->spa_root_vdev);
		spa_config_enter(spa, low, spa, RW_WRITER);
	} else {
		spa_config_enter(spa, locks, spa, RW_WRITER);
	}
	spa->spa_vdev_locks = locks;
}

int
spa_vdev_state_exit(spa_t *spa, vdev_t *vd, int error)
{
	boolean_t config_changed = B_FALSE;

	if (vd != NULL || error == 0)
		vdev_dtl_reassess(vd ? vd->vdev_top : spa->spa_root_vdev,
		    0, 0, B_FALSE);

	if (vd != NULL) {
		vdev_state_dirty(vd->vdev_top);
		config_changed = B_TRUE;
		spa->spa_config_generation++;
	}

	if (spa_is_root(spa))
		vdev_rele(spa->spa_root_vdev);

	ASSERT3U(spa->spa_vdev_locks, >=, SCL_STATE_ALL);
	spa_config_exit(spa, spa->spa_vdev_locks, spa);

	/*
	 * If anything changed, wait for it to sync.  This ensures that,
	 * from the system administrator's perspective, zpool(1M) commands
	 * are synchronous.  This is important for things like zpool offline:
	 * when the command completes, you expect no further I/O from ZFS.
	 */
	if (vd != NULL)
		txg_wait_synced(spa->spa_dsl_pool, 0);

	/*
	 * If the config changed, update the config cache.
	 */
	if (config_changed) {
		mutex_enter(&spa_namespace_lock);
		spa_config_sync(spa, B_FALSE, B_TRUE);
		mutex_exit(&spa_namespace_lock);
	}

	return (error);
}

/*
 * ==========================================================================
 * Miscellaneous functions
 * ==========================================================================
 */

void
spa_activate_mos_feature(spa_t *spa, const char *feature, dmu_tx_t *tx)
{
	if (!nvlist_exists(spa->spa_label_features, feature)) {
		fnvlist_add_boolean(spa->spa_label_features, feature);
		/*
		 * When we are creating the pool (tx_txg==TXG_INITIAL), we
		 * can't dirty the vdev config because lock SCL_CONFIG is
		 * not held.  Thankfully, in this case we don't need to dirty
		 * the config because it will be written out anyway when we
		 * finish creating the pool.
		 */
		if (tx->tx_txg != TXG_INITIAL)
			vdev_config_dirty(spa->spa_root_vdev);
	}
}

void
spa_deactivate_mos_feature(spa_t *spa, const char *feature)
{
	if (nvlist_remove_all(spa->spa_label_features, feature) == 0)
		vdev_config_dirty(spa->spa_root_vdev);
}

/*
 * Rename a spa_t.
 */
int
spa_rename(const char *name, const char *newname)
{
	spa_t *spa;
	int err;

	/*
	 * Lookup the spa_t and grab the config lock for writing.  We need to
	 * actually open the pool so that we can sync out the necessary labels.
	 * It's OK to call spa_open() with the namespace lock held because we
	 * allow recursive calls for other reasons.
	 */
	mutex_enter(&spa_namespace_lock);
	if ((err = spa_open(name, &spa, FTAG)) != 0) {
		mutex_exit(&spa_namespace_lock);
		return (err);
	}

	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);

	avl_remove(&spa_namespace_avl, spa);
	(void) strlcpy(spa->spa_name, newname, sizeof (spa->spa_name));
	avl_add(&spa_namespace_avl, spa);

	/*
	 * Sync all labels to disk with the new names by marking the root vdev
	 * dirty and waiting for it to sync.  It will pick up the new pool name
	 * during the sync.
	 */
	vdev_config_dirty(spa->spa_root_vdev);

	spa_config_exit(spa, SCL_ALL, FTAG);

	txg_wait_synced(spa->spa_dsl_pool, 0);

	/*
	 * Sync the updated config cache.
	 */
	spa_config_sync(spa, B_FALSE, B_TRUE);

	spa_close(spa, FTAG);

	mutex_exit(&spa_namespace_lock);

	return (0);
}

/*
 * Return the spa_t associated with given pool_guid, if it exists.  If
 * device_guid is non-zero, determine whether the pool exists *and* contains
 * a device with the specified device_guid.
 */
spa_t *
spa_by_guid(uint64_t pool_guid, uint64_t device_guid)
{
	spa_t *spa;
	avl_tree_t *t = &spa_namespace_avl;

	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	for (spa = avl_first(t); spa != NULL; spa = AVL_NEXT(t, spa)) {
		if (spa->spa_state == POOL_STATE_UNINITIALIZED)
			continue;
		if (spa->spa_root_vdev == NULL)
			continue;
		if (spa_guid(spa) == pool_guid) {
			if (device_guid == 0)
				break;

			if (vdev_lookup_by_guid(spa->spa_root_vdev,
			    device_guid) != NULL)
				break;

			/*
			 * Check any devices we may be in the process of
			 * adding.
			 */
			if (spa->spa_pending_vdev) {
				if (vdev_lookup_by_guid(spa->spa_pending_vdev,
				    device_guid) != NULL)
					break;
			}
		}
	}

	return (spa);
}

/*
 * Determine whether a pool with the given pool_guid exists.
 */
boolean_t
spa_guid_exists(uint64_t pool_guid, uint64_t device_guid)
{
	return (spa_by_guid(pool_guid, device_guid) != NULL);
}

char *
spa_strdup(const char *s)
{
	size_t len;
	char *new;

	len = strlen(s);
	new = kmem_alloc(len + 1, KM_SLEEP);
	bcopy(s, new, len);
	new[len] = '\0';

	return (new);
}

void
spa_strfree(char *s)
{
	kmem_free(s, strlen(s) + 1);
}

uint64_t
spa_get_random(uint64_t range)
{
	uint64_t r;

	ASSERT(range != 0);

	(void) random_get_pseudo_bytes((void *)&r, sizeof (uint64_t));

	return (r % range);
}

uint64_t
spa_generate_guid(spa_t *spa)
{
	uint64_t guid = spa_get_random(-1ULL);

	if (spa != NULL) {
		while (guid == 0 || spa_guid_exists(spa_guid(spa), guid))
			guid = spa_get_random(-1ULL);
	} else {
		while (guid == 0 || spa_guid_exists(guid, 0))
			guid = spa_get_random(-1ULL);
	}

	return (guid);
}

void
snprintf_blkptr(char *buf, size_t buflen, const blkptr_t *bp)
{
	char type[256];
	char *checksum = NULL;
	char *compress = NULL;

	if (bp != NULL) {
		if (BP_GET_TYPE(bp) & DMU_OT_NEWTYPE) {
			dmu_object_byteswap_t bswap =
			    DMU_OT_BYTESWAP(BP_GET_TYPE(bp));
			(void) snprintf(type, sizeof (type), "bswap %s %s",
			    DMU_OT_IS_METADATA(BP_GET_TYPE(bp)) ?
1509 "metadata" : "data", 1510 dmu_ot_byteswap[bswap].ob_name); 1511 } else { 1512 (void) strlcpy(type, dmu_ot[BP_GET_TYPE(bp)].ot_name, 1513 sizeof (type)); 1514 } 1515 if (!BP_IS_EMBEDDED(bp)) { 1516 checksum = 1517 zio_checksum_table[BP_GET_CHECKSUM(bp)].ci_name; 1518 } 1519 compress = zio_compress_table[BP_GET_COMPRESS(bp)].ci_name; 1520 } 1521 1522 SNPRINTF_BLKPTR(snprintf, ' ', buf, buflen, bp, type, checksum, 1523 compress); 1524} 1525 1526void 1527spa_freeze(spa_t *spa) 1528{ 1529 uint64_t freeze_txg = 0; 1530 1531 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 1532 if (spa->spa_freeze_txg == UINT64_MAX) { 1533 freeze_txg = spa_last_synced_txg(spa) + TXG_SIZE; 1534 spa->spa_freeze_txg = freeze_txg; 1535 } 1536 spa_config_exit(spa, SCL_ALL, FTAG); 1537 if (freeze_txg != 0) 1538 txg_wait_synced(spa_get_dsl(spa), freeze_txg); 1539} 1540 1541void 1542zfs_panic_recover(const char *fmt, ...) 1543{ 1544 va_list adx; 1545 1546 va_start(adx, fmt); 1547 vcmn_err(zfs_recover ? CE_WARN : CE_PANIC, fmt, adx); 1548 va_end(adx); 1549} 1550 1551/* 1552 * This is a stripped-down version of strtoull, suitable only for converting 1553 * lowercase hexadecimal numbers that don't overflow. 1554 */ 1555uint64_t 1556zfs_strtonum(const char *str, char **nptr) 1557{ 1558 uint64_t val = 0; 1559 char c; 1560 int digit; 1561 1562 while ((c = *str) != '\0') { 1563 if (c >= '0' && c <= '9') 1564 digit = c - '0'; 1565 else if (c >= 'a' && c <= 'f') 1566 digit = 10 + c - 'a'; 1567 else 1568 break; 1569 1570 val *= 16; 1571 val += digit; 1572 1573 str++; 1574 } 1575 1576 if (nptr) 1577 *nptr = (char *)str; 1578 1579 return (val); 1580} 1581 1582/* 1583 * ========================================================================== 1584 * Accessor functions 1585 * ========================================================================== 1586 */ 1587 1588boolean_t 1589spa_shutting_down(spa_t *spa) 1590{ 1591 return (spa->spa_async_suspended); 1592} 1593 1594dsl_pool_t * 1595spa_get_dsl(spa_t *spa) 1596{ 1597 return (spa->spa_dsl_pool); 1598} 1599 1600boolean_t 1601spa_is_initializing(spa_t *spa) 1602{ 1603 return (spa->spa_is_initializing); 1604} 1605 1606blkptr_t * 1607spa_get_rootblkptr(spa_t *spa) 1608{ 1609 return (&spa->spa_ubsync.ub_rootbp); 1610} 1611 1612void 1613spa_set_rootblkptr(spa_t *spa, const blkptr_t *bp) 1614{ 1615 spa->spa_uberblock.ub_rootbp = *bp; 1616} 1617 1618void 1619spa_altroot(spa_t *spa, char *buf, size_t buflen) 1620{ 1621 if (spa->spa_root == NULL) 1622 buf[0] = '\0'; 1623 else 1624 (void) strncpy(buf, spa->spa_root, buflen); 1625} 1626 1627int 1628spa_sync_pass(spa_t *spa) 1629{ 1630 return (spa->spa_sync_pass); 1631} 1632 1633char * 1634spa_name(spa_t *spa) 1635{ 1636 return (spa->spa_name); 1637} 1638 1639uint64_t 1640spa_guid(spa_t *spa) 1641{ 1642 dsl_pool_t *dp = spa_get_dsl(spa); 1643 uint64_t guid; 1644 1645 /* 1646 * If we fail to parse the config during spa_load(), we can go through 1647 * the error path (which posts an ereport) and end up here with no root 1648 * vdev. We stash the original pool guid in 'spa_config_guid' to handle 1649 * this case. 1650 */ 1651 if (spa->spa_root_vdev == NULL) 1652 return (spa->spa_config_guid); 1653 1654 guid = spa->spa_last_synced_guid != 0 ? 1655 spa->spa_last_synced_guid : spa->spa_root_vdev->vdev_guid; 1656 1657 /* 1658 * Return the most recently synced out guid unless we're 1659 * in syncing context. 
/*
 * ==========================================================================
 * Accessor functions
 * ==========================================================================
 */

boolean_t
spa_shutting_down(spa_t *spa)
{
	return (spa->spa_async_suspended);
}

dsl_pool_t *
spa_get_dsl(spa_t *spa)
{
	return (spa->spa_dsl_pool);
}

boolean_t
spa_is_initializing(spa_t *spa)
{
	return (spa->spa_is_initializing);
}

blkptr_t *
spa_get_rootblkptr(spa_t *spa)
{
	return (&spa->spa_ubsync.ub_rootbp);
}

void
spa_set_rootblkptr(spa_t *spa, const blkptr_t *bp)
{
	spa->spa_uberblock.ub_rootbp = *bp;
}

void
spa_altroot(spa_t *spa, char *buf, size_t buflen)
{
	if (spa->spa_root == NULL)
		buf[0] = '\0';
	else
		(void) strncpy(buf, spa->spa_root, buflen);
}

int
spa_sync_pass(spa_t *spa)
{
	return (spa->spa_sync_pass);
}

char *
spa_name(spa_t *spa)
{
	return (spa->spa_name);
}

uint64_t
spa_guid(spa_t *spa)
{
	dsl_pool_t *dp = spa_get_dsl(spa);
	uint64_t guid;

	/*
	 * If we fail to parse the config during spa_load(), we can go through
	 * the error path (which posts an ereport) and end up here with no root
	 * vdev.  We stash the original pool guid in 'spa_config_guid' to
	 * handle this case.
	 */
	if (spa->spa_root_vdev == NULL)
		return (spa->spa_config_guid);

	guid = spa->spa_last_synced_guid != 0 ?
	    spa->spa_last_synced_guid : spa->spa_root_vdev->vdev_guid;

	/*
	 * Return the most recently synced out guid unless we're
	 * in syncing context.
	 */
	if (dp && dsl_pool_sync_context(dp))
		return (spa->spa_root_vdev->vdev_guid);
	else
		return (guid);
}

uint64_t
spa_load_guid(spa_t *spa)
{
	/*
	 * This is a GUID that exists solely as a reference for the
	 * purposes of the arc.  It is generated at load time, and
	 * is never written to persistent storage.
	 */
	return (spa->spa_load_guid);
}

uint64_t
spa_last_synced_txg(spa_t *spa)
{
	return (spa->spa_ubsync.ub_txg);
}

uint64_t
spa_first_txg(spa_t *spa)
{
	return (spa->spa_first_txg);
}

uint64_t
spa_syncing_txg(spa_t *spa)
{
	return (spa->spa_syncing_txg);
}

pool_state_t
spa_state(spa_t *spa)
{
	return (spa->spa_state);
}

spa_load_state_t
spa_load_state(spa_t *spa)
{
	return (spa->spa_load_state);
}

uint64_t
spa_freeze_txg(spa_t *spa)
{
	return (spa->spa_freeze_txg);
}

/* ARGSUSED */
uint64_t
spa_get_asize(spa_t *spa, uint64_t lsize)
{
	return (lsize * spa_asize_inflation);
}

/*
 * Return the amount of slop space in bytes.  It is 1/32 of the pool (3.1%),
 * or at least 32MB.
 *
 * See the comment above spa_slop_shift for details.
 */
uint64_t
spa_get_slop_space(spa_t *spa)
{
	uint64_t space = spa_get_dspace(spa);

	return (MAX(space >> spa_slop_shift, SPA_MINDEVSIZE >> 1));
}

uint64_t
spa_get_dspace(spa_t *spa)
{
	return (spa->spa_dspace);
}

void
spa_update_dspace(spa_t *spa)
{
	spa->spa_dspace = metaslab_class_get_dspace(spa_normal_class(spa)) +
	    ddt_get_dedup_dspace(spa);
}

/*
 * Return the failure mode that has been set for this pool.  The default
 * behavior will be to block all I/Os when a complete failure occurs.
 */
uint8_t
spa_get_failmode(spa_t *spa)
{
	return (spa->spa_failmode);
}

boolean_t
spa_suspended(spa_t *spa)
{
	return (spa->spa_suspended);
}

uint64_t
spa_version(spa_t *spa)
{
	return (spa->spa_ubsync.ub_version);
}

boolean_t
spa_deflate(spa_t *spa)
{
	return (spa->spa_deflate);
}

metaslab_class_t *
spa_normal_class(spa_t *spa)
{
	return (spa->spa_normal_class);
}

metaslab_class_t *
spa_log_class(spa_t *spa)
{
	return (spa->spa_log_class);
}

void
spa_evicting_os_register(spa_t *spa, objset_t *os)
{
	mutex_enter(&spa->spa_evicting_os_lock);
	list_insert_head(&spa->spa_evicting_os_list, os);
	mutex_exit(&spa->spa_evicting_os_lock);
}

void
spa_evicting_os_deregister(spa_t *spa, objset_t *os)
{
	mutex_enter(&spa->spa_evicting_os_lock);
	list_remove(&spa->spa_evicting_os_list, os);
	cv_broadcast(&spa->spa_evicting_os_cv);
	mutex_exit(&spa->spa_evicting_os_lock);
}

void
spa_evicting_os_wait(spa_t *spa)
{
	mutex_enter(&spa->spa_evicting_os_lock);
	while (!list_is_empty(&spa->spa_evicting_os_list))
		cv_wait(&spa->spa_evicting_os_cv, &spa->spa_evicting_os_lock);
	mutex_exit(&spa->spa_evicting_os_lock);

	dmu_buf_user_evict_wait();
}

int
spa_max_replication(spa_t *spa)
{
	/*
	 * As of SPA_VERSION == SPA_VERSION_DITTO_BLOCKS, we are able to
	 * handle BPs with more than one DVA allocated.  Set our max
	 * replication level accordingly.
	 */
	if (spa_version(spa) < SPA_VERSION_DITTO_BLOCKS)
		return (1);
	return (MIN(SPA_DVAS_PER_BP, spa_max_replication_override));
}

int
spa_prev_software_version(spa_t *spa)
{
	return (spa->spa_prev_software_version);
}

uint64_t
spa_deadman_synctime(spa_t *spa)
{
	return (spa->spa_deadman_synctime);
}

uint64_t
dva_get_dsize_sync(spa_t *spa, const dva_t *dva)
{
	uint64_t asize = DVA_GET_ASIZE(dva);
	uint64_t dsize = asize;

	ASSERT(spa_config_held(spa, SCL_ALL, RW_READER) != 0);

	if (asize != 0 && spa->spa_deflate) {
		uint64_t vdev = DVA_GET_VDEV(dva);
		vdev_t *vd = vdev_lookup_top(spa, vdev);
		if (vd == NULL) {
			panic(
			    "dva_get_dsize_sync(): bad DVA %llu:%llu",
			    (u_longlong_t)vdev, (u_longlong_t)asize);
		}
		dsize = (asize >> SPA_MINBLOCKSHIFT) * vd->vdev_deflate_ratio;
	}

	return (dsize);
}

uint64_t
bp_get_dsize_sync(spa_t *spa, const blkptr_t *bp)
{
	uint64_t dsize = 0;

	for (int d = 0; d < BP_GET_NDVAS(bp); d++)
		dsize += dva_get_dsize_sync(spa, &bp->blk_dva[d]);

	return (dsize);
}

uint64_t
bp_get_dsize(spa_t *spa, const blkptr_t *bp)
{
	uint64_t dsize = 0;

	spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);

	for (int d = 0; d < BP_GET_NDVAS(bp); d++)
		dsize += dva_get_dsize_sync(spa, &bp->blk_dva[d]);

	spa_config_exit(spa, SCL_VDEV, FTAG);

	return (dsize);
}

/*
 * ==========================================================================
 * Initialization and Termination
 * ==========================================================================
 */

static int
spa_name_compare(const void *a1, const void *a2)
{
	const spa_t *s1 = a1;
	const spa_t *s2 = a2;
	int s;

	s = strcmp(s1->spa_name, s2->spa_name);
	if (s > 0)
		return (1);
	if (s < 0)
		return (-1);
	return (0);
}

int
spa_busy(void)
{
	return (spa_active_count);
}

void
spa_boot_init()
{
	spa_config_load();
}

#ifdef _KERNEL
EVENTHANDLER_DEFINE(mountroot, spa_boot_init, NULL, 0);
#endif

void
spa_init(int mode)
{
	mutex_init(&spa_namespace_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa_spare_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa_l2cache_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&spa_namespace_cv, NULL, CV_DEFAULT, NULL);

	avl_create(&spa_namespace_avl, spa_name_compare, sizeof (spa_t),
	    offsetof(spa_t, spa_avl));

	avl_create(&spa_spare_avl, spa_spare_compare, sizeof (spa_aux_t),
	    offsetof(spa_aux_t, aux_avl));

	avl_create(&spa_l2cache_avl, spa_l2cache_compare, sizeof (spa_aux_t),
	    offsetof(spa_aux_t, aux_avl));

	spa_mode_global = mode;

#ifdef illumos
#ifdef _KERNEL
	spa_arch_init();
#else
	if (spa_mode_global != FREAD && dprintf_find_string("watch")) {
		arc_procfd = open("/proc/self/ctl", O_WRONLY);
		if (arc_procfd == -1) {
			perror("could not enable watchpoints: "
			    "opening /proc/self/ctl failed: ");
		} else {
			arc_watch = B_TRUE;
		}
	}
#endif
#endif /* illumos */
	refcount_sysinit();
	unique_init();
	range_tree_init();
	zio_init();
	lz4_init();
	dmu_init();
	zil_init();
	vdev_cache_stat_init();
	zfs_prop_init();
	zpool_prop_init();
	zpool_feature_init();
	spa_config_load();
	l2arc_start();
#ifndef illumos
#ifdef _KERNEL
	zfs_deadman_init();
#endif
#endif	/* !illumos */
}

void
spa_fini(void)
{
	l2arc_stop();

	spa_evict_all();

	vdev_cache_stat_fini();
	zil_fini();
	dmu_fini();
	lz4_fini();
	zio_fini();
	range_tree_fini();
	unique_fini();
	refcount_fini();

	avl_destroy(&spa_namespace_avl);
	avl_destroy(&spa_spare_avl);
	avl_destroy(&spa_l2cache_avl);

	cv_destroy(&spa_namespace_cv);
	mutex_destroy(&spa_namespace_lock);
	mutex_destroy(&spa_spare_lock);
	mutex_destroy(&spa_l2cache_lock);
}

/*
 * Return whether this pool has slogs.  No locking needed.
 * It's not a problem if the wrong answer is returned as it's only for
 * performance and not correctness.
 */
boolean_t
spa_has_slogs(spa_t *spa)
{
	return (spa->spa_log_class->mc_rotor != NULL);
}

spa_log_state_t
spa_get_log_state(spa_t *spa)
{
	return (spa->spa_log_state);
}

void
spa_set_log_state(spa_t *spa, spa_log_state_t state)
{
	spa->spa_log_state = state;
}

boolean_t
spa_is_root(spa_t *spa)
{
	return (spa->spa_is_root);
}

boolean_t
spa_writeable(spa_t *spa)
{
	return (!!(spa->spa_mode & FWRITE));
}

/*
 * Returns true if there is a pending sync task in any of the current
 * syncing txg, the current quiescing txg, or the current open txg.
 */
boolean_t
spa_has_pending_synctask(spa_t *spa)
{
	return (!txg_all_lists_empty(&spa->spa_dsl_pool->dp_sync_tasks));
}

int
spa_mode(spa_t *spa)
{
	return (spa->spa_mode);
}

uint64_t
spa_bootfs(spa_t *spa)
{
	return (spa->spa_bootfs);
}

uint64_t
spa_delegation(spa_t *spa)
{
	return (spa->spa_delegation);
}

objset_t *
spa_meta_objset(spa_t *spa)
{
	return (spa->spa_meta_objset);
}

enum zio_checksum
spa_dedup_checksum(spa_t *spa)
{
	return (spa->spa_dedup_checksum);
}

/*
 * Reset pool scan stats per scan pass (or reboot).
 */
void
spa_scan_stat_init(spa_t *spa)
{
	/* data not stored on disk */
	spa->spa_scan_pass_start = gethrestime_sec();
	spa->spa_scan_pass_exam = 0;
	vdev_scan_stat_init(spa->spa_root_vdev);
}

/*
 * Get scan stats for zpool status reports.
 */
int
spa_scan_get_stats(spa_t *spa, pool_scan_stat_t *ps)
{
	dsl_scan_t *scn = spa->spa_dsl_pool ?
	    spa->spa_dsl_pool->dp_scan : NULL;

	if (scn == NULL || scn->scn_phys.scn_func == POOL_SCAN_NONE)
		return (SET_ERROR(ENOENT));
	bzero(ps, sizeof (pool_scan_stat_t));

	/* data stored on disk */
	ps->pss_func = scn->scn_phys.scn_func;
	ps->pss_start_time = scn->scn_phys.scn_start_time;
	ps->pss_end_time = scn->scn_phys.scn_end_time;
	ps->pss_to_examine = scn->scn_phys.scn_to_examine;
	ps->pss_examined = scn->scn_phys.scn_examined;
	ps->pss_to_process = scn->scn_phys.scn_to_process;
	ps->pss_processed = scn->scn_phys.scn_processed;
	ps->pss_errors = scn->scn_phys.scn_errors;
	ps->pss_state = scn->scn_phys.scn_state;

	/* data not stored on disk */
	ps->pss_pass_start = spa->spa_scan_pass_start;
	ps->pss_pass_exam = spa->spa_scan_pass_exam;

	return (0);
}

boolean_t
spa_debug_enabled(spa_t *spa)
{
	return (spa->spa_debug);
}

int
spa_maxblocksize(spa_t *spa)
{
	if (spa_feature_is_enabled(spa, SPA_FEATURE_LARGE_BLOCKS))
		return (SPA_MAXBLOCKSIZE);
	else
		return (SPA_OLD_MAXBLOCKSIZE);
}